Merge "Support dynamic dependent pipeline additions" into feature/zuulv3
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
index 8872397..60873a9 100644
--- a/doc/source/admin/tenants.rst
+++ b/doc/source/admin/tenants.rst
@@ -35,6 +35,8 @@
- shared-jobs:
include: jobs
untrusted-projects:
+ - zuul-jobs:
+ shadow: common-config
- project1
- project2
@@ -83,6 +85,20 @@
**exclude**
A list of configuration classes that should not be loaded.
+ **shadow**
+ A list of projects which this project is permitted to shadow.
+ Normally, only one project in Zuul may contain definitions for a
+ given job. If a project earlier in the configuration defines a
+ job which a later project redefines, the later definition is
+ considered an error and is not permitted. The "shadow" attribute
+ of a project indicates that job definitions in this project which
+ conflict with the named projects should be ignored, and those in
+ the named project should be used instead. The named projects must
+ still appear earlier in the configuration. In the example above,
+ if a job definition appears in both the "common-config" and
+ "zuul-jobs" projects, the definition in "common-config" will be
+ used.
+
The order of the projects listed in a tenant is important. A job
which is defined in one project may not be redefined in another
project; therefore, once a job appears in one project, a project
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 5a5b7bd..76f73c3 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -496,12 +496,21 @@
different string. Default: "FAILURE".
**success-url**
- When a job succeeds, this URL is reported along with the result.
+ When a job succeeds, this URL is reported along with the result. If
+ this value is not supplied, Zuul uses the content of the job
+ :ref:`return value <return_values>` **zuul.log_url**. This is
+ recommended, as it allows the code which stores the job artifacts to
+ report exactly where they were stored. To override
+ this value, or if it is not set, supply an absolute URL in this
+ field. If a relative URL is supplied in this field, and
+ **zuul.log_url** is set, then the two will be combined to produce
+ the URL used for the report. This can be used to specify that
+ certain jobs should "deep link" into the stored job artifacts.
Default: none.
**failure-url**
When a job fails, this URL is reported along with the result.
- Default: none.
+ Otherwise, this behaves the same as **success-url**.
**hold-following-changes**
In a dependent pipeline, this option may be used to indicate that no
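A quick sketch of how the **success-url** behaviour described above combines with **zuul.log_url**. The concrete values match the data-return test fixtures further down in this change; the absolute URL is a hypothetical example:

import urllib.parse

# A relative success-url is joined onto zuul.log_url; an absolute
# success-url (one with a scheme) is used as-is.
log_url = 'http://example.com/test/log/url/'

assert urllib.parse.urljoin(log_url, 'docs/index.html') == \
    'http://example.com/test/log/url/docs/index.html'

absolute = 'https://artifacts.example.org/build/123/'  # hypothetical
assert urllib.parse.urlparse(absolute).scheme == 'https'  # used directly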
@@ -700,8 +709,23 @@
roles: a Galaxy role, which is simply a role that is installed from
Ansible Galaxy, or a Zuul role, which is a role provided by a
project managed by Zuul. Zuul roles are able to benefit from
- speculative merging and cross-project dependencies when used by jobs
- in untrusted projects.
+ speculative merging and cross-project dependencies when used by
+ playbooks in untrusted projects. Roles are added to the Ansible
+ role path in the order they appear on the job -- roles earlier in
+ the list will take precedence over those which follow.
+
+ In the case of job inheritance or variance, the roles used for each
+ of the playbooks run by the job will be only those which were
+ defined along with that playbook. If a child job inherits from a
+ parent which defines a pre and post playbook, then the pre and post
+ playbooks it inherits from the parent job will run only with the
+ roles that were defined on the parent. If the child adds its own
+ pre and post playbooks, then any roles added by the child will be
+ available to the child's playbooks. This is so that a job which
+ inherits from a parent does not inadvertently alter the behavior of
+ the parent's playbooks by the addition of conflicting roles. Roles
+ added by a child will appear before those it inherits from its
+ parent.
A project which supplies a role may be structured in one of two
configurations: a bare role (in which the role exists at the root of
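The role ordering described above follows from the Job.addRoles() change in zuul/model.py later in this diff. A minimal sketch of that ordering, with role names taken from the test_role_inheritance test below:

# Minimal sketch of the role ordering described above: roles added later
# (for example by a child job) are prepended without duplicates, so they
# appear earlier in the Ansible role path and take precedence over the
# roles inherited from the parent.
def add_roles(existing, new):
    result = list(existing)
    for role in reversed(new):
        if role not in result:
            result.insert(0, role)
    return tuple(result)

parent_roles = add_roles((), ['bare-role'])              # defined on the parent
child_roles = add_roles(parent_roles, ['org/project'])   # added by the child
assert child_roles == ('org/project', 'bare-role')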
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 58f3371..78121bc 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -101,6 +101,8 @@
.. TODO: describe standard lib and link to published docs for it.
+.. _return_values:
+
Return Values
-------------
diff --git a/tests/base.py b/tests/base.py
index 9709bf7..1cc9999 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -1359,12 +1359,12 @@
self.recordResult(result)
return result
- def runAnsible(self, cmd, timeout, trusted=False):
+ def runAnsible(self, cmd, timeout, config_file, trusted):
build = self.executor_server.job_builds[self.job.unique]
if self.executor_server._run_ansible:
result = super(RecordingAnsibleJob, self).runAnsible(
- cmd, timeout, trusted=trusted)
+ cmd, timeout, config_file, trusted)
else:
result = build.run()
return result
diff --git a/tests/fixtures/config/data-return/git/common-config/playbooks/data-return.yaml b/tests/fixtures/config/data-return/git/common-config/playbooks/data-return.yaml
index b92ff5c..5e412c3 100644
--- a/tests/fixtures/config/data-return/git/common-config/playbooks/data-return.yaml
+++ b/tests/fixtures/config/data-return/git/common-config/playbooks/data-return.yaml
@@ -3,4 +3,4 @@
- zuul_return:
data:
zuul:
- log_url: test/log/url
+ log_url: http://example.com/test/log/url/
diff --git a/tests/fixtures/config/data-return/git/common-config/zuul.yaml b/tests/fixtures/config/data-return/git/common-config/zuul.yaml
index 8aea931..8d602f1 100644
--- a/tests/fixtures/config/data-return/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/data-return/git/common-config/zuul.yaml
@@ -15,8 +15,14 @@
- job:
name: data-return
+- job:
+ name: data-return-relative
+ run: playbooks/data-return
+ success-url: docs/index.html
+
- project:
name: org/project
check:
jobs:
- data-return
+ - data-return-relative
diff --git a/tests/fixtures/config/roles/git/org_project/playbooks/parent-post.yaml b/tests/fixtures/config/roles/git/org_project/playbooks/parent-post.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/roles/git/org_project/playbooks/parent-post.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/roles/git/org_project/playbooks/parent-pre.yaml b/tests/fixtures/config/roles/git/org_project/playbooks/parent-pre.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/roles/git/org_project/playbooks/parent-pre.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/roles/git/org_project/roles/README b/tests/fixtures/config/roles/git/org_project/roles/README
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/fixtures/config/roles/git/org_project/roles/README
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 7fe101e..a52a2ee 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -54,7 +54,8 @@
encryption.deserialize_rsa_keypair(f.read())
self.context = model.SourceContext(self.project, 'master',
'test', True)
- self.start_mark = yaml.Mark('name', 0, 0, 0, '', 0)
+ m = yaml.Mark('name', 0, 0, 0, '', 0)
+ self.start_mark = configloader.ZuulMark(m, m, '')
@property
def job(self):
@@ -95,9 +96,9 @@
def test_job_inheritance(self):
# This is standard job inheritance.
- base_pre = model.PlaybookContext(self.context, 'base-pre')
- base_run = model.PlaybookContext(self.context, 'base-run')
- base_post = model.PlaybookContext(self.context, 'base-post')
+ base_pre = model.PlaybookContext(self.context, 'base-pre', [])
+ base_run = model.PlaybookContext(self.context, 'base-run', [])
+ base_post = model.PlaybookContext(self.context, 'base-post', [])
base = model.Job('base')
base.timeout = 30
@@ -121,9 +122,9 @@
def test_job_variants(self):
# This simulates freezing a job.
- py27_pre = model.PlaybookContext(self.context, 'py27-pre')
- py27_run = model.PlaybookContext(self.context, 'py27-run')
- py27_post = model.PlaybookContext(self.context, 'py27-post')
+ py27_pre = model.PlaybookContext(self.context, 'py27-pre', [])
+ py27_run = model.PlaybookContext(self.context, 'py27-run', [])
+ py27_post = model.PlaybookContext(self.context, 'py27-post', [])
py27 = model.Job('py27')
py27.timeout = 30
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 57f7947..f87773b 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -719,6 +719,25 @@
class TestRoles(ZuulTestCase):
tenant_config_file = 'config/roles/main.yaml'
+ def _assertRolePath(self, build, playbook, content):
+ path = os.path.join(self.test_root, build.uuid,
+ 'ansible', playbook, 'ansible.cfg')
+ roles_paths = []
+ with open(path) as f:
+ for line in f:
+ if line.startswith('roles_path'):
+ roles_paths.append(line)
+ print(roles_paths)
+ if content:
+ self.assertEqual(len(roles_paths), 1,
+ "Should have one roles_path line in %s" %
+ (playbook,))
+ self.assertIn(content, roles_paths[0])
+ else:
+ self.assertEqual(len(roles_paths), 0,
+ "Should have no roles_path line in %s" %
+ (playbook,))
+
def test_role(self):
# This exercises a proposed change to a role being checked out
# and used.
@@ -733,6 +752,51 @@
dict(name='project-test', result='SUCCESS', changes='1,1 2,1'),
])
+ def test_role_inheritance(self):
+ self.executor_server.hold_jobs_in_build = True
+ conf = textwrap.dedent(
+ """
+ - job:
+ name: parent
+ roles:
+ - zuul: bare-role
+ pre-run: playbooks/parent-pre
+ post-run: playbooks/parent-post
+
+ - job:
+ name: project-test
+ parent: parent
+ roles:
+ - zuul: org/project
+
+ - project:
+ name: org/project
+ check:
+ jobs:
+ - project-test
+ """)
+
+ file_dict = {'.zuul.yaml': conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 1)
+ build = self.getBuildByName('project-test')
+ self._assertRolePath(build, 'pre_playbook_0', 'role_0')
+ self._assertRolePath(build, 'playbook_0', 'role_0')
+ self._assertRolePath(build, 'playbook_0', 'role_1')
+ self._assertRolePath(build, 'post_playbook_0', 'role_0')
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertHistory([
+ dict(name='project-test', result='SUCCESS', changes='1,1'),
+ ])
+
class TestShadow(ZuulTestCase):
tenant_config_file = 'config/shadow/main.yaml'
@@ -759,6 +823,10 @@
self.waitUntilSettled()
self.assertHistory([
dict(name='data-return', result='SUCCESS', changes='1,1'),
- ])
- self.assertIn('- data-return test/log/url',
+ dict(name='data-return-relative', result='SUCCESS', changes='1,1'),
+ ], ordered=False)
+ self.assertIn('- data-return http://example.com/test/log/url/',
+ A.messages[-1])
+ self.assertIn('- data-return-relative '
+ 'http://example.com/test/log/url/docs/index.html',
A.messages[-1])
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index c4dd532..cc979f2 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -74,7 +74,7 @@
if not stdout_lines and stdout:
stdout_lines = stdout.split('\n')
- for key in ('changed', 'cmd', 'zuul_log_id',
+ for key in ('changed', 'cmd', 'zuul_log_id', 'invocation',
'stderr', 'stderr_lines'):
result.pop(key, None)
return stdout_lines
@@ -306,10 +306,14 @@
pass
elif result._task.action not in ('command', 'shell'):
- self._log_message(
- result=result,
- status=status,
- result_dict=result_dict)
+ if 'msg' in result_dict:
+ self._log_message(msg=result_dict['msg'],
+ result=result, status=status)
+ else:
+ self._log_message(
+ result=result,
+ status=status,
+ result_dict=result_dict)
elif 'results' in result_dict:
for res in result_dict['results']:
self._log_message(
@@ -448,15 +452,29 @@
def _dump_result_dict(self, result_dict):
result_dict = result_dict.copy()
for key in list(result_dict.keys()):
- if key.startswith('_ansible') or key == 'zuul_log_id':
+ if key.startswith('_ansible'):
del result_dict[key]
+ zuul_filter_result(result_dict)
return result_dict
def _log_message(self, result, msg=None, status="ok", result_dict=None):
hostname = self._get_hostname(result)
+ if result._task.no_log:
+ self._log("{host} | {msg}".format(
+ host=hostname,
+ msg="Output suppressed because no_log was given"))
+ return
if msg:
- self._log("{host} | {status}: {msg}".format(
- host=hostname, status=status, msg=msg))
+ msg_lines = msg.strip().split('\n')
+ if len(msg_lines) > 1:
+ self._log("{host} | {status}:".format(
+ host=hostname, status=status))
+ for msg_line in msg_lines:
+ self._log("{host} | {msg_line}".format(
+ host=hostname, msg_line=msg_line))
+ else:
+ self._log("{host} | {status}: {msg}".format(
+ host=hostname, status=status, msg=msg))
else:
self._log("{host} | {status}".format(
host=hostname, status=status, msg=msg))
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 3c9ecf7..f8e2d15 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -15,8 +15,8 @@
import copy
import os
import logging
-import pprint
import textwrap
+import io
import voluptuous as vs
@@ -131,7 +131,7 @@
{error}
- The error appears in a {stanza} stanza with the content:
+ The error appears in the following {stanza} stanza:
{content}
@@ -140,20 +140,42 @@
m = m.format(intro=intro,
error=indent(str(e)),
stanza=stanza,
- content=indent(pprint.pformat(conf)),
+ content=indent(start_mark.snippet.rstrip()),
start_mark=str(start_mark))
raise ConfigurationSyntaxError(m)
+class ZuulMark(object):
+ # The yaml mark class differs between the C and python versions.
+ # The C version does not provide a snippet, and also appears to
+ # lose data under some circumstances.
+ def __init__(self, start_mark, end_mark, stream):
+ self.name = start_mark.name
+ self.index = start_mark.index
+ self.line = start_mark.line
+ self.column = start_mark.column
+ self.snippet = stream[start_mark.index:end_mark.index]
+
+ def __str__(self):
+ return ' in "{name}", line {line}, column {column}'.format(
+ name=self.name,
+ line=self.line + 1,
+ column=self.column + 1,
+ )
+
+
class ZuulSafeLoader(yaml.SafeLoader):
zuul_node_types = frozenset(('job', 'nodeset', 'secret', 'pipeline',
'project', 'project-template',
'semaphore'))
def __init__(self, stream, context):
- super(ZuulSafeLoader, self).__init__(stream)
+ wrapped_stream = io.StringIO(stream)
+ wrapped_stream.name = str(context)
+ super(ZuulSafeLoader, self).__init__(wrapped_stream)
self.name = str(context)
self.zuul_context = context
+ self.zuul_stream = stream
def construct_mapping(self, node, deep=False):
r = super(ZuulSafeLoader, self).construct_mapping(node, deep)
@@ -161,7 +183,8 @@
if len(keys) == 1 and keys.intersection(self.zuul_node_types):
d = list(r.values())[0]
if isinstance(d, dict):
- d['_start_mark'] = node.start_mark
+ d['_start_mark'] = ZuulMark(node.start_mark, node.end_mark,
+ self.zuul_stream)
d['_source_context'] = self.zuul_context
return r
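A small sketch of the snippet extraction ZuulMark performs above, assuming the pure-Python YAML loader (per the comment, the C loader does not provide usable index data). The YAML content is hypothetical:

import yaml

# The pure-Python loader's marks carry character indexes into the stream,
# so the original source text of a node can be recovered by slicing --
# which is what ZuulMark.snippet stores for error reporting.
stream = """
- job:
    name: example
    run: playbooks/example
"""
root = yaml.compose(stream, Loader=yaml.SafeLoader)   # SequenceNode
item = root.value[0]                                  # the job mapping node
snippet = stream[item.start_mark.index:item.end_mark.index]
assert 'name: example' in snippet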
@@ -224,7 +247,7 @@
vs.Required('nodes'): to_list(node),
'groups': to_list(group),
'_source_context': model.SourceContext,
- '_start_mark': yaml.Mark,
+ '_start_mark': ZuulMark,
}
return vs.Schema(nodeset)
@@ -262,7 +285,7 @@
secret = {vs.Required('name'): str,
vs.Required('data'): data,
'_source_context': model.SourceContext,
- '_start_mark': yaml.Mark,
+ '_start_mark': ZuulMark,
}
return vs.Schema(secret)
@@ -319,7 +342,7 @@
'post-run': to_list(str),
'run': str,
'_source_context': model.SourceContext,
- '_start_mark': yaml.Mark,
+ '_start_mark': ZuulMark,
'roles': to_list(role),
'required-projects': to_list(vs.Any(job_project, str)),
'vars': dict,
@@ -400,21 +423,34 @@
parent = layout.getJob(conf['parent'])
job.inheritFrom(parent)
+ # Roles are part of the playbook context so we must establish
+ # them earlier than playbooks.
+ if 'roles' in conf:
+ roles = []
+ for role in conf.get('roles', []):
+ if 'zuul' in role:
+ r = JobParser._makeZuulRole(tenant, job, role)
+ if r:
+ roles.append(r)
+ job.addRoles(roles)
+
for pre_run_name in as_list(conf.get('pre-run')):
pre_run = model.PlaybookContext(job.source_context,
- pre_run_name)
+ pre_run_name, job.roles)
job.pre_run = job.pre_run + (pre_run,)
for post_run_name in as_list(conf.get('post-run')):
post_run = model.PlaybookContext(job.source_context,
- post_run_name)
+ post_run_name, job.roles)
job.post_run = (post_run,) + job.post_run
if 'run' in conf:
- run = model.PlaybookContext(job.source_context, conf['run'])
+ run = model.PlaybookContext(job.source_context, conf['run'],
+ job.roles)
job.run = (run,)
else:
if not project_pipeline:
run_name = os.path.join('playbooks', job.name)
- run = model.PlaybookContext(job.source_context, run_name)
+ run = model.PlaybookContext(job.source_context, run_name,
+ job.roles)
job.implied_run = (run,) + job.implied_run
for k in JobParser.simple_attributes:
@@ -460,15 +496,6 @@
job.dependencies = frozenset(as_list(conf.get('dependencies')))
- if 'roles' in conf:
- roles = []
- for role in conf.get('roles', []):
- if 'zuul' in role:
- r = JobParser._makeZuulRole(tenant, job, role)
- if r:
- roles.append(r)
- job.roles = job.roles.union(set(roles))
-
variables = conf.get('vars', None)
if variables:
job.updateVariables(variables)
@@ -539,7 +566,7 @@
'merge', 'merge-resolve',
'cherry-pick'),
'_source_context': model.SourceContext,
- '_start_mark': yaml.Mark,
+ '_start_mark': ZuulMark,
}
for p in layout.pipelines.values():
@@ -603,7 +630,7 @@
'cherry-pick'),
'default-branch': str,
'_source_context': model.SourceContext,
- '_start_mark': yaml.Mark,
+ '_start_mark': ZuulMark,
}
for p in layout.pipelines.values():
@@ -758,7 +785,7 @@
'window-decrease-type': window_type,
'window-decrease-factor': window_factor,
'_source_context': model.SourceContext,
- '_start_mark': yaml.Mark,
+ '_start_mark': ZuulMark,
}
pipeline['require'] = PipelineParser.getDriverSchema('require',
connections)
@@ -865,7 +892,7 @@
semaphore = {vs.Required('name'): str,
'max': int,
'_source_context': model.SourceContext,
- '_start_mark': yaml.Mark,
+ '_start_mark': ZuulMark,
}
return vs.Schema(semaphore)
diff --git a/zuul/driver/bubblewrap/__init__.py b/zuul/driver/bubblewrap/__init__.py
index 5ec2448..3609a71 100644
--- a/zuul/driver/bubblewrap/__init__.py
+++ b/zuul/driver/bubblewrap/__init__.py
@@ -150,6 +150,7 @@
'--ro-bind', '/bin', '/bin',
'--ro-bind', '/sbin', '/sbin',
'--ro-bind', '/etc/resolv.conf', '/etc/resolv.conf',
+ '--ro-bind', '/etc/hosts', '/etc/hosts',
'--ro-bind', '{ssh_auth_sock}', '{ssh_auth_sock}',
'--dir', '{work_dir}',
'--bind', '{work_dir}', '{work_dir}',
@@ -166,6 +167,9 @@
if os.path.isdir('/lib64'):
bwrap_command.extend(['--ro-bind', '/lib64', '/lib64'])
+ if os.path.isfile('/etc/nsswitch.conf'):
+ bwrap_command.extend(['--ro-bind', '/etc/nsswitch.conf',
+ '/etc/nsswitch.conf'])
return bwrap_command
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index c36d569..2a205bf 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -237,7 +237,6 @@
params['playbooks'] = [x.toDict() for x in job.run]
params['pre_playbooks'] = [x.toDict() for x in job.pre_run]
params['post_playbooks'] = [x.toDict() for x in job.post_run]
- params['roles'] = [x.toDict() for x in job.roles]
nodeset = item.current_build_set.getJobNodeSet(job.name)
nodes = []
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index cdd082e..824a47a 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -72,13 +72,6 @@
self._running = False
-class JobDirPlaybook(object):
- def __init__(self, root):
- self.root = root
- self.trusted = None
- self.path = None
-
-
class SshAgent(object):
log = logging.getLogger("zuul.ExecutorServer")
@@ -151,6 +144,24 @@
return result
+class JobDirPlaybook(object):
+ def __init__(self, root):
+ self.root = root
+ self.trusted = None
+ self.path = None
+ self.roles = []
+ self.roles_path = []
+ self.ansible_config = os.path.join(self.root, 'ansible.cfg')
+ self.project_link = os.path.join(self.root, 'project')
+
+ def addRole(self):
+ count = len(self.roles)
+ root = os.path.join(self.root, 'role_%i' % (count,))
+ os.makedirs(root)
+ self.roles.append(root)
+ return root
+
+
class JobDir(object):
def __init__(self, root, keep, build_uuid):
'''
@@ -163,11 +174,23 @@
'''
# root
# ansible
- # trusted.cfg
- # untrusted.cfg
+ # inventory.yaml
+ # playbook_0
+ # project -> ../trusted/project_0/...
+ # role_0 -> ../trusted/project_0/...
+ # trusted
+ # project_0
+ # <git.example.com>
+ # <project>
# work
+ # .ssh
+ # known_hosts
# src
+ # <git.example.com>
+ # <project>
# logs
+ # job-output.txt
+ # results.json
self.keep = keep
if root:
tmpdir = root
@@ -175,16 +198,16 @@
tmpdir = tempfile.gettempdir()
self.root = os.path.join(tmpdir, build_uuid)
os.mkdir(self.root, 0o700)
- # Work
self.work_root = os.path.join(self.root, 'work')
os.makedirs(self.work_root)
self.src_root = os.path.join(self.work_root, 'src')
os.makedirs(self.src_root)
self.log_root = os.path.join(self.work_root, 'logs')
os.makedirs(self.log_root)
- # Ansible
self.ansible_root = os.path.join(self.root, 'ansible')
os.makedirs(self.ansible_root)
+ self.trusted_root = os.path.join(self.root, 'trusted')
+ os.makedirs(self.trusted_root)
ssh_dir = os.path.join(self.work_root, '.ssh')
os.mkdir(ssh_dir, 0o700)
self.result_data_file = os.path.join(self.work_root, 'results.json')
@@ -196,13 +219,23 @@
self.playbook = None # A pointer to the candidate we have chosen
self.pre_playbooks = []
self.post_playbooks = []
- self.roles = []
- self.trusted_roles_path = []
- self.untrusted_roles_path = []
- self.untrusted_config = os.path.join(
- self.ansible_root, 'untrusted.cfg')
- self.trusted_config = os.path.join(self.ansible_root, 'trusted.cfg')
self.job_output_file = os.path.join(self.log_root, 'job-output.txt')
+ self.trusted_projects = []
+ self.trusted_project_index = {}
+
+ def addTrustedProject(self, canonical_name, branch):
+ # Trusted projects are placed in their own directories so that
+ # we can support using different branches of the same project
+ # in different playbooks.
+ count = len(self.trusted_projects)
+ root = os.path.join(self.trusted_root, 'project_%i' % (count,))
+ os.makedirs(root)
+ self.trusted_projects.append(root)
+ self.trusted_project_index[(canonical_name, branch)] = root
+ return root
+
+ def getTrustedProject(self, canonical_name, branch):
+ return self.trusted_project_index.get((canonical_name, branch))
def addPrePlaybook(self):
count = len(self.pre_playbooks)
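The (canonical project name, branch) keyed reuse implemented by addTrustedProject() and getTrustedProject() above can be illustrated with a short sketch; the project name shown is hypothetical:

import os
import tempfile

# Sketch of the trusted-project checkout reuse described above: the same
# project/branch pair shares one "project_<n>" directory, while another
# branch of the same project gets its own directory.
class TrustedCheckouts(object):
    def __init__(self, root):
        self.root = root
        self.dirs = []
        self.index = {}

    def get(self, canonical_name, branch):
        key = (canonical_name, branch)
        if key not in self.index:
            path = os.path.join(self.root, 'project_%i' % len(self.dirs))
            os.makedirs(path)
            self.dirs.append(path)
            self.index[key] = path
        return self.index[key]

cache = TrustedCheckouts(tempfile.mkdtemp())
a = cache.get('git.example.com/common-config', 'master')
b = cache.get('git.example.com/common-config', 'master')
c = cache.get('git.example.com/common-config', 'stable')
assert a == b and a != c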
@@ -228,17 +261,6 @@
self.playbooks.append(playbook)
return playbook
- def addRole(self):
- count = len(self.roles)
- root = os.path.join(self.ansible_root, 'role_%i' % (count,))
- os.makedirs(root)
- trusted = os.path.join(root, 'trusted')
- os.makedirs(trusted)
- untrusted = os.path.join(root, 'untrusted')
- os.makedirs(untrusted)
- self.roles.append(root)
- return root
-
def cleanup(self):
if not self.keep:
shutil.rmtree(self.root)
@@ -760,8 +782,13 @@
projects.add((project['connection'], project['name']))
# ...as well as all playbook and role projects.
- repos = (args['pre_playbooks'] + args['playbooks'] +
- args['post_playbooks'] + args['roles'])
+ repos = []
+ playbooks = (args['pre_playbooks'] + args['playbooks'] +
+ args['post_playbooks'])
+ for playbook in playbooks:
+ repos.append(playbook)
+ repos += playbook['roles']
+
for repo in repos:
self.log.debug("Job %s: updating playbook or role %s" %
(self.job.unique, repo))
@@ -806,12 +833,9 @@
for repo in repos.values():
repo.deleteRemote('origin')
- # is the playbook in a repo that we have already prepared?
- trusted, untrusted = self.preparePlaybookRepos(args)
+ # This prepares each playbook and the roles needed for each.
+ self.preparePlaybooks(args)
- self.prepareRoles(args, trusted, untrusted)
-
- # TODOv3: Ansible the ansible thing here.
self.prepareAnsibleFiles(args)
data = {
@@ -997,42 +1021,29 @@
raise Exception("Unable to find playbook %s" % path)
return None
- def preparePlaybookRepos(self, args):
- trusted = untrusted = False
+ def preparePlaybooks(self, args):
for playbook in args['pre_playbooks']:
jobdir_playbook = self.jobdir.addPrePlaybook()
- self.preparePlaybookRepo(jobdir_playbook, playbook,
- args, required=True)
- if playbook['trusted']:
- trusted = True
- else:
- untrusted = True
+ self.preparePlaybook(jobdir_playbook, playbook,
+ args, required=True)
for playbook in args['playbooks']:
jobdir_playbook = self.jobdir.addPlaybook()
- self.preparePlaybookRepo(jobdir_playbook, playbook,
- args, required=False)
- if playbook['trusted']:
- trusted = True
- else:
- untrusted = True
+ self.preparePlaybook(jobdir_playbook, playbook,
+ args, required=False)
if jobdir_playbook.path is not None:
self.jobdir.playbook = jobdir_playbook
break
+
if self.jobdir.playbook is None:
raise Exception("No valid playbook found")
for playbook in args['post_playbooks']:
jobdir_playbook = self.jobdir.addPostPlaybook()
- self.preparePlaybookRepo(jobdir_playbook, playbook,
- args, required=True)
- if playbook['trusted']:
- trusted = True
- else:
- untrusted = True
- return (trusted, untrusted)
+ self.preparePlaybook(jobdir_playbook, playbook,
+ args, required=True)
- def preparePlaybookRepo(self, jobdir_playbook, playbook, args, required):
+ def preparePlaybook(self, jobdir_playbook, playbook, args, required):
self.log.debug("Prepare playbook repo for %s" % (playbook,))
# Check out the playbook repo if needed and set the path to
# the playbook that should be run.
@@ -1040,6 +1051,7 @@
source = self.executor_server.connections.getSource(
playbook['connection'])
project = source.getProject(playbook['project'])
+ path = None
if not playbook['trusted']:
# This is a project repo, so it is safe to use the already
# checked out version (from speculative merging) of the
@@ -1052,34 +1064,48 @@
project.canonical_hostname,
project.name,
playbook['path'])
- jobdir_playbook.path = self.findPlaybook(
- path,
- required=required,
- trusted=playbook['trusted'])
- return
- # The playbook repo is either a config repo, or it isn't in
- # the stack of changes we are testing, so check out the branch
- # tip into a dedicated space.
+ break
+ if not path:
+ # The playbook repo is either a config repo, or it isn't in
+ # the stack of changes we are testing, so check out the branch
+ # tip into a dedicated space.
+ path = self.checkoutTrustedProject(project, playbook['branch'])
+ path = os.path.join(path, playbook['path'])
- merger = self.executor_server._getMerger(jobdir_playbook.root,
- self.log)
- merger.checkoutBranch(playbook['connection'], project.name,
- playbook['branch'])
-
- path = os.path.join(jobdir_playbook.root,
- project.canonical_hostname,
- project.name,
- playbook['path'])
jobdir_playbook.path = self.findPlaybook(
path,
required=required,
trusted=playbook['trusted'])
- def prepareRoles(self, args, trusted, untrusted):
- for role in args['roles']:
- if role['type'] == 'zuul':
- root = self.jobdir.addRole()
- self.prepareZuulRole(args, role, root, trusted, untrusted)
+ # If this playbook doesn't exist, don't bother preparing
+ # roles.
+ if not jobdir_playbook.path:
+ return
+
+ for role in playbook['roles']:
+ self.prepareRole(jobdir_playbook, role, args)
+
+ self.writeAnsibleConfig(jobdir_playbook)
+
+ def checkoutTrustedProject(self, project, branch):
+ root = self.jobdir.getTrustedProject(project.canonical_name,
+ branch)
+ if not root:
+ root = self.jobdir.addTrustedProject(project.canonical_name,
+ branch)
+ merger = self.executor_server._getMerger(root, self.log)
+ merger.checkoutBranch(project.connection_name, project.name,
+ branch)
+
+ path = os.path.join(root,
+ project.canonical_hostname,
+ project.name)
+ return path
+
+ def prepareRole(self, jobdir_playbook, role, args):
+ if role['type'] == 'zuul':
+ root = jobdir_playbook.addRole()
+ self.prepareZuulRole(jobdir_playbook, role, args, root)
def findRole(self, path, trusted=False):
d = os.path.join(path, 'tasks')
@@ -1094,102 +1120,56 @@
# This repo has a collection of roles
if not trusted:
for entry in os.listdir(d):
- self._blockPluginDirs(os.path.join(d, entry))
+ if os.path.isdir(os.path.join(d, entry)):
+ self._blockPluginDirs(os.path.join(d, entry))
return d
# It is neither a bare role, nor a collection of roles
raise Exception("Unable to find role in %s" % (path,))
- def prepareZuulRole(self, args, role, root, trusted, untrusted):
+ def prepareZuulRole(self, jobdir_playbook, role, args, root):
self.log.debug("Prepare zuul role for %s" % (role,))
# Check out the role repo if needed
source = self.executor_server.connections.getSource(
role['connection'])
project = source.getProject(role['project'])
- untrusted_role_repo = None
- trusted_role_repo = None
- trusted_root = os.path.join(root, 'trusted')
- untrusted_root = os.path.join(root, 'untrusted')
name = role['target_name']
+ path = None
- if untrusted:
- # There is at least one untrusted playbook. For that
- # case, use the already checked out version (from
- # speculative merging) of the role.
+ if not jobdir_playbook.trusted:
+ # This playbook is untrusted. Use the already checked out
+ # version (from speculative merging) of the role if it
+ # exists.
for i in args['items']:
if (i['connection'] == role['connection'] and
i['project'] == role['project']):
- # We already have this repo prepared;
- # copy it into location.
-
+ # We already have this repo prepared; use it.
path = os.path.join(self.jobdir.src_root,
project.canonical_hostname,
project.name)
- # The name of the symlink is the requested name of
- # the role (which may be the repo name or may be
- # something else; this can come into play if this
- # is a bare role).
- link = os.path.join(untrusted_root, name)
- link = os.path.realpath(link)
- if not link.startswith(os.path.realpath(untrusted_root)):
- raise Exception("Invalid role name %s", name)
- os.symlink(path, link)
- untrusted_role_repo = link
break
- if trusted or not untrusted_role_repo:
- # There is at least one trusted playbook which will need a
- # trusted checkout of the role, or the role did not appear
+ if not path:
+ # This is a trusted playbook or the role did not appear
# in the dependency chain for the change (in which case,
# there is no existing untrusted checkout of it). Check
# out the branch tip into a dedicated space.
- merger = self.executor_server._getMerger(trusted_root,
- self.log)
- merger.checkoutBranch(role['connection'], project.name,
- 'master')
- orig_repo_path = os.path.join(trusted_root,
- project.canonical_hostname,
- project.name)
- if name != project.name:
- # The requested name of the role is not the same as
- # the project name, so rename the git repo as the
- # requested name. It is the only item in this
- # directory, so we don't need to worry about
- # collisions.
- target = os.path.join(trusted_root,
- project.canonical_hostname,
- name)
- target = os.path.realpath(target)
- if not target.startswith(os.path.realpath(trusted_root)):
- raise Exception("Invalid role name %s", name)
- os.rename(orig_repo_path, target)
- trusted_role_repo = target
- else:
- trusted_role_repo = orig_repo_path
+ path = self.checkoutTrustedProject(project, 'master')
- if not untrusted_role_repo:
- # In the case that there was no untrusted checkout,
- # use the trusted checkout.
- untrusted_role_repo = trusted_role_repo
- untrusted_root = trusted_root
+ # The name of the symlink is the requested name of the role
+ # (which may be the repo name or may be something else; this
+ # can come into play if this is a bare role).
+ link = os.path.join(root, name)
+ link = os.path.realpath(link)
+ if not link.startswith(os.path.realpath(root)):
+ raise Exception("Invalid role name %s", name)
+ os.symlink(path, link)
- if untrusted:
- untrusted_role_path = self.findRole(untrusted_role_repo,
- trusted=False)
- if untrusted_role_path is None:
- # In the case of a bare role, add the containing directory
- untrusted_role_path = os.path.join(untrusted_root,
- project.canonical_hostname)
- self.jobdir.untrusted_roles_path.append(untrusted_role_path)
-
- if trusted:
- trusted_role_path = self.findRole(trusted_role_repo,
- trusted=True)
- if trusted_role_path is None:
- # In the case of a bare role, add the containing directory
- trusted_role_path = os.path.join(trusted_root,
- project.canonical_hostname)
- self.jobdir.trusted_roles_path.append(trusted_role_path)
+ role_path = self.findRole(link, trusted=jobdir_playbook.trusted)
+ if role_path is None:
+ # In the case of a bare role, add the containing directory
+ role_path = root
+ jobdir_playbook.roles_path.append(role_path)
def prepareAnsibleFiles(self, args):
all_vars = dict(args['vars'])
@@ -1213,11 +1193,10 @@
for key in node['host_keys']:
known_hosts.write('%s\n' % key)
- self.writeAnsibleConfig(self.jobdir.untrusted_config)
- self.writeAnsibleConfig(self.jobdir.trusted_config, trusted=True)
+ def writeAnsibleConfig(self, jobdir_playbook):
+ trusted = jobdir_playbook.trusted
- def writeAnsibleConfig(self, config_path, trusted=False):
- with open(config_path, 'w') as config:
+ with open(jobdir_playbook.ansible_config, 'w') as config:
config.write('[defaults]\n')
config.write('hostfile = %s\n' % self.jobdir.inventory)
config.write('local_tmp = %s/.ansible/local_tmp\n' %
@@ -1240,12 +1219,10 @@
% self.executor_server.action_dir)
config.write('lookup_plugins = %s\n'
% self.executor_server.lookup_dir)
- roles_path = self.jobdir.untrusted_roles_path
- else:
- roles_path = self.jobdir.trusted_roles_path
- if roles_path:
- config.write('roles_path = %s\n' % ':'.join(roles_path))
+ if jobdir_playbook.roles_path:
+ config.write('roles_path = %s\n' % ':'.join(
+ jobdir_playbook.roles_path))
# On trusted jobs, we want to prevent the printing of args,
# since trusted jobs might have access to secrets that they may
@@ -1286,7 +1263,7 @@
except Exception:
self.log.exception("Exception while killing ansible process:")
- def runAnsible(self, cmd, timeout, trusted=False):
+ def runAnsible(self, cmd, timeout, config_file, trusted):
env_copy = os.environ.copy()
env_copy.update(self.ssh_agent.env)
env_copy['LOGNAME'] = 'zuul'
@@ -1301,10 +1278,8 @@
env_copy['PYTHONPATH'] = os.path.pathsep.join(pythonpath)
if trusted:
- config_file = self.jobdir.trusted_config
opt_prefix = 'trusted'
else:
- config_file = self.jobdir.untrusted_config
opt_prefix = 'untrusted'
ro_dirs = get_default(self.executor_server.config, 'executor',
'%s_ro_dirs' % opt_prefix)
@@ -1409,7 +1384,9 @@
cmd.extend(['-e', 'zuul_execution_phase_index=%s' % index])
result, code = self.runAnsible(
- cmd=cmd, timeout=timeout, trusted=playbook.trusted)
+ cmd=cmd, timeout=timeout,
+ config_file=playbook.ansible_config,
+ trusted=playbook.trusted)
self.log.debug("Ansible complete, result %s code %s" % (
self.RESULT_MAP[result], code))
return result, code
diff --git a/zuul/model.py b/zuul/model.py
index f5cbdac..3c07740 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -20,6 +20,7 @@
import struct
import time
from uuid import uuid4
+import urllib.parse
MERGER_MERGE = 1 # "git merge"
MERGER_MERGE_RESOLVE = 2 # "git merge -s resolve"
@@ -636,11 +637,17 @@
Jobs refer to objects of this class for their main, pre, and post
playbooks so that we can keep track of which repos and security
- contexts are needed in order to run them."""
+ contexts are needed in order to run them.
- def __init__(self, source_context, path):
+ We also keep a list of roles so that playbooks only run with the
+ roles which were defined at the point the playbook was defined.
+
+ """
+
+ def __init__(self, source_context, path, roles):
self.source_context = source_context
self.path = path
+ self.roles = roles
def __repr__(self):
return '<PlaybookContext %s %s>' % (self.source_context,
@@ -653,7 +660,8 @@
if not isinstance(other, PlaybookContext):
return False
return (self.source_context == other.source_context and
- self.path == other.path)
+ self.path == other.path and
+ self.roles == other.roles)
def toDict(self):
# Render to a dict to use in passing json to the executor
@@ -662,6 +670,7 @@
project=self.source_context.project.name,
branch=self.source_context.branch,
trusted=self.source_context.trusted,
+ roles=[r.toDict() for r in self.roles],
path=self.path)
@@ -707,7 +716,7 @@
if not isinstance(other, ZuulRole):
return False
return (super(ZuulRole, self).__eq__(other) and
- self.connection_name == other.connection_name,
+ self.connection_name == other.connection_name and
self.project_name == other.project_name)
def toDict(self):
@@ -792,7 +801,7 @@
semaphore=None,
attempts=3,
final=False,
- roles=frozenset(),
+ roles=(),
required_projects={},
allowed_projects=None,
override_branch=None,
@@ -857,6 +866,13 @@
if not self.run:
self.run = self.implied_run
+ def addRoles(self, roles):
+ newroles = list(self.roles)
+ for role in reversed(roles):
+ if role not in newroles:
+ newroles.insert(0, role)
+ self.roles = tuple(newroles)
+
def updateVariables(self, other_vars):
v = self.variables
Job._deepUpdate(v, other_vars)
@@ -932,7 +948,7 @@
if other._get('post_run') is not None:
self.post_run = other.post_run + self.post_run
if other._get('roles') is not None:
- self.roles = self.roles.union(other.roles)
+ self.addRoles(other.roles)
if other._get('variables') is not None:
self.updateVariables(other.variables)
if other._get('required_projects') is not None:
@@ -1630,13 +1646,29 @@
result = job.failure_message
if job.failure_url:
pattern = job.failure_url
- url = None
+ url = None # The final URL
+ default_url = build.result_data.get('zuul', {}).get('log_url')
if pattern:
- url = self.formatUrlPattern(pattern, job, build)
+ job_url = self.formatUrlPattern(pattern, job, build)
+ else:
+ job_url = None
+ try:
+ if job_url:
+ u = urllib.parse.urlparse(job_url)
+ if u.scheme:
+ # The job success or failure url is absolute, so it's
+ # our final url.
+ url = job_url
+ else:
+ # We have a relative job url. Combine it with our
+ # default url.
+ if default_url:
+ url = urllib.parse.urljoin(default_url, job_url)
+ except Exception:
+ self.log.exception("Error while parsing url for job %s:"
+ % (job,))
if not url:
- url = build.result_data.get('zuul', {}).get('log_url')
- if not url:
- url = build.url or job.name
+ url = default_url or build.url or job.name
return (result, url)
def formatJSON(self):