Merge "Parent legacy jobs with artifacts legacy-publish-openstack-artifacts" into feature/zuulv3
diff --git a/doc/source/admin/components.rst b/doc/source/admin/components.rst
index fbb8cbc..b3a4c3f 100644
--- a/doc/source/admin/components.rst
+++ b/doc/source/admin/components.rst
@@ -115,6 +115,11 @@
A list of zookeeper hosts for Zuul to use when communicating
with Nodepool.
+ .. attr:: session_timeout
+ :default: 10.0
+
+ The ZooKeeper session timeout, in seconds.
+
.. _scheduler:
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 3d24f5d..6962b8f 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -244,6 +244,11 @@
The path to the source code, relative to the work dir. E.g.,
`src/git.example.com/org/project`.
+ .. var:: required
+
+ A boolean indicating whether this project appears in the
+ :attr:`job.required-projects` list for this job.
+
.. var:: tenant
The name of the current Zuul tenant.
diff --git a/tests/base.py b/tests/base.py
index df9fbc1..2e3d682 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -1485,7 +1485,10 @@
result = super(RecordingAnsibleJob, self).runAnsible(
cmd, timeout, playbook)
else:
- result = build.run()
+ if playbook.path:
+ result = build.run()
+ else:
+ result = (self.RESULT_NORMAL, 0)
return result
def getHostList(self, args):
diff --git a/zuul/ansible/filter/zuul_filters.py b/zuul/ansible/filter/zuul_filters.py
index 17ef2bb..eb6776c 100644
--- a/zuul/ansible/filter/zuul_filters.py
+++ b/zuul/ansible/filter/zuul_filters.py
@@ -25,7 +25,8 @@
ZUUL_SHORT_PROJECT_NAME=short_name,
ZUUL_PIPELINE=zuul['pipeline'],
ZUUL_VOTING=zuul['voting'],
- WORKSPACE='/home/zuul/workspace')
+ WORKSPACE='/home/zuul/workspace',
+ BUILD_TIMEOUT=str(int(zuul['timeout']) * 1000))
if 'branch' in zuul:
params['ZUUL_BRANCH'] = zuul['branch']
diff --git a/zuul/cmd/migrate.py b/zuul/cmd/migrate.py
index 8688ab4..efb2796 100644
--- a/zuul/cmd/migrate.py
+++ b/zuul/cmd/migrate.py
@@ -932,8 +932,6 @@
timeout = self.getTimeout()
if timeout:
output['timeout'] = timeout
- output.setdefault('vars', {})
- output['vars']['BUILD_TIMEOUT'] = str(timeout * 1000)
if self.nodes:
if len(self.nodes) == 1:
diff --git a/zuul/cmd/scheduler.py b/zuul/cmd/scheduler.py
index a9923c6..bba1922 100755
--- a/zuul/cmd/scheduler.py
+++ b/zuul/cmd/scheduler.py
@@ -154,8 +154,10 @@
zookeeper = zuul.zk.ZooKeeper()
zookeeper_hosts = get_default(self.config, 'zookeeper',
'hosts', '127.0.0.1:2181')
+ zookeeper_timeout = float(get_default(self.config, 'zookeeper',
+ 'session_timeout', 10.0))
- zookeeper.connect(zookeeper_hosts)
+ zookeeper.connect(zookeeper_hosts, timeout=zookeeper_timeout)
cache_expiry = get_default(self.config, 'webapp', 'status_expiry', 1)
listen_address = get_default(self.config, 'webapp', 'listen_address',
diff --git a/zuul/configloader.py b/zuul/configloader.py
index afdf329..d597ee0 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -856,9 +856,6 @@
pipeline_defined = False
for template in configs:
if pipeline.name in template.pipelines:
- ProjectParser.log.debug(
- "Applying template %s to pipeline %s" %
- (template.name, pipeline.name))
pipeline_defined = True
template_pipeline = template.pipelines[pipeline.name]
project_pipeline.job_list.inheritFrom(
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index f97d286..eb2bfdf 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -234,6 +234,7 @@
params['vars'] = copy.deepcopy(job.variables)
params['zuul'] = zuul_params
projects = set()
+ required_projects = set()
def make_project_dict(project, override_branch=None):
project_config = item.current_build_set.layout.project_configs.get(
@@ -260,6 +261,7 @@
make_project_dict(project,
job_project.override_branch))
projects.add(project)
+ required_projects.add(project)
for i in all_items:
if i.change.project not in projects:
project = i.change.project
@@ -273,6 +275,7 @@
canonical_hostname=p.canonical_hostname,
canonical_name=p.canonical_name,
src_dir=os.path.join('src', p.canonical_name),
+ required=(p in required_projects),
))
build = Build(job, uuid)
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 27fd85f..b37a82e 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -346,6 +346,15 @@
self.trusted_projects = []
self.trusted_project_index = {}
+ # Create a JobDirPlaybook for the Ansible setup run. This
+ # doesn't use an actual playbook, but it lets us use the same
+ # methods to write an ansible.cfg as the rest of the Ansible
+ # runs.
+ setup_root = os.path.join(self.ansible_root, 'setup_playbook')
+ os.makedirs(setup_root)
+ self.setup_playbook = JobDirPlaybook(setup_root)
+ self.setup_playbook.trusted = True
+
def addTrustedProject(self, canonical_name, branch):
# Trusted projects are placed in their own directories so that
# we can support using different branches of the same project
@@ -504,6 +513,21 @@
super(ExecutorMergeWorker, self).handleNoop(packet)
+class ExecutorExecuteWorker(gear.TextWorker):
+ def __init__(self, executor_server, *args, **kw):
+ self.zuul_executor_server = executor_server
+ super(ExecutorExecuteWorker, self).__init__(*args, **kw)
+
+ def handleNoop(self, packet):
+ # Delay our response to running a new job based on the number
+ # of jobs we're currently running, in an attempt to spread
+ # load evenly among executors.
+ workers = len(self.zuul_executor_server.job_workers)
+ delay = (workers ** 2) / 1000.0
+ time.sleep(delay)
+ return super(ExecutorExecuteWorker, self).handleNoop(packet)
+
+
class ExecutorServer(object):
log = logging.getLogger("zuul.ExecutorServer")
@@ -603,7 +627,8 @@
ssl_ca = get_default(self.config, 'gearman', 'ssl_ca')
self.merger_worker = ExecutorMergeWorker(self, 'Zuul Executor Merger')
self.merger_worker.addServer(server, port, ssl_key, ssl_cert, ssl_ca)
- self.executor_worker = gear.TextWorker('Zuul Executor Server')
+ self.executor_worker = ExecutorExecuteWorker(
+ self, 'Zuul Executor Server')
self.executor_worker.addServer(server, port, ssl_key, ssl_cert, ssl_ca)
self.log.debug("Waiting for server")
self.merger_worker.waitForServer()
@@ -1101,6 +1126,17 @@
def runPlaybooks(self, args):
result = None
+ # Run the Ansible 'setup' module on all hosts in the inventory
+ # at the start of the job with a 60 second timeout. If we
+ # aren't able to connect to all the hosts and gather facts
+ # within that timeout, there is likely a network problem
+ between here and the hosts in the inventory; return the nodes
+ to nodepool and reschedule the job.
+ setup_status, setup_code = self.runAnsibleSetup(
+ self.jobdir.setup_playbook)
+ if setup_status != self.RESULT_NORMAL or setup_code != 0:
+ return result
+
pre_failed = False
success = False
for index, playbook in enumerate(self.jobdir.pre_playbooks):
@@ -1213,6 +1249,8 @@
return None
def preparePlaybooks(self, args):
+ self.writeAnsibleConfig(self.jobdir.setup_playbook)
+
for playbook in args['pre_playbooks']:
jobdir_playbook = self.jobdir.addPrePlaybook()
self.preparePlaybook(jobdir_playbook, playbook,
@@ -1289,7 +1327,7 @@
jobdir_playbook.secrets_content = yaml.safe_dump(
secrets, default_flow_style=False)
- self.writeAnsibleConfig(jobdir_playbook, playbook)
+ self.writeAnsibleConfig(jobdir_playbook)
def checkoutTrustedProject(self, project, branch):
root = self.jobdir.getTrustedProject(project.canonical_name,
@@ -1420,7 +1458,7 @@
job_output_file=self.jobdir.job_output_file)
logging_config.writeJson(self.jobdir.logging_json)
- def writeAnsibleConfig(self, jobdir_playbook, playbook):
+ def writeAnsibleConfig(self, jobdir_playbook):
trusted = jobdir_playbook.trusted
# TODO(mordred) This should likely be extracted into a more generalized
@@ -1467,7 +1505,7 @@
# role. Otherwise, printing the args could be useful for
# debugging.
config.write('display_args_to_stdout = %s\n' %
- str(not playbook['secrets']))
+ str(not jobdir_playbook.secrets_content))
config.write('[ssh_connection]\n')
# NB: when setting pipelining = True, keep_remote_files
@@ -1640,6 +1678,21 @@
return (self.RESULT_NORMAL, ret)
+ def runAnsibleSetup(self, playbook):
+ if self.executor_server.verbose:
+ verbose = '-vvv'
+ else:
+ verbose = '-v'
+
+ cmd = ['ansible', '*', verbose, '-m', 'setup',
+ '-a', 'gather_subset=!all']
+
+ result, code = self.runAnsible(
+ cmd=cmd, timeout=60, playbook=playbook)
+ self.log.debug("Ansible complete, result %s code %s" % (
+ self.RESULT_MAP[result], code))
+ return result, code
+
def runAnsiblePlaybook(self, playbook, timeout, success=None,
phase=None, index=None):
if self.executor_server.verbose:
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index b94b8a5..2cb34b8 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -435,12 +435,12 @@
loader = zuul.configloader.ConfigLoader()
build_set = item.current_build_set
- self.log.debug("Loading dynamic layout")
try:
# First parse the config as it will land with the
# full set of config and project repos. This lets us
# catch syntax errors in config repos even though we won't
# actually run with that config.
+ self.log.debug("Loading dynamic layout (phase 1)")
loader.createDynamicLayout(
item.pipeline.layout.tenant,
build_set.files,
@@ -450,10 +450,12 @@
# Then create the config a second time but without changes
# to config repos so that we actually use this config.
+ self.log.debug("Loading dynamic layout (phase 2)")
layout = loader.createDynamicLayout(
item.pipeline.layout.tenant,
build_set.files,
include_config_projects=False)
+ self.log.debug("Loading dynamic layout complete")
except zuul.configloader.ConfigurationSyntaxError as e:
self.log.info("Configuration syntax error "
"in dynamic layout")
diff --git a/zuul/nodepool.py b/zuul/nodepool.py
index dc855cd..47f5c3c 100644
--- a/zuul/nodepool.py
+++ b/zuul/nodepool.py
@@ -117,7 +117,7 @@
try:
for node in nodes:
self.log.debug("Locking node %s" % (node,))
- self.sched.zk.lockNode(node)
+ self.sched.zk.lockNode(node, timeout=30)
locked_nodes.append(node)
except Exception:
self.log.exception("Error locking nodes:")
diff --git a/zuul/web/__init__.py b/zuul/web/__init__.py
index 00ac9c6..89f5efe 100755
--- a/zuul/web/__init__.py
+++ b/zuul/web/__init__.py
@@ -108,7 +108,7 @@
except Exception as e:
self.log.exception("Finger client exception:")
msg = "Failure from finger client: %s" % e
- ws.send_str(msg.decode('utf8'))
+ await ws.send_str(msg.decode('utf8'))
return (1000, "No more data")
diff --git a/zuul/zk.py b/zuul/zk.py
index 5ea4e56..dcaa172 100644
--- a/zuul/zk.py
+++ b/zuul/zk.py
@@ -90,7 +90,7 @@
def resetLostFlag(self):
self._became_lost = False
- def connect(self, hosts, read_only=False):
+ def connect(self, hosts, read_only=False, timeout=10.0):
'''
Establish a connection with ZooKeeper cluster.
@@ -100,10 +100,12 @@
:param str hosts: Comma-separated list of hosts to connect to (e.g.
127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
:param bool read_only: If True, establishes a read-only connection.
-
+ :param float timeout: The ZooKeeper session timeout, in
+ seconds (default: 10.0).
'''
if self.client is None:
- self.client = KazooClient(hosts=hosts, read_only=read_only)
+ self.client = KazooClient(hosts=hosts, read_only=read_only,
+ timeout=timeout)
self.client.add_listener(self._connection_listener)
self.client.start()