Merge "Gate on zuul-stream-functional" into feature/zuulv3
diff --git a/.zuul.yaml b/.zuul.yaml
index 80028fb..ab03a59 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -28,6 +28,16 @@
- "zuul/ansible/callback/.*"
- "playbooks/zuul-stream/.*"
+- job:
+ name: zuul-migrate
+ parent: unittests
+ run: playbooks/zuul-migrate
+ # We're adding zuul to the required-projects so that we can also trigger
+ # this from project-config changes
+ required-projects:
+ - openstack-infra/project-config
+ - openstack-infra/zuul
+
- project:
name: openstack-infra/zuul
check:
@@ -38,6 +48,10 @@
- tox-pep8
- tox-py35
- zuul-stream-functional
+ - zuul-migrate:
+ files:
+ - zuul/cmd/migrate.py
+ - playbooks/zuul-migrate.yaml
gate:
jobs:
- tox-docs
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
index 54bc10a..4722750 100644
--- a/doc/source/admin/tenants.rst
+++ b/doc/source/admin/tenants.rst
@@ -163,6 +163,11 @@
The maximum number of nodes a job can request. A value of
'-1' value removes the limit.
+ .. attr:: max-job-timeout
+ :default: 10800
+
+ The maximum timeout for jobs. A value of '-1' removes the limit.
+
.. attr:: exclude-unprotected-branches
:default: false
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 51a148a..3d24f5d 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -314,6 +314,13 @@
The patchset identifier for the change. If a change is
revised, this will have a different value.
+.. var:: zuul_success
+
+ Post-run playbook(s) are passed this variable to indicate whether the run
+ phase of the job succeeded. This variable is meant to be used with the
+ `bool` filter.
+
+
Change Items
++++++++++++
diff --git a/playbooks/zuul-migrate.yaml b/playbooks/zuul-migrate.yaml
new file mode 100644
index 0000000..66c7bd5
--- /dev/null
+++ b/playbooks/zuul-migrate.yaml
@@ -0,0 +1,18 @@
+- hosts: all
+ tasks:
+
+ - name: Install migration dependencies
+ command: "python3 -m pip install --user src/git.openstack.org/openstack-infra/zuul[migrate]"
+
+ - name: Migrate the data
+ command: "python3 ../zuul/zuul/cmd/migrate.py zuul/layout.yaml jenkins/jobs nodepool/nodepool.yaml . --mapping=zuul/mapping.yaml -v -m"
+ args:
+ chdir: src/git.openstack.org/openstack-infra/project-config
+
+ - name: Collect generated files
+ synchronize:
+ dest: "{{ zuul.executor.log_root }}"
+ mode: pull
+ src: "src/git.openstack.org/openstack-infra/project-config/zuul.d"
+ verify_host: true
+ no_log: true
diff --git a/setup.cfg b/setup.cfg
index ce7a40e..63ff562 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,6 +27,7 @@
zuul-executor = zuul.cmd.executor:main
zuul-bwrap = zuul.driver.bubblewrap:main
zuul-web = zuul.cmd.web:main
+ zuul-migrate = zuul.cmd.migrate:main
[build_sphinx]
source-dir = doc/source
@@ -37,3 +38,5 @@
[extras]
mysql_reporter=
PyMySQL
+migrate=
+ jenkins-job-builder==1.6.2
diff --git a/tests/base.py b/tests/base.py
index fcc5e84..c159865 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -1012,8 +1012,7 @@
def getGithubClient(self,
project=None,
- user_id=None,
- use_app=True):
+ user_id=None):
return self.github_client
def openFakePullRequest(self, project, branch, subject, files=[],
diff --git a/tests/fixtures/config/multi-tenant/main.yaml b/tests/fixtures/config/multi-tenant/main.yaml
index 4916905..e667588 100644
--- a/tests/fixtures/config/multi-tenant/main.yaml
+++ b/tests/fixtures/config/multi-tenant/main.yaml
@@ -1,5 +1,6 @@
- tenant:
name: tenant-one
+ max-job-timeout: 1800
source:
gerrit:
config-projects:
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 6dd8333..2248aa9 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -800,14 +800,22 @@
self.db = model.TimeDataBase(self.tmp_root)
def test_timedatabase(self):
- self.assertEqual(self.db.getEstimatedTime('job-name'), 0)
- self.db.update('job-name', 50, 'SUCCESS')
- self.assertEqual(self.db.getEstimatedTime('job-name'), 50)
- self.db.update('job-name', 100, 'SUCCESS')
- self.assertEqual(self.db.getEstimatedTime('job-name'), 75)
+ pipeline = Dummy(layout=Dummy(tenant=Dummy(name='test-tenant')))
+ change = Dummy(project=Dummy(canonical_name='git.example.com/foo/bar'))
+ job = Dummy(name='job-name')
+ item = Dummy(pipeline=pipeline,
+ change=change)
+ build = Dummy(build_set=Dummy(item=item),
+ job=job)
+
+ self.assertEqual(self.db.getEstimatedTime(build), 0)
+ self.db.update(build, 50, 'SUCCESS')
+ self.assertEqual(self.db.getEstimatedTime(build), 50)
+ self.db.update(build, 100, 'SUCCESS')
+ self.assertEqual(self.db.getEstimatedTime(build), 75)
for x in range(10):
- self.db.update('job-name', 100, 'SUCCESS')
- self.assertEqual(self.db.getEstimatedTime('job-name'), 100)
+ self.db.update(build, 100, 'SUCCESS')
+ self.assertEqual(self.db.getEstimatedTime(build), 100)
class TestGraph(BaseTestCase):
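
The Dummy objects used in the updated TimeDataBase test are assumed to be plain attribute bags (this sketch is an assumption about the test helper, not code from this change); that is all the new _getTD() needs in order to walk the attribute chain on a build:

    class Dummy(object):
        # Hypothetical minimal stand-in: store keyword arguments as attributes.
        def __init__(self, **kw):
            for k, v in kw.items():
                setattr(self, k, v)

    # Enough structure for TimeDataBase._getTD(build) to resolve
    # build.build_set.item.pipeline.layout.tenant.name,
    # build.build_set.item.change.project.canonical_name and build.job.name.
    build = Dummy(
        build_set=Dummy(item=Dummy(
            pipeline=Dummy(layout=Dummy(tenant=Dummy(name='test-tenant'))),
            change=Dummy(project=Dummy(
                canonical_name='git.example.com/foo/bar')))),
        job=Dummy(name='job-name'))
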
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index f33d964..f22d98c 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -617,7 +617,6 @@
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
- @skip("Disabled for early v3 development")
def _test_time_database(self, iteration):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -626,7 +625,7 @@
self.waitUntilSettled()
time.sleep(2)
- data = json.loads(self.sched.formatStatusJSON())
+ data = json.loads(self.sched.formatStatusJSON('tenant-one'))
found_job = None
for pipeline in data['pipelines']:
if pipeline['name'] != 'gate':
@@ -652,7 +651,6 @@
self.executor_server.release()
self.waitUntilSettled()
- @skip("Disabled for early v3 development")
def test_time_database(self):
"Test the time database"
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 2b27b0e..d55ff92 100755
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -1437,6 +1437,32 @@
"B should not fail because of nodes limit")
+class TestMaxTimeout(AnsibleZuulTestCase):
+ tenant_config_file = 'config/multi-tenant/main.yaml'
+
+ def test_max_timeout_exceeded(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: test-job
+ timeout: 3600
+ """)
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertIn('The job "test-job" exceeds tenant max-job-timeout',
+ A.messages[0], "A should fail because of timeout limit")
+
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertNotIn("exceeds tenant max-job-timeout", B.messages[0],
+ "B should not fail because of timeout limit")
+
+
class TestBaseJobs(ZuulTestCase):
tenant_config_file = 'config/base-jobs/main.yaml'
diff --git a/tools/run-migration.sh b/tools/run-migration.sh
new file mode 100755
index 0000000..6c7e250
--- /dev/null
+++ b/tools/run-migration.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stupid script I'm using to test migration script locally
+# Assumes project-config is adjacent to zuul and has the mapping file
+
+BASE_DIR=$(cd $(dirname $0)/../..; pwd)
+cd $BASE_DIR/project-config
+python3 $BASE_DIR/zuul/zuul/cmd/migrate.py --mapping=zuul/mapping.yaml \
+ zuul/layout.yaml jenkins/jobs nodepool/nodepool.yaml .
diff --git a/zuul/cmd/executor.py b/zuul/cmd/executor.py
index 06ef0ba..63c621d 100755
--- a/zuul/cmd/executor.py
+++ b/zuul/cmd/executor.py
@@ -82,7 +82,7 @@
self.log.info("Starting log streamer")
streamer = zuul.lib.log_streamer.LogStreamer(
- self.user, '0.0.0.0', self.finger_port, self.job_dir)
+ self.user, '::', self.finger_port, self.job_dir)
# Keep running until the parent dies:
pipe_read = os.fdopen(pipe_read)
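
The '0.0.0.0' to '::' change binds the finger log streamer to the IPv6 wildcard address. On hosts where IPV6_V6ONLY is off (the Linux default), a socket bound to '::' accepts IPv4 connections as well, so this is a dual-stack listener rather than an IPv6-only one. A standalone illustration of that behaviour (not Zuul code):

    import socket

    s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    # 0 means the socket will also accept IPv4 (v4-mapped) connections.
    print(s.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY))
    s.bind(('::', 0))
    print(s.getsockname()[:2])  # ('::', <ephemeral port>)
    s.close()
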
diff --git a/zuul/cmd/migrate.py b/zuul/cmd/migrate.py
new file mode 100644
index 0000000..2f4a279
--- /dev/null
+++ b/zuul/cmd/migrate.py
@@ -0,0 +1,775 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# TODO(mordred):
+# * Read and apply filters from the jobs: section
+# * Figure out shared job queues
+# * Emit job definitions
+# * figure out from builders whether or not it's a normal job or a
+# a devstack-legacy job
+# * Handle emitting arbitrary tox jobs (see tox-py27dj18)
+
+import argparse
+import collections
+import copy
+import itertools
+import logging
+import os
+import re
+from typing import Any, Dict, List, Optional # flake8: noqa
+
+import jenkins_jobs.builder
+from jenkins_jobs.formatter import deep_format
+import jenkins_jobs.formatter
+import jenkins_jobs.parser
+import yaml
+
+DESCRIPTION = """Migrate zuul v2 and Jenkins Job Builder to Zuul v3.
+
+This program takes a zuul v2 layout.yaml and a collection of Jenkins Job
+Builder job definitions and transforms them into a Zuul v3 config. An
+optional mapping config can be given that defines how to map old jobs
+to new jobs.
+"""
+def project_representer(dumper, data):
+ return dumper.represent_mapping('tag:yaml.org,2002:map',
+ data.items())
+
+
+def construct_yaml_map(self, node):
+ data = collections.OrderedDict()
+ yield data
+ value = self.construct_mapping(node)
+
+ if isinstance(node, yaml.MappingNode):
+ self.flatten_mapping(node)
+ else:
+ raise yaml.constructor.ConstructorError(
+ None, None,
+ 'expected a mapping node, but found %s' % node.id,
+ node.start_mark)
+
+ mapping = collections.OrderedDict()
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=False)
+ try:
+ hash(key)
+ except TypeError as exc:
+ raise yaml.constructor.ConstructorError(
+ 'while constructing a mapping', node.start_mark,
+ 'found unacceptable key (%s)' % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=False)
+ mapping[key] = value
+ data.update(mapping)
+
+
+class IndentedEmitter(yaml.emitter.Emitter):
+ def expect_block_sequence(self):
+ self.increase_indent(flow=False, indentless=False)
+ self.state = self.expect_first_block_sequence_item
+
+
+class IndentedDumper(IndentedEmitter, yaml.serializer.Serializer,
+ yaml.representer.Representer, yaml.resolver.Resolver):
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ IndentedEmitter.__init__(
+ self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break)
+ yaml.serializer.Serializer.__init__(
+ self, encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version, tags=tags)
+ yaml.representer.Representer.__init__(
+ self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ yaml.resolver.Resolver.__init__(self)
+
+
+def ordered_load(stream, *args, **kwargs):
+ return yaml.load(stream=stream, *args, **kwargs)
+
+def ordered_dump(data, stream=None, *args, **kwargs):
+ return yaml.dump(data, stream=stream, default_flow_style=False,
+ Dumper=IndentedDumper, width=80, *args, **kwargs)
+
+def get_single_key(var):
+ if isinstance(var, str):
+ return var
+ elif isinstance(var, list):
+ return var[0]
+ return list(var.keys())[0]
+
+
+def has_single_key(var):
+ if isinstance(var, list):
+ return len(var) == 1
+ if isinstance(var, str):
+ return True
+ dict_keys = list(var.keys())
+ if len(dict_keys) != 1:
+ return False
+ if var[get_single_key(var)]:
+ return False
+ return True
+
+
+def combination_matches(combination, match_combinations):
+ """
+ Check whether the given combination matches any of the given combination
+ globs. A glob is itself a combination in which a missing key is
+ considered to match.
+
+ (key1=2, key2=3)
+
+ would match the combination match:
+ (key2=3)
+
+ but not:
+ (key1=2, key2=2)
+ """
+ for cmatch in match_combinations:
+ for key, val in combination.items():
+ if cmatch.get(key, val) != val:
+ break
+ else:
+ return True
+ return False
+
+
+def expandYamlForTemplateJob(self, project, template, jobs_glob=None):
+ dimensions = []
+ template_name = template['name']
+ orig_template = copy.deepcopy(template)
+
+ # reject keys that are not useful during yaml expansion
+ for k in ['jobs']:
+ project.pop(k)
+ excludes = project.pop('exclude', [])
+ for (k, v) in project.items():
+ tmpk = '{{{0}}}'.format(k)
+ if tmpk not in template_name:
+ continue
+ if type(v) == list:
+ dimensions.append(zip([k] * len(v), v))
+ # XXX somewhat hackish to ensure we actually have a single
+ # pass through the loop
+ if len(dimensions) == 0:
+ dimensions = [(("", ""),)]
+
+ for values in itertools.product(*dimensions):
+ params = copy.deepcopy(project)
+ params = self.applyDefaults(params, template)
+
+ expanded_values = {}
+ for (k, v) in values:
+ if isinstance(v, dict):
+ inner_key = next(iter(v))
+ expanded_values[k] = inner_key
+ expanded_values.update(v[inner_key])
+ else:
+ expanded_values[k] = v
+
+ params.update(expanded_values)
+ params = deep_format(params, params)
+ if combination_matches(params, excludes):
+ log = logging.getLogger("zuul.Migrate.YamlParser")
+ log.debug('Excluding combination %s', str(params))
+ continue
+
+ allow_empty_variables = self.config \
+ and self.config.has_section('job_builder') \
+ and self.config.has_option(
+ 'job_builder', 'allow_empty_variables') \
+ and self.config.getboolean(
+ 'job_builder', 'allow_empty_variables')
+
+ for key in template.keys():
+ if key not in params:
+ params[key] = template[key]
+
+ params['template-name'] = template_name
+ expanded = deep_format(template, params, allow_empty_variables)
+
+ job_name = expanded.get('name')
+ if jobs_glob and not matches(job_name, jobs_glob):
+ continue
+
+ self.formatDescription(expanded)
+ expanded['orig_template'] = orig_template
+ expanded['template_name'] = template_name
+ self.jobs.append(expanded)
+
+
+jenkins_jobs.parser.YamlParser.expandYamlForTemplateJob = expandYamlForTemplateJob
+
+
+class JJB(jenkins_jobs.builder.Builder):
+ def __init__(self):
+ self.global_config = None
+ self._plugins_list = []
+
+ def expandComponent(self, component_type, component, template_data):
+ component_list_type = component_type + 's'
+ new_components = []
+ if isinstance(component, dict):
+ name, component_data = next(iter(component.items()))
+ if template_data:
+ component_data = jenkins_jobs.formatter.deep_format(
+ component_data, template_data, True)
+ else:
+ name = component
+ component_data = {}
+
+ new_component = self.parser.data.get(component_type, {}).get(name)
+ if new_component:
+ for new_sub_component in new_component[component_list_type]:
+ new_components.extend(
+ self.expandComponent(component_type,
+ new_sub_component, component_data))
+ else:
+ new_components.append({name: component_data})
+ return new_components
+
+ def expandMacros(self, job):
+ for component_type in ['builder', 'publisher', 'wrapper']:
+ component_list_type = component_type + 's'
+ new_components = []
+ for new_component in job.get(component_list_type, []):
+ new_components.extend(self.expandComponent(component_type,
+ new_component, {}))
+ job[component_list_type] = new_components
+
+
+class OldProject:
+ def __init__(self, name, gate_jobs):
+ self.name = name
+ self.gate_jobs = gate_jobs
+
+
+class OldJob:
+ def __init__(self, name):
+ self.name = name
+ self.queue_name = None
+
+ def __repr__(self):
+ return self.name
+
+
+class Job:
+
+ def __init__(self,
+ orig: str,
+ name: str=None,
+ content: Dict[str, Any]=None,
+ vars: Dict[str, str]=None,
+ required_projects: List[str]=None,
+ nodes: List[str]=None,
+ parent=None) -> None:
+ self.orig = orig
+ self.voting = True
+ self.name = name
+ self.content = content.copy() if content else None
+ self.vars = vars or {}
+ self.required_projects = required_projects or []
+ self.nodes = nodes or []
+ self.parent = parent
+ self.branch = None
+
+ if self.content and not self.name:
+ self.name = get_single_key(content)
+ if not self.name:
+ self.name = self.orig
+ self.name = self.name.replace('-{name}', '').replace('{name}-', '')
+ if self.orig.endswith('-nv'):
+ self.voting = False
+ if self.name.endswith('-nv'):
+ # NOTE(mordred) This MIGHT not be safe - it's possible, although
+ # silly, for someone to have -nv and normal versions of the same
+ # job in the same pipeline. Let's deal with that if we find it
+ # though.
+ self.name = self.name.replace('-nv', '')
+
+ def _stripNodeName(self, node):
+ node_key = '-{node}'.format(node=node)
+ self.name = self.name.replace(node_key, '')
+
+ def setVars(self, vars):
+ self.vars = vars
+
+ def setRequiredProjects(self, required_projects):
+ self.required_projects = required_projects
+
+ def setParent(self, parent):
+ self.parent = parent
+
+ def extractNode(self, default_node, labels):
+ matching_label = None
+ for label in labels:
+ if label in self.orig:
+ if not matching_label:
+ matching_label = label
+ elif len(label) > len(matching_label):
+ matching_label = label
+
+ if matching_label:
+ if matching_label == default_node:
+ self._stripNodeName(matching_label)
+ else:
+ self.nodes.append(matching_label)
+
+ def getDepends(self):
+ return [self.parent.name]
+
+ def getNodes(self):
+ return self.nodes
+
+ def toDict(self):
+ if self.content:
+ output = self.content
+ else:
+ output = collections.OrderedDict()
+ output[self.name] = collections.OrderedDict()
+
+ if self.parent:
+ output[self.name].setdefault('dependencies', self.getDepends())
+
+ if not self.voting:
+ output[self.name].setdefault('voting', False)
+
+ if self.nodes:
+ output[self.name].setdefault('nodes', self.getNodes())
+
+ if self.required_projects:
+ output[self.name].setdefault(
+ 'required-projects', self.required_projects)
+
+ if self.vars:
+ job_vars = output[self.name].setdefault('vars', collections.OrderedDict())
+ job_vars.update(self.vars)
+
+ if self.branch:
+ output[self.name]['branch'] = self.branch
+
+ if not output[self.name]:
+ return self.name
+
+ return output
+
+
+class JobMapping:
+ log = logging.getLogger("zuul.Migrate.JobMapping")
+
+ def __init__(self, nodepool_config, layout, mapping_file=None):
+ self.layout = layout
+ self.job_direct = {}
+ self.labels = []
+ self.job_mapping = []
+ self.template_mapping = {}
+ nodepool_data = ordered_load(open(nodepool_config, 'r'))
+ for label in nodepool_data['labels']:
+ self.labels.append(label['name'])
+ if not mapping_file:
+ self.default_node = 'ubuntu-xenial'
+ else:
+ mapping_data = ordered_load(open(mapping_file, 'r'))
+ self.default_node = mapping_data['default-node']
+ for map_info in mapping_data.get('job-mapping', []):
+ if map_info['old'].startswith('^'):
+ map_info['pattern'] = re.compile(map_info['old'])
+ self.job_mapping.append(map_info)
+ else:
+ self.job_direct[map_info['old']] = map_info['new']
+
+ for map_info in mapping_data.get('template-mapping', []):
+ self.template_mapping[map_info['old']] = map_info['new']
+
+ def makeNewName(self, new_name, match_dict):
+ return new_name.format(**match_dict)
+
+ def hasProjectTemplate(self, old_name):
+ return old_name in self.template_mapping
+
+ def getNewTemplateName(self, old_name):
+ return self.template_mapping.get(old_name, old_name)
+
+ def mapNewJob(self, name, info) -> Optional[Job]:
+ matches = info['pattern'].search(name)
+ if not matches:
+ return None
+ match_dict = matches.groupdict()
+ if isinstance(info['new'], dict):
+ job = Job(orig=name, content=info['new'])
+ else:
+ job = Job(orig=name, name=info['new'].format(**match_dict))
+
+ if 'vars' in info:
+ job.setVars(self._expandVars(info, match_dict))
+
+ if 'required-projects' in info:
+ job.setRequiredProjects(
+ self._expandRequiredProjects(info, match_dict))
+
+ return job
+
+ def _expandVars(self, info, match_dict):
+ job_vars = info['vars'].copy()
+ for key in job_vars.keys():
+ job_vars[key] = job_vars[key].format(**match_dict)
+ return job_vars
+
+ def _expandRequiredProjects(self, info, match_dict):
+ required_projects = []
+ job_projects = info['required-projects'].copy()
+ for project in job_projects:
+ required_projects.append(project.format(**match_dict))
+ return required_projects
+
+ def getNewJob(self, job_name, remove_gate):
+ if job_name in self.job_direct:
+ if isinstance(self.job_direct[job_name], dict):
+ return Job(job_name, content=self.job_direct[job_name])
+ else:
+ return Job(job_name, name=self.job_direct[job_name])
+
+ new_job = None
+ for map_info in self.job_mapping:
+ new_job = self.mapNewJob(job_name, map_info)
+ if new_job:
+ break
+ if not new_job:
+ orig_name = job_name
+ if remove_gate:
+ job_name = job_name.replace('gate-', '', 1)
+ job_name = 'legacy-{job_name}'.format(job_name=job_name)
+ new_job = Job(orig=orig_name, name=job_name)
+
+ new_job.extractNode(self.default_node, self.labels)
+
+ # Handle matchers
+ for layout_job in self.layout.get('jobs', []):
+ if re.search(layout_job['name'], new_job.orig):
+ if not layout_job.get('voting', True):
+ new_job.voting = False
+ if layout_job.get('branch'):
+ new_job.branch = layout_job['branch']
+
+ return new_job
+
+
+class ChangeQueue:
+class ChangeQueue:
+ def __init__(self):
+ self.name = ''
+ self.assigned_name = None
+ self.generated_name = None
+ self.projects = []
+ self._jobs = set()
+
+ def getJobs(self):
+ return self._jobs
+
+ def getProjects(self):
+ return [p.name for p in self.projects]
+
+ def addProject(self, project):
+ if project not in self.projects:
+ self.projects.append(project)
+ self._jobs |= project.gate_jobs
+
+ names = [x.name for x in self.projects]
+ names.sort()
+ self.generated_name = names[0].split('/')[-1]
+
+ for job in self._jobs:
+ if job.queue_name:
+ if (self.assigned_name and
+ job.queue_name != self.assigned_name):
+ raise Exception("More than one name assigned to "
+ "change queue: %s != %s" %
+ (self.assigned_name,
+ job.queue_name))
+ self.assigned_name = job.queue_name
+ self.name = self.assigned_name or self.generated_name
+
+ def mergeChangeQueue(self, other):
+ for project in other.projects:
+ self.addProject(project)
+
+class ZuulMigrate:
+
+ log = logging.getLogger("zuul.Migrate")
+
+ def __init__(self, layout, job_config, nodepool_config,
+ outdir, mapping, move):
+ self.layout = ordered_load(open(layout, 'r'))
+ self.job_config = job_config
+ self.outdir = outdir
+ self.mapping = JobMapping(nodepool_config, self.layout, mapping)
+ self.move = move
+
+ self.jobs = {}
+ self.old_jobs = {}
+
+ def run(self):
+ self.loadJobs()
+ self.buildChangeQueues()
+ self.convertJobs()
+ self.writeJobs()
+
+ def loadJobs(self):
+ self.log.debug("Loading jobs")
+ builder = JJB()
+ builder.load_files([self.job_config])
+ builder.parser.expandYaml()
+ unseen = set(self.jobs.keys())
+ for job in builder.parser.jobs:
+ builder.expandMacros(job)
+ self.jobs[job['name']] = job
+ unseen.discard(job['name'])
+ for name in unseen:
+ del self.jobs[name]
+
+ def getOldJob(self, name):
+ if name not in self.old_jobs:
+ self.old_jobs[name] = OldJob(name)
+ return self.old_jobs[name]
+
+ def flattenOldJobs(self, tree, name=None):
+ if isinstance(tree, str):
+ n = tree.format(name=name)
+ return [self.getOldJob(n)]
+
+ new_list = [] # type: ignore
+ if isinstance(tree, list):
+ for job in tree:
+ new_list.extend(self.flattenOldJobs(job, name))
+ elif isinstance(tree, dict):
+ parent_name = get_single_key(tree)
+ jobs = self.flattenOldJobs(tree[parent_name], name)
+ for job in jobs:
+ # flattenOldJobs already returns OldJob objects
+ new_list.append(job)
+ new_list.append(self.getOldJob(parent_name))
+ return new_list
+
+ def buildChangeQueues(self):
+ self.log.debug("Building shared change queues")
+
+ for j in self.layout['jobs']:
+ if '^' in j['name'] or '$' in j['name']:
+ continue
+ job = self.getOldJob(j['name'])
+ job.queue_name = j.get('queue-name')
+
+ change_queues = []
+
+ for project in self.layout.get('projects'):
+ if 'gate' not in project:
+ continue
+ gate_jobs = set()
+ for template in project.get('template', []):
+ for pt in self.layout.get('project-templates', []):
+ if pt['name'] != template['name']:
+ continue
+ if 'gate' not in pt:
+ continue
+ gate_jobs |= set(self.flattenOldJobs(pt['gate'], project['name']))
+ gate_jobs |= set(self.flattenOldJobs(project['gate']))
+ old_project = OldProject(project['name'], gate_jobs)
+ change_queue = ChangeQueue()
+ change_queue.addProject(old_project)
+ change_queues.append(change_queue)
+ self.log.debug("Created queue: %s" % change_queue)
+
+ # Iterate over all queues trying to combine them, and keep doing
+ # so until they can not be combined further.
+ last_change_queues = change_queues
+ while True:
+ new_change_queues = self.combineChangeQueues(last_change_queues)
+ if len(last_change_queues) == len(new_change_queues):
+ break
+ last_change_queues = new_change_queues
+
+ self.log.debug(" Shared change queues:")
+ for queue in new_change_queues:
+ self.log.debug(" %s containing %s" % (
+ queue, queue.generated_name))
+ self.change_queues = new_change_queues
+
+ def combineChangeQueues(self, change_queues):
+ self.log.debug("Combining shared queues")
+ new_change_queues = []
+ for a in change_queues:
+ merged_a = False
+ for b in new_change_queues:
+ if not a.getJobs().isdisjoint(b.getJobs()):
+ self.log.debug("Merging queue %s into %s" % (a, b))
+ b.mergeChangeQueue(a)
+ merged_a = True
+ break # this breaks out of 'for b' and continues 'for a'
+ if not merged_a:
+ self.log.debug("Keeping queue %s" % (a))
+ new_change_queues.append(a)
+ return new_change_queues
+
+ def convertJobs(self):
+ pass
+
+ def setupDir(self):
+ zuul_yaml = os.path.join(self.outdir, 'zuul.yaml')
+ zuul_d = os.path.join(self.outdir, 'zuul.d')
+ orig = os.path.join(zuul_d, '01zuul.yaml')
+ outfile = os.path.join(zuul_d, '99converted.yaml')
+ if not os.path.exists(self.outdir):
+ os.makedirs(self.outdir)
+ if not os.path.exists(zuul_d):
+ os.makedirs(zuul_d)
+ if os.path.exists(zuul_yaml) and self.move:
+ os.rename(zuul_yaml, orig)
+ return outfile
+
+ def makeNewJobs(self, old_job, parent: Job=None):
+ self.log.debug("makeNewJobs(%s)", old_job)
+ if isinstance(old_job, str):
+ remove_gate = True
+ if old_job.startswith('gate-'):
+ # Check to see if gate- and bare versions exist
+ if old_job.replace('gate-', '', 1) in self.jobs:
+ remove_gate = False
+ job = self.mapping.getNewJob(old_job, remove_gate)
+ if parent:
+ job.setParent(parent)
+ return [job]
+
+ new_list = [] # type: ignore
+ if isinstance(old_job, list):
+ for job in old_job:
+ new_list.extend(self.makeNewJobs(job, parent=parent))
+
+ elif isinstance(old_job, dict):
+ parent_name = get_single_key(old_job)
+ parent = Job(orig=parent_name, parent=parent)
+
+ jobs = self.makeNewJobs(old_job[parent_name], parent=parent)
+ for job in jobs:
+ new_list.append(job)
+ new_list.append(parent)
+ return new_list
+
+ def writeProjectTemplate(self, template):
+ new_template = collections.OrderedDict()
+ if 'name' in template:
+ new_template['name'] = template['name']
+ for key, value in template.items():
+ if key == 'name':
+ continue
+ jobs = [job.toDict() for job in self.makeNewJobs(value)]
+ new_template[key] = dict(jobs=jobs)
+
+ return new_template
+
+ def writeProject(self, project):
+ new_project = collections.OrderedDict()
+ if 'name' in project:
+ new_project['name'] = project['name']
+ if 'template' in project:
+ new_project['template'] = []
+ for template in project['template']:
+ new_project['template'].append(dict(
+ name=self.mapping.getNewTemplateName(template['name'])))
+ for key, value in project.items():
+ if key in ('name', 'template'):
+ continue
+ else:
+ new_project[key] = collections.OrderedDict()
+ if key == 'gate':
+ for queue in self.change_queues:
+ if project['name'] not in queue.getProjects():
+ continue
+ if len(queue.getProjects()) == 1:
+ continue
+ new_project[key]['queue'] = queue.name
+ jobs = [job.toDict() for job in self.makeNewJobs(value)]
+ new_project[key]['jobs'] = jobs
+
+ return new_project
+
+ def writeJobs(self):
+ outfile = self.setupDir()
+ config = []
+
+ for template in self.layout.get('project-templates', []):
+ self.log.debug("Processing template: %s", template)
+ if not self.mapping.hasProjectTemplate(template['name']):
+ config.append(
+ {'project-template': self.writeProjectTemplate(template)})
+
+ for project in self.layout.get('projects', []):
+ config.append(
+ {'project': self.writeProject(project)})
+
+ with open(outfile, 'w') as yamlout:
+ # Insert an extra space between top-level list items
+ yamlout.write(ordered_dump(config).replace('\n-', '\n\n-'))
+
+
+def main():
+ yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+ construct_yaml_map)
+
+ yaml.add_representer(collections.OrderedDict, project_representer,
+ Dumper=IndentedDumper)
+
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
+ parser.add_argument(
+ 'layout',
+ help="The Zuul v2 layout.yaml file to read.")
+ parser.add_argument(
+ 'job_config',
+ help="Directory containing Jenkins Job Builder job definitions.")
+ parser.add_argument(
+ 'nodepool_config',
+ help="Nodepool config file containing complete set of node names")
+ parser.add_argument(
+ 'outdir',
+ help="A directory into which the Zuul v3 config will be written.")
+ parser.add_argument(
+ '--mapping',
+ default=None,
+ help="A filename with a yaml mapping of old name to new name.")
+ parser.add_argument(
+ '-v', dest='verbose', action='store_true', help='verbose output')
+ parser.add_argument(
+ '-m', dest='move', action='store_true',
+ help='Move zuul.yaml to zuul.d if it exists')
+
+ args = parser.parse_args()
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ ZuulMigrate(args.layout, args.job_config, args.nodepool_config,
+ args.outdir, args.mapping, args.move).run()
+
+
+if __name__ == '__main__':
+ main()
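
A short usage sketch of the pure helpers defined above (assumes zuul with the [migrate] extra is importable; the job name is only an illustration):

    from zuul.cmd.migrate import Job, combination_matches

    # A key missing from a match combination is treated as matching.
    print(combination_matches({'key1': 2, 'key2': 3}, [{'key2': 3}]))             # True
    print(combination_matches({'key1': 2, 'key2': 3}, [{'key1': 2, 'key2': 2}]))  # False

    # A v2 job name ending in -nv becomes a non-voting job with the suffix stripped.
    job = Job(orig='gate-tempest-dsvm-neutron-nv')
    print(job.name, job.voting)  # gate-tempest-dsvm-neutron False
    print(job.toDict())
    # OrderedDict([('gate-tempest-dsvm-neutron', OrderedDict([('voting', False)]))])
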
diff --git a/zuul/configloader.py b/zuul/configloader.py
index a923fca..13fc310 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -76,6 +76,15 @@
super(MaxNodeError, self).__init__(message)
+class MaxTimeoutError(Exception):
+ def __init__(self, job, tenant):
+ message = textwrap.dedent("""\
+ The job "{job}" exceeds tenant max-job-timeout {maxtimeout}.""")
+ message = textwrap.fill(message.format(
+ job=job.name, maxtimeout=tenant.max_job_timeout))
+ super(MaxTimeoutError, self).__init__(message)
+
+
class DuplicateGroupError(Exception):
def __init__(self, nodeset, group):
message = textwrap.dedent("""\
@@ -505,6 +514,10 @@
if secrets and not conf['_source_context'].trusted:
job.post_review = True
+ if conf.get('timeout') and tenant.max_job_timeout != -1 and \
+ int(conf['timeout']) > tenant.max_job_timeout:
+ raise MaxTimeoutError(job, tenant)
+
if 'post-review' in conf:
if conf['post-review']:
job.post_review = True
@@ -1059,6 +1072,7 @@
def getSchema(connections=None):
tenant = {vs.Required('name'): str,
'max-nodes-per-job': int,
+ 'max-job-timeout': int,
'source': TenantParser.validateTenantSources(connections),
'exclude-unprotected-branches': bool,
'default-parent': str,
@@ -1072,6 +1086,8 @@
tenant = model.Tenant(conf['name'])
if conf.get('max-nodes-per-job') is not None:
tenant.max_nodes_per_job = conf['max-nodes-per-job']
+ if conf.get('max-job-timeout') is not None:
+ tenant.max_job_timeout = int(conf['max-job-timeout'])
if conf.get('exclude-unprotected-branches') is not None:
tenant.exclude_unprotected_branches = \
conf['exclude-unprotected-branches']
@@ -1342,6 +1358,8 @@
continue
TenantParser.log.debug("Waiting for cat job %s" % (job,))
job.wait()
+ if not job.updated:
+ raise Exception("Cat job %s failed" % (job,))
TenantParser.log.debug("Cat job %s got files %s" %
(job, job.files))
loaded = False
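
The max-job-timeout check added to the job parser only rejects a job when it explicitly requests a timeout above the tenant limit, and a limit of -1 disables the check. An illustrative restatement of the condition (not the Zuul API):

    def exceeds_max_job_timeout(job_timeout, max_job_timeout):
        # Mirrors the new condition: no timeout on the job, or a tenant
        # limit of -1, means the job is always accepted.
        return bool(job_timeout and max_job_timeout != -1
                    and int(job_timeout) > max_job_timeout)

    assert exceeds_max_job_timeout(3600, 1800)        # rejected (test fixture limit)
    assert not exceeds_max_job_timeout(3600, 10800)   # within the default limit
    assert not exceeds_max_job_timeout(None, 1800)    # job sets no timeout
    assert not exceeds_max_job_timeout(3600, -1)      # limit disabled
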
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index fca36c8..3d0eb37 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -553,13 +553,10 @@
def getGithubClient(self,
project=None,
- user_id=None,
- use_app=True):
+ user_id=None):
# if you're authenticating for a project and you're an integration then
- # you need to use the installation specific token. There are some
- # operations that are not yet supported by integrations so
- # use_app lets you use api_key auth.
- if use_app and project and self.app_id:
+ # you need to use the installation specific token.
+ if project and self.app_id:
github = self._createGithubClient()
github.login(token=self._get_installation_key(project, user_id))
github._zuul_project = project
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 03fcb4a..62b9716 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -203,7 +203,7 @@
except OSError:
self.log.exception(
'Problem sending SIGTERM to agent {}'.format(self.env))
- self.log.info('Sent SIGTERM to SSH Agent, {}'.format(self.env))
+ self.log.debug('Sent SIGTERM to SSH Agent, {}'.format(self.env))
self.env = {}
def add(self, key_path):
@@ -1618,7 +1618,8 @@
now=datetime.datetime.now()))
for line in syntax_buffer:
job_output.write("{now} | {line}\n".format(
- now=datetime.datetime.now(), line=line))
+ now=datetime.datetime.now(),
+ line=line.decode('utf-8').rstrip()))
return (self.RESULT_NORMAL, ret)
@@ -1634,7 +1635,7 @@
cmd.extend(['-e', '@' + playbook.secrets])
if success is not None:
- cmd.extend(['-e', 'success=%s' % str(bool(success))])
+ cmd.extend(['-e', 'zuul_success=%s' % str(bool(success))])
if phase:
cmd.extend(['-e', 'zuul_execution_phase=%s' % phase])
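
Together with the jobs.rst addition, the rename above means post-run playbooks now receive the result as zuul_success rather than success. A minimal sketch of the resulting extra-vars (an illustration of the command-line shape, not executor internals):

    def result_extra_vars(success):
        cmd = []
        if success is not None:
            cmd.extend(['-e', 'zuul_success=%s' % str(bool(success))])
        return cmd

    print(result_extra_vars(True))   # ['-e', 'zuul_success=True']
    print(result_extra_vars(False))  # ['-e', 'zuul_success=False']
    print(result_extra_vars(None))   # [] -- run phase, variable not passed
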
diff --git a/zuul/merger/client.py b/zuul/merger/client.py
index 5191a44..2614e58 100644
--- a/zuul/merger/client.py
+++ b/zuul/merger/client.py
@@ -134,18 +134,18 @@
def onBuildCompleted(self, job):
data = getJobData(job)
merged = data.get('merged', False)
- updated = data.get('updated', False)
+ job.updated = data.get('updated', False)
commit = data.get('commit')
files = data.get('files', {})
repo_state = data.get('repo_state', {})
job.files = files
self.log.info("Merge %s complete, merged: %s, updated: %s, "
"commit: %s" %
- (job, merged, updated, commit))
+ (job, merged, job.updated, commit))
job.setComplete()
if job.build_set:
self.sched.onMergeCompleted(job.build_set,
- merged, updated, commit, files,
+ merged, job.updated, commit, files,
repo_state)
# The test suite expects the job to be removed from the
# internal account after the wake flag is set.
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index fc599c1..881209d 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -111,7 +111,7 @@
def refstate(self, job):
args = json.loads(job.arguments)
- success, repo_state = self.merger.getItemRepoState(args['items'])
+ success, repo_state = self.merger.getRepoState(args['items'])
result = dict(updated=success,
repo_state=repo_state)
job.sendWorkComplete(json.dumps(result))
diff --git a/zuul/model.py b/zuul/model.py
index 1ef8d3a..0e42368 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -2506,6 +2506,7 @@
def __init__(self, name):
self.name = name
self.max_nodes_per_job = 5
+ self.max_job_timeout = 10800
self.exclude_unprotected_branches = False
self.default_base_job = None
self.layout = None
@@ -2667,20 +2668,30 @@
class TimeDataBase(object):
def __init__(self, root):
self.root = root
- self.jobs = {}
- def _getTD(self, name):
- td = self.jobs.get(name)
- if not td:
- td = JobTimeData(os.path.join(self.root, name))
- self.jobs[name] = td
- td.load()
+ def _getTD(self, build):
+ if hasattr(build.build_set.item.change, 'branch'):
+ branch = build.build_set.item.change.branch
+ else:
+ branch = ''
+
+ dir_path = os.path.join(
+ self.root,
+ build.build_set.item.pipeline.layout.tenant.name,
+ build.build_set.item.change.project.canonical_name,
+ branch)
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+ path = os.path.join(dir_path, build.job.name)
+
+ td = JobTimeData(path)
+ td.load()
return td
def getEstimatedTime(self, name):
return self._getTD(name).getEstimatedTime()
- def update(self, name, elapsed, result):
- td = self._getTD(name)
+ def update(self, build, elapsed, result):
+ td = self._getTD(build)
td.add(elapsed, result)
td.save()
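
With the _getTD() change above, build time records move from a single per-job file to a per-tenant/project/branch hierarchy. A sketch of the resulting path (the root directory here is hypothetical):

    import os

    root = '/var/lib/zuul/times'         # hypothetical time-database root
    tenant = 'tenant-one'                # item.pipeline.layout.tenant.name
    project = 'git.example.com/foo/bar'  # item.change.project.canonical_name
    branch = 'master'                    # '' when the change has no branch

    print(os.path.join(root, tenant, project, branch, 'job-name'))
    # /var/lib/zuul/times/tenant-one/git.example.com/foo/bar/master/job-name
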
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 52b34ec..806ba86 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -826,7 +826,7 @@
return
try:
build.estimated_time = float(self.time_database.getEstimatedTime(
- build.job.name))
+ build))
except Exception:
self.log.exception("Exception estimating build time:")
pipeline.manager.onBuildStarted(event.build)
@@ -865,8 +865,7 @@
if build.end_time and build.start_time and build.result:
duration = build.end_time - build.start_time
try:
- self.time_database.update(
- build.job.name, duration, build.result)
+ self.time_database.update(build, duration, build.result)
except Exception:
self.log.exception("Exception recording build time:")
pipeline.manager.onBuildCompleted(event.build)