Merge "Print SIGTERM logging to debug" into feature/zuulv3
diff --git a/.zuul.yaml b/.zuul.yaml
index b16e4a8..e96bc5a 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -19,13 +19,25 @@
nodes: zuul-functional
pre-run: playbooks/zuul-stream/pre
run: playbooks/zuul-stream/functional
- post-run: playbooks/zuul-stream/post
+ post-run:
+ - playbooks/zuul-stream/post
+ - playbooks/zuul-stream/post-ara
required-projects:
- openstack/ara
files:
- "zuul/ansible/callback/.*"
- "playbooks/zuul-stream/.*"
+
+- job:
+ name: zuul-migrate
+ parent: unittests
+ run: playbooks/zuul-migrate
+ # We're adding zuul to the required-projects so that we can also trigger
+ # this from project-config changes
+ required-projects:
+ - openstack-infra/project-config
+ - openstack-infra/zuul
+
- project:
name: openstack-infra/zuul
check:
@@ -36,6 +48,10 @@
- tox-pep8
- tox-py35
- zuul-stream-functional
+ - zuul-migrate:
+ files:
+ - zuul/cmd/migrate.py
+ - playbooks/zuul-migrate.yaml
gate:
jobs:
- tox-docs
@@ -43,7 +59,7 @@
- tox-py35
post:
jobs:
- - publish-openstack-python-docs:
+ - publish-openstack-python-docs-infra:
vars:
afs_publisher_target: 'infra/zuul'
- publish-openstack-python-branch-tarball
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
index 54bc10a..4722750 100644
--- a/doc/source/admin/tenants.rst
+++ b/doc/source/admin/tenants.rst
@@ -163,6 +163,11 @@
The maximum number of nodes a job can request. A value of
       '-1' removes the limit.
+ .. attr:: max-job-timeout
+ :default: 10800
+
+      The maximum timeout for jobs, in seconds. A value of '-1'
+      removes the limit.
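+
+      For example, a tenant definition capping job timeouts at one hour
+      might look like this (an illustrative sketch; `example-tenant` is
+      a placeholder name):
+
+      .. code-block:: yaml
+
+         - tenant:
+             name: example-tenant
+             max-job-timeout: 3600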
+
.. attr:: exclude-unprotected-branches
:default: false
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 837fb17..3d24f5d 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -303,11 +303,24 @@
The identifier for the change.
+ .. var:: change_url
+
+ The URL to the source location of the given change.
+ E.g., `https://review.example.org/#/c/123456/` or
+ `https://github.com/example/example/pull/1234`.
+
.. var:: patchset
The patchset identifier for the change. If a change is
revised, this will have a different value.
+.. var:: zuul_success
+
+   Post-run playbook(s) will be passed this variable to indicate whether
+   the run phase of the job succeeded. This variable is meant to be used
+   with the `bool` filter.
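+
+   For example, a post-run task could be gated on the result like this
+   (an illustrative sketch, not part of this change):
+
+   .. code-block:: yaml
+
+      - name: Note whether the run phase succeeded
+        debug:
+          msg: The run phase succeeded
+        when: zuul_success | bool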
+
+
Change Items
++++++++++++
@@ -332,6 +345,12 @@
The patchset identifier for the change. If a change is revised,
this will have a different value.
+ .. var:: change_url
+
+ The URL to the source location of the given change.
+ E.g., `https://review.example.org/#/c/123456/` or
+ `https://github.com/example/example/pull/1234`.
+
Branch Items
++++++++++++
diff --git a/etc/status/public_html/jquery.zuul.js b/etc/status/public_html/jquery.zuul.js
index c2cf279..1937cd5 100644
--- a/etc/status/public_html/jquery.zuul.js
+++ b/etc/status/public_html/jquery.zuul.js
@@ -554,14 +554,11 @@
}
$.each(changes, function (change_i, change) {
- // Only add a change when it has jobs
- if (change.jobs.length > 0) {
- var $change_box =
- format.change_with_status_tree(
- change, change_queue);
- $html.append($change_box);
- format.display_patchset($change_box);
- }
+ var $change_box =
+ format.change_with_status_tree(
+ change, change_queue);
+ $html.append($change_box);
+ format.display_patchset($change_box);
});
});
});
diff --git a/playbooks/zuul-migrate.yaml b/playbooks/zuul-migrate.yaml
new file mode 100644
index 0000000..66c7bd5
--- /dev/null
+++ b/playbooks/zuul-migrate.yaml
@@ -0,0 +1,18 @@
+- hosts: all
+ tasks:
+
+ - name: Install migration dependencies
+ command: "python3 -m pip install --user src/git.openstack.org/openstack-infra/zuul[migrate]"
+
+ - name: Migrate the data
+ command: "python3 ../zuul/zuul/cmd/migrate.py zuul/layout.yaml jenkins/jobs nodepool/nodepool.yaml . --mapping=zuul/mapping.yaml -v -m"
+ args:
+ chdir: src/git.openstack.org/openstack-infra/project-config
+
+ - name: Collect generated files
+ synchronize:
+ dest: "{{ zuul.executor.log_root }}"
+ mode: pull
+ src: "src/git.openstack.org/openstack-infra/project-config/zuul.d"
+ verify_host: true
+ no_log: true
diff --git a/playbooks/zuul-stream/fixtures/test-stream.yaml b/playbooks/zuul-stream/fixtures/test-stream.yaml
index 6a31ff8..fd28757 100644
--- a/playbooks/zuul-stream/fixtures/test-stream.yaml
+++ b/playbooks/zuul-stream/fixtures/test-stream.yaml
@@ -10,6 +10,10 @@
debug:
var: setupvar
+ - name: Output a debug sentence
+ debug:
+ msg: This is a debug message
+
- name: Run a shell task
command: ip addr show
diff --git a/playbooks/zuul-stream/functional.yaml b/playbooks/zuul-stream/functional.yaml
index 7b5b84f..6b67b05 100644
--- a/playbooks/zuul-stream/functional.yaml
+++ b/playbooks/zuul-stream/functional.yaml
@@ -22,8 +22,8 @@
- name: Validate output - shell task
shell: |
- egrep "^.*\| node1 \| link/loopback" job-output.txt
- egrep "^.*\| node2 \| link/loopback" job-output.txt
+ egrep "^.*\| node1 \| link/loopback" job-output.txt
+ egrep "^.*\| node2 \| link/loopback" job-output.txt
- name: Validate output - loop with items
shell: |
@@ -58,9 +58,3 @@
shell: |
egrep "^.+\| node1 \| OSError.+\/failure-itemloop\/" job-output.txt
egrep "^.+\| node2 \| OSError.+\/failure-itemloop\/" job-output.txt
-
- - name: Generate ARA html
- command: ara generate html ara-output
-
- - name: Compress ARA html
- command: gzip --recursive --best ara-output
diff --git a/playbooks/zuul-stream/post-ara.yaml b/playbooks/zuul-stream/post-ara.yaml
new file mode 100644
index 0000000..e666d21
--- /dev/null
+++ b/playbooks/zuul-stream/post-ara.yaml
@@ -0,0 +1,14 @@
+- hosts: controller
+ tasks:
+
+ - name: Generate ARA html
+ command: ara generate html ara-output
+
+ - name: Compress ARA html
+ command: gzip --recursive --best ara-output
+
+ - name: Fetch ARA files
+ synchronize:
+ src: "{{ ansible_user_dir }}/ara-output"
+ dest: "{{ zuul.executor.log_root }}/stream-files"
+ mode: pull
diff --git a/playbooks/zuul-stream/post.yaml b/playbooks/zuul-stream/post.yaml
index f3d4f9c..2c717a8 100644
--- a/playbooks/zuul-stream/post.yaml
+++ b/playbooks/zuul-stream/post.yaml
@@ -23,4 +23,3 @@
- ansible.cfg
- stream-job-output.txt
- job-output.json
- - ara-output
diff --git a/setup.cfg b/setup.cfg
index ce7a40e..63ff562 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,6 +27,7 @@
zuul-executor = zuul.cmd.executor:main
zuul-bwrap = zuul.driver.bubblewrap:main
zuul-web = zuul.cmd.web:main
+ zuul-migrate = zuul.cmd.migrate:main
[build_sphinx]
source-dir = doc/source
@@ -37,3 +38,5 @@
[extras]
mysql_reporter=
PyMySQL
+migrate=
+ jenkins-job-builder==1.6.2
diff --git a/tests/base.py b/tests/base.py
index c49e1ce..fcc5e84 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -195,9 +195,16 @@
if not large:
for fn, content in files.items():
fn = os.path.join(path, fn)
- with open(fn, 'w') as f:
- f.write(content)
- repo.index.add([fn])
+ if content is None:
+ os.unlink(fn)
+ repo.index.remove([fn])
+ else:
+ d = os.path.dirname(fn)
+ if not os.path.exists(d):
+ os.makedirs(d)
+ with open(fn, 'w') as f:
+ f.write(content)
+ repo.index.add([fn])
else:
for fni in range(100):
fn = os.path.join(path, str(fni))
@@ -1188,7 +1195,7 @@
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ self.sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self.sock.bind(('', 0))
self.port = self.sock.getsockname()[1]
self.wake_read, self.wake_write = os.pipe()
@@ -2138,6 +2145,7 @@
def getGithubConnection(driver, name, config):
con = FakeGithubConnection(driver, name, config,
upstream_root=self.upstream_root)
+ self.event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
return con
diff --git a/tests/fixtures/config/in-repo-join/git/common-config/playbooks/common-config-test.yaml b/tests/fixtures/config/in-repo-join/git/common-config/playbooks/common-config-test.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/common-config/playbooks/common-config-test.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/in-repo-join/git/common-config/zuul.yaml b/tests/fixtures/config/in-repo-join/git/common-config/zuul.yaml
new file mode 100644
index 0000000..561fc39
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/common-config/zuul.yaml
@@ -0,0 +1,46 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (tenant-one-gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+
+- job:
+ name: common-config-test
+
+- project:
+ name: org/project
+ check:
+ jobs:
+ - common-config-test
diff --git a/tests/fixtures/config/in-repo-join/git/org_project/.zuul.yaml b/tests/fixtures/config/in-repo-join/git/org_project/.zuul.yaml
new file mode 100644
index 0000000..280342c
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/org_project/.zuul.yaml
@@ -0,0 +1,2 @@
+- job:
+ name: project-test1
diff --git a/tests/fixtures/config/in-repo-join/git/org_project/README b/tests/fixtures/config/in-repo-join/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/in-repo-join/git/org_project/playbooks/project-test1.yaml b/tests/fixtures/config/in-repo-join/git/org_project/playbooks/project-test1.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/org_project/playbooks/project-test1.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/in-repo-join/main.yaml b/tests/fixtures/config/in-repo-join/main.yaml
new file mode 100644
index 0000000..208e274
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
diff --git a/tests/fixtures/config/in-repo/git/common-config/zuul.yaml b/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
index ff4268b..5623467 100644
--- a/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
@@ -78,6 +78,8 @@
- project:
name: common-config
+ check:
+ jobs: []
tenant-one-gate:
jobs:
- common-config-test
diff --git a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
index 60cd434..e1c27bb 100644
--- a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
@@ -3,6 +3,8 @@
- project:
name: org/project
+ check:
+ jobs: []
tenant-one-gate:
jobs:
- project-test1
diff --git a/tests/fixtures/config/multi-tenant/main.yaml b/tests/fixtures/config/multi-tenant/main.yaml
index 4916905..e667588 100644
--- a/tests/fixtures/config/multi-tenant/main.yaml
+++ b/tests/fixtures/config/multi-tenant/main.yaml
@@ -1,5 +1,6 @@
- tenant:
name: tenant-one
+ max-job-timeout: 1800
source:
gerrit:
config-projects:
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index a088236..ebb5e1c 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -17,6 +17,7 @@
from testtools.matchers import MatchesRegex, StartsWith
import urllib
import time
+from unittest import skip
import git
@@ -685,6 +686,8 @@
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
+ # TODO(jlk): Make this a more generic test for unknown project
+ @skip("Skipped for rewrite of webhook handler")
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_ping_event(self):
# Test valid ping
diff --git a/tests/unit/test_log_streamer.py b/tests/unit/test_log_streamer.py
index f47a8c8..c808540 100644
--- a/tests/unit/test_log_streamer.py
+++ b/tests/unit/test_log_streamer.py
@@ -22,6 +22,7 @@
import os.path
import socket
import tempfile
+import testtools
import threading
import time
@@ -34,7 +35,7 @@
def setUp(self):
super(TestLogStreamer, self).setUp()
- self.host = '0.0.0.0'
+ self.host = '::'
def startStreamer(self, port, root=None):
if not root:
@@ -46,16 +47,13 @@
streamer = self.startStreamer(port)
self.addCleanup(streamer.stop)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.addCleanup(s.close)
- self.assertEqual(0, s.connect_ex((self.host, port)))
+ s = socket.create_connection((self.host, port))
s.close()
streamer.stop()
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.addCleanup(s.close)
- self.assertNotEqual(0, s.connect_ex((self.host, port)))
+ with testtools.ExpectedException(ConnectionRefusedError):
+ s = socket.create_connection((self.host, port))
s.close()
@@ -66,7 +64,7 @@
def setUp(self):
super(TestStreaming, self).setUp()
- self.host = '0.0.0.0'
+ self.host = '::'
self.streamer = None
self.stop_streamer = False
self.streaming_data = ''
@@ -80,8 +78,7 @@
root = tempfile.gettempdir()
self.streamer = zuul.lib.log_streamer.LogStreamer(None, self.host,
port, root)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect((self.host, port))
+ s = socket.create_connection((self.host, port))
self.addCleanup(s.close)
req = '%s\n' % build_uuid
@@ -161,7 +158,7 @@
def runWSClient(self, build_uuid, event):
async def client(loop, build_uuid, event):
- uri = 'http://127.0.0.1:9000/console-stream'
+ uri = 'http://[::1]:9000/console-stream'
try:
session = aiohttp.ClientSession(loop=loop)
async with session.ws_connect(uri) as ws:
@@ -226,7 +223,7 @@
# Start the web server
web_server = zuul.web.ZuulWeb(
- listen_address='127.0.0.1', listen_port=9000,
+ listen_address='::', listen_port=9000,
gear_server='127.0.0.1', gear_port=self.gearman_server.port)
loop = asyncio.new_event_loop()
loop.set_debug(True)
@@ -237,8 +234,11 @@
self.addCleanup(web_server.stop)
# Wait until web server is started
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
- while s.connect_ex((self.host, 9000)):
+ while True:
+ try:
+ with socket.create_connection((self.host, 9000)):
+ break
+ except ConnectionRefusedError:
time.sleep(0.1)
# Start a thread with the websocket client
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 9d695aa..d55ff92 100755
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -371,55 +371,6 @@
dict(name='project-test1', result='SUCCESS', changes='1,2'),
dict(name='project-test2', result='SUCCESS', changes='1,2')])
- def test_dynamic_dependent_pipeline(self):
- # Test dynamically adding a project to a
- # dependent pipeline for the first time
- self.executor_server.hold_jobs_in_build = True
-
- tenant = self.sched.abide.tenants.get('tenant-one')
- gate_pipeline = tenant.layout.pipelines['gate']
-
- in_repo_conf = textwrap.dedent(
- """
- - job:
- name: project-test1
-
- - job:
- name: project-test2
-
- - project:
- name: org/project
- gate:
- jobs:
- - project-test2
- """)
-
- in_repo_playbook = textwrap.dedent(
- """
- - hosts: all
- tasks: []
- """)
-
- file_dict = {'.zuul.yaml': in_repo_conf,
- 'playbooks/project-test2.yaml': in_repo_playbook}
- A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
- files=file_dict)
- A.addApproval('Approved', 1)
- self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
- self.waitUntilSettled()
-
- items = gate_pipeline.getAllItems()
- self.assertEqual(items[0].change.number, '1')
- self.assertEqual(items[0].change.patchset, '1')
- self.assertTrue(items[0].live)
-
- self.executor_server.hold_jobs_in_build = False
- self.executor_server.release()
- self.waitUntilSettled()
-
- # Make sure the dynamic queue got cleaned up
- self.assertEqual(gate_pipeline.queues, [])
-
def test_in_repo_branch(self):
in_repo_conf = textwrap.dedent(
"""
@@ -544,6 +495,84 @@
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
])
+ def test_yaml_list_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ job: foo
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('not a list', A.messages[0],
+ "A should have a syntax error reported")
+
+ def test_yaml_dict_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('not a dictionary', A.messages[0],
+ "A should have a syntax error reported")
+
+ def test_yaml_key_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test2
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('has more than one key', A.messages[0],
+ "A should have a syntax error reported")
+
+ def test_yaml_unknown_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - foobar:
+ foo: bar
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('not recognized', A.messages[0],
+ "A should have a syntax error reported")
+
def test_untrusted_syntax_error(self):
in_repo_conf = textwrap.dedent(
"""
@@ -775,6 +804,194 @@
# isn't this will raise an exception.
tenant.layout.getJob('project-test2')
+ def test_pipeline_error(self):
+ with open(os.path.join(FIXTURE_DIR,
+ 'config/in-repo/git/',
+ 'common-config/zuul.yaml')) as f:
+ base_common_config = f.read()
+
+ in_repo_conf_A = textwrap.dedent(
+ """
+ - pipeline:
+ name: periodic
+ foo: error
+ """)
+
+ file_dict = {'zuul.yaml': None,
+ 'zuul.d/main.yaml': base_common_config,
+ 'zuul.d/test1.yaml': in_repo_conf_A}
+ A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('syntax error',
+ A.messages[0],
+ "A should have an error reported")
+
+ def test_change_series_error(self):
+ with open(os.path.join(FIXTURE_DIR,
+ 'config/in-repo/git/',
+ 'common-config/zuul.yaml')) as f:
+ base_common_config = f.read()
+
+ in_repo_conf_A = textwrap.dedent(
+ """
+ - pipeline:
+ name: periodic
+ foo: error
+ """)
+
+ file_dict = {'zuul.yaml': None,
+ 'zuul.d/main.yaml': base_common_config,
+ 'zuul.d/test1.yaml': in_repo_conf_A}
+ A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
+ files=file_dict)
+
+ in_repo_conf_B = textwrap.dedent(
+ """
+ - job:
+ name: project-test2
+ foo: error
+ """)
+
+ file_dict = {'zuul.yaml': None,
+ 'zuul.d/main.yaml': base_common_config,
+ 'zuul.d/test1.yaml': in_repo_conf_A,
+ 'zuul.d/test2.yaml': in_repo_conf_B}
+ B = self.fake_gerrit.addFakeChange('common-config', 'master', 'B',
+ files=file_dict)
+ B.setDependsOn(A, 1)
+ C = self.fake_gerrit.addFakeChange('common-config', 'master', 'C')
+ C.setDependsOn(B, 1)
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(C.reported, 1,
+ "C should report failure")
+ self.assertIn('depends on a change that failed to merge',
+ C.messages[0],
+ "C should have an error reported")
+
+
+class TestInRepoJoin(ZuulTestCase):
+ # In this config, org/project is not a member of any pipelines, so
+ # that we may test the changes that cause it to join them.
+
+ tenant_config_file = 'config/in-repo-join/main.yaml'
+
+ def test_dynamic_dependent_pipeline(self):
+ # Test dynamically adding a project to a
+ # dependent pipeline for the first time
+ self.executor_server.hold_jobs_in_build = True
+
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ gate_pipeline = tenant.layout.pipelines['gate']
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test1
+
+ - job:
+ name: project-test2
+
+ - project:
+ name: org/project
+ gate:
+ jobs:
+ - project-test2
+ """)
+
+ in_repo_playbook = textwrap.dedent(
+ """
+ - hosts: all
+ tasks: []
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf,
+ 'playbooks/project-test2.yaml': in_repo_playbook}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ items = gate_pipeline.getAllItems()
+ self.assertEqual(items[0].change.number, '1')
+ self.assertEqual(items[0].change.patchset, '1')
+ self.assertTrue(items[0].live)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ # Make sure the dynamic queue got cleaned up
+ self.assertEqual(gate_pipeline.queues, [])
+
+ def test_dynamic_dependent_pipeline_failure(self):
+ # Test that a change behind a failing change adding a project
+ # to a dependent pipeline is dequeued.
+ self.executor_server.hold_jobs_in_build = True
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test1
+
+ - project:
+ name: org/project
+ gate:
+ jobs:
+ - project-test1
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.executor_server.failJob('project-test1', A)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.orderedRelease()
+ self.waitUntilSettled()
+ self.assertEqual(A.reported, 2,
+ "A should report start and failure")
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(B.reported, 1,
+ "B should report start")
+ self.assertHistory([
+ dict(name='project-test1', result='FAILURE', changes='1,1'),
+ dict(name='project-test1', result='ABORTED', changes='1,1 2,1'),
+ ], ordered=False)
+
+ def test_dynamic_dependent_pipeline_absent(self):
+ # Test that a series of dependent changes don't report merge
+ # failures to a pipeline they aren't in.
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.setDependsOn(A, 1)
+
+ A.addApproval('Code-Review', 2)
+ A.addApproval('Approved', 1)
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+ self.waitUntilSettled()
+ self.assertEqual(A.reported, 0,
+ "A should not report")
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(B.reported, 0,
+ "B should not report")
+ self.assertEqual(B.data['status'], 'NEW')
+ self.assertHistory([])
+
class TestAnsible(AnsibleZuulTestCase):
# A temporary class to hold new tests while others are disabled
@@ -1185,7 +1402,7 @@
class TestMaxNodesPerJob(AnsibleZuulTestCase):
tenant_config_file = 'config/multi-tenant/main.yaml'
     def test_max_nodes_reached(self):
in_repo_conf = textwrap.dedent(
"""
- job:
@@ -1220,6 +1437,32 @@
"B should not fail because of nodes limit")
+
+
+class TestMaxTimeout(AnsibleZuulTestCase):
+ tenant_config_file = 'config/multi-tenant/main.yaml'
+
+    def test_max_timeout_exceeded(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: test-job
+ timeout: 3600
+ """)
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertIn('The job "test-job" exceeds tenant max-job-timeout',
+ A.messages[0], "A should fail because of timeout limit")
+
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertNotIn("exceeds tenant max-job-timeout", B.messages[0],
+ "B should not fail because of timeout limit")
+
+
class TestBaseJobs(ZuulTestCase):
tenant_config_file = 'config/base-jobs/main.yaml'
diff --git a/tools/run-migration.sh b/tools/run-migration.sh
new file mode 100755
index 0000000..6c7e250
--- /dev/null
+++ b/tools/run-migration.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stupid script I'm using to test migration script locally
+# Assumes project-config is adjacent to zuul and has the mapping file
+
+BASE_DIR=$(cd "$(dirname "$0")"/../..; pwd)
+cd "$BASE_DIR/project-config"
+python3 "$BASE_DIR/zuul/zuul/cmd/migrate.py" --mapping=zuul/mapping.yaml \
+ zuul/layout.yaml jenkins/jobs nodepool/nodepool.yaml .
diff --git a/tools/zuul-cloner-shim.py b/tools/zuul-cloner-shim.py
new file mode 100755
index 0000000..3d1b2ae
--- /dev/null
+++ b/tools/zuul-cloner-shim.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+# Copyright 2017 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import os
+import re
+import sys
+import yaml
+
+from collections import defaultdict
+from collections import OrderedDict
+
+REPO_SRC_DIR = "~zuul/src/git.openstack.org/"
+
+
+# Class copied from zuul/lib/clonemapper.py with minor logging changes
+class CloneMapper(object):
+
+ def __init__(self, clonemap, projects):
+ self.clonemap = clonemap
+ self.projects = projects
+
+ def expand(self, workspace):
+ print("Workspace path set to: %s" % workspace)
+
+ is_valid = True
+ ret = OrderedDict()
+ errors = []
+ for project in self.projects:
+ dests = []
+ for mapping in self.clonemap:
+ if re.match(r'^%s$' % mapping['name'], project):
+ # Might be matched more than one time
+ dests.append(
+ re.sub(mapping['name'], mapping['dest'], project))
+
+ if len(dests) > 1:
+ errors.append(
+ "Duplicate destinations for %s: %s." % (project, dests))
+ is_valid = False
+ elif len(dests) == 0:
+ print("Using %s as destination (unmatched)" % project)
+ ret[project] = [project]
+ else:
+ ret[project] = dests
+
+ if not is_valid:
+ raise Exception("Expansion errors: %s" % errors)
+
+ print("Mapping projects to workspace...")
+ for project, dest in ret.items():
+ dest = os.path.normpath(os.path.join(workspace, dest[0]))
+ ret[project] = dest
+ print(" %s -> %s" % (project, dest))
+
+ print("Checking overlap in destination directories...")
+ check = defaultdict(list)
+ for project, dest in ret.items():
+ check[dest].append(project)
+
+ dupes = dict((d, p) for (d, p) in check.items() if len(p) > 1)
+ if dupes:
+        raise Exception("Some projects share the same destination: %s"
+                        % dupes)
+
+ print("Expansion completed.")
+ return ret
+
+
+def parseArgs():
+ ZUUL_ENV_SUFFIXES = ('branch', 'ref', 'url', 'project', 'newrev')
+
+ parser = argparse.ArgumentParser()
+
+ # Ignored arguments
+ parser.add_argument('-v', '--verbose', dest='verbose',
+ action='store_true', help='IGNORED')
+ parser.add_argument('--color', dest='color', action='store_true',
+ help='IGNORED')
+ parser.add_argument('--cache-dir', dest='cache_dir', help='IGNORED')
+ parser.add_argument('git_base_url', help='IGNORED')
+ parser.add_argument('--branch', help='IGNORED')
+ parser.add_argument('--project-branch', nargs=1, action='append',
+ metavar='PROJECT=BRANCH', help='IGNORED')
+ for zuul_suffix in ZUUL_ENV_SUFFIXES:
+ env_name = 'ZUUL_%s' % zuul_suffix.upper()
+ parser.add_argument(
+ '--zuul-%s' % zuul_suffix, metavar='$' + env_name,
+ help='IGNORED'
+ )
+
+ # Active arguments
+ parser.add_argument('-m', '--map', dest='clone_map_file',
+ help='specify clone map file')
+ parser.add_argument('--workspace', dest='workspace',
+ default=os.getcwd(),
+                        help='where to clone repositories to')
+ parser.add_argument('projects', nargs='+',
+ help='list of Gerrit projects to clone')
+
+ return parser.parse_args()
+
+
+def readCloneMap(clone_map):
+ clone_map_file = os.path.expanduser(clone_map)
+ if not os.path.exists(clone_map_file):
+ raise Exception("Unable to read clone map file at %s." %
+ clone_map_file)
+    with open(clone_map_file) as f:
+        clone_map = yaml.safe_load(f).get('clonemap')
+    return clone_map
+
+
+def main():
+ args = parseArgs()
+
+ clone_map = []
+ if args.clone_map_file:
+ clone_map = readCloneMap(args.clone_map_file)
+
+ mapper = CloneMapper(clone_map, args.projects)
+ dests = mapper.expand(workspace=args.workspace)
+
+ for project in args.projects:
+ src = os.path.join(os.path.expanduser(REPO_SRC_DIR), project)
+ dst = dests[project]
+
+ # Remove the tail end of the path (since the copy operation will
+ # automatically create that)
+ d = dst.rstrip('/')
+ d, base = os.path.split(d)
+ if not os.path.exists(d):
+ print("Creating %s" % d)
+ os.makedirs(d)
+
+ # Create hard link copy of the source directory
+ cmd = "cp -al %s %s" % (src, dst)
+ print("%s" % cmd)
+ if os.system(cmd):
+ print("Error executing: %s" % cmd)
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index 6aec86b..770a719 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -36,23 +36,6 @@
LOG_STREAM_PORT = 19885
-def linesplit(socket):
- buff = socket.recv(4096).decode("utf-8")
- buffering = True
- while buffering:
- if "\n" in buff:
- (line, buff) = buff.split("\n", 1)
- yield line + "\n"
- else:
- more = socket.recv(4096).decode("utf-8")
- if not more:
- buffering = False
- else:
- buff += more
- if buff:
- yield buff
-
-
def zuul_filter_result(result):
"""Remove keys from shell/command output.
@@ -122,6 +105,7 @@
self._logger = logging.getLogger('zuul.executor.ansible')
def _log(self, msg, ts=None, job=True, executor=False, debug=False):
+ msg = msg.rstrip()
if job:
now = ts or datetime.datetime.now()
self._logger.info("{now} | {msg}".format(now=now, msg=msg))
@@ -144,19 +128,36 @@
continue
msg = "%s\n" % log_id
s.send(msg.encode("utf-8"))
- for line in linesplit(s):
- if "[Zuul] Task exit code" in line:
- return
- elif self._streamers_stop and "[Zuul] Log not found" in line:
- return
- elif "[Zuul] Log not found" in line:
- # don't output this line
- pass
+ buff = s.recv(4096).decode("utf-8")
+ buffering = True
+ while buffering:
+ if "\n" in buff:
+ (line, buff) = buff.split("\n", 1)
+ done = self._log_streamline(host, line)
+ if done:
+ return
else:
- ts, ln = line.split(' | ', 1)
- ln = ln.strip()
+ more = s.recv(4096).decode("utf-8")
+ if not more:
+ buffering = False
+ else:
+ buff += more
+ if buff:
+                self._log_streamline(host, buff)
- self._log("%s | %s " % (host, ln), ts=ts)
+ def _log_streamline(self, host, line):
+ if "[Zuul] Task exit code" in line:
+ return True
+ elif self._streamers_stop and "[Zuul] Log not found" in line:
+ return True
+ elif "[Zuul] Log not found" in line:
+ # don't output this line
+ return False
+ else:
+ ts, ln = line.split(' | ', 1)
+
+ self._log("%s | %s " % (host, ln), ts=ts)
+ return False
def v2_playbook_on_start(self, playbook):
self._playbook_name = os.path.splitext(playbook._file_name)[0]
@@ -201,10 +202,11 @@
msg = u"PLAY [{name}]".format(name=name)
self._log(msg)
- # Log an extra blank line to get space after each play
- self._log("")
def v2_playbook_on_task_start(self, task, is_conditional):
+ # Log an extra blank line to get space before each task
+ self._log("")
+
self._task = task
if self._play.strategy != 'free':
@@ -275,7 +277,7 @@
if is_localhost:
for line in stdout_lines:
hostname = self._get_hostname(result)
- self._log("%s | %s " % (hostname, line.strip()))
+ self._log("%s | %s " % (hostname, line))
def v2_runner_on_failed(self, result, ignore_errors=False):
result_dict = dict(result._result)
@@ -302,8 +304,6 @@
result=result, status='ERROR', result_dict=result_dict)
if ignore_errors:
self._log_message(result, "Ignoring Errors", status="ERROR")
- # Log an extra blank line to get space after each task
- self._log("")
def v2_runner_on_skipped(self, result):
if result._task.loop:
@@ -314,8 +314,6 @@
if reason:
# No reason means it's an item, which we'll log differently
self._log_message(result, status='skipping', msg=reason)
- # Log an extra blank line to get space after each skip
- self._log("")
def v2_runner_item_on_skipped(self, result):
reason = result._result.get('skip_reason')
@@ -376,9 +374,20 @@
for key in [k for k in result_dict.keys()
if k.startswith('_ansible')]:
del result_dict[key]
- self._log_message(
- msg=json.dumps(result_dict, indent=2, sort_keys=True),
- status=status, result=result)
+ keyname = next(iter(result_dict.keys()))
+ # If it has msg, that means it was like:
+ #
+ # debug:
+ # msg: Some debug text the user was looking for
+ #
+ # So we log it with self._log to get just the raw string the
+ # user provided.
+ if keyname == 'msg':
+ self._log(msg=result_dict['msg'])
+ else:
+ self._log_message(
+ msg=json.dumps(result_dict, indent=2, sort_keys=True),
+ status=status, result=result)
elif result._task.action not in ('command', 'shell'):
if 'msg' in result_dict:
self._log_message(msg=result_dict['msg'],
@@ -391,16 +400,14 @@
for res in result_dict['results']:
self._log_message(
result,
- "Runtime: {delta} Start: {start} End: {end}".format(**res))
+ "Runtime: {delta}".format(**res))
elif result_dict.get('msg') == 'All items completed':
self._log_message(result, result_dict['msg'])
else:
self._log_message(
result,
- "Runtime: {delta} Start: {start} End: {end}".format(
+ "Runtime: {delta}".format(
**result_dict))
- # Log an extra blank line to get space after each task
- self._log("")
def v2_runner_item_on_ok(self, result):
result_dict = dict(result._result)
@@ -430,12 +437,11 @@
if isinstance(result_dict['item'], str):
self._log_message(
result,
- "Item: {item} Runtime: {delta}"
- " Start: {start} End: {end}".format(**result_dict))
+ "Item: {item} Runtime: {delta}".format(**result_dict))
else:
self._log_message(
result,
- "Item: Runtime: {delta} Start: {start} End: {end}".format(
+ "Item: Runtime: {delta}".format(
**result_dict))
if self._deferred_result:
@@ -462,10 +468,11 @@
if self._deferred_result:
self._process_deferred(result)
- # Log an extra blank line to get space after each task
- self._log("")
def v2_playbook_on_stats(self, stats):
+ # Add a spacer line before the stats so that there will be a line
+ # between the last task and the recap
+ self._log("")
self._log("PLAY RECAP")
@@ -554,7 +561,7 @@
msg = result_dict['msg']
result_dict = None
if msg:
- msg_lines = msg.strip().split('\n')
+ msg_lines = msg.rstrip().split('\n')
if len(msg_lines) > 1:
self._log("{host} | {status}:".format(
host=hostname, status=status))
diff --git a/zuul/ansible/library/command.py b/zuul/ansible/library/command.py
index f701b48..0fc6129 100644
--- a/zuul/ansible/library/command.py
+++ b/zuul/ansible/library/command.py
@@ -159,9 +159,14 @@
# Jenkins format but with microsecond resolution instead of
# millisecond. It is kept so log parsing/formatting remains
# consistent.
- ts = datetime.datetime.now()
- outln = '%s | %s' % (ts, ln)
- self.logfile.write(outln.encode('utf-8'))
+ ts = str(datetime.datetime.now()).encode('utf-8')
+ if not isinstance(ln, bytes):
+ try:
+ ln = ln.encode('utf-8')
+ except Exception:
+ ln = repr(ln).encode('utf-8') + b'\n'
+ outln = b'%s | %s' % (ts, ln)
+ self.logfile.write(outln)
def follow(fd, log_uuid):
diff --git a/zuul/ansible/logconfig.py b/zuul/ansible/logconfig.py
index 7c3507b..7ef43a8 100644
--- a/zuul/ansible/logconfig.py
+++ b/zuul/ansible/logconfig.py
@@ -13,6 +13,7 @@
# under the License.
import abc
+import copy
import logging.config
import json
import os
@@ -161,14 +162,15 @@
logging.config.dictConfig(self._config)
def writeJson(self, filename: str):
- open(filename, 'w').write(json.dumps(self._config, indent=2))
+ with open(filename, 'w') as f:
+ f.write(json.dumps(self._config, indent=2))
class JobLoggingConfig(DictLoggingConfig):
def __init__(self, config=None, job_output_file=None):
if not config:
- config = _DEFAULT_JOB_LOGGING_CONFIG.copy()
+ config = copy.deepcopy(_DEFAULT_JOB_LOGGING_CONFIG)
super(JobLoggingConfig, self).__init__(config=config)
if job_output_file:
self.job_output_file = job_output_file
@@ -190,7 +192,7 @@
def __init__(self, config=None, server=None):
if not config:
- config = _DEFAULT_SERVER_LOGGING_CONFIG.copy()
+ config = copy.deepcopy(_DEFAULT_SERVER_LOGGING_CONFIG)
super(ServerLoggingConfig, self).__init__(config=config)
if server:
self.server = server
@@ -206,7 +208,7 @@
# config above because we're templating out the filename. Also, we
# only want to add the handler if we're actually going to use it.
for name, handler in _DEFAULT_SERVER_FILE_HANDLERS.items():
- server_handler = handler.copy()
+ server_handler = copy.deepcopy(handler)
server_handler['filename'] = server_handler['filename'].format(
server=server)
self._config['handlers'][name] = server_handler
diff --git a/zuul/cmd/executor.py b/zuul/cmd/executor.py
index 06ef0ba..63c621d 100755
--- a/zuul/cmd/executor.py
+++ b/zuul/cmd/executor.py
@@ -82,7 +82,7 @@
self.log.info("Starting log streamer")
streamer = zuul.lib.log_streamer.LogStreamer(
- self.user, '0.0.0.0', self.finger_port, self.job_dir)
+ self.user, '::', self.finger_port, self.job_dir)
# Keep running until the parent dies:
pipe_read = os.fdopen(pipe_read)
diff --git a/zuul/cmd/migrate.py b/zuul/cmd/migrate.py
new file mode 100644
index 0000000..e9d6a10
--- /dev/null
+++ b/zuul/cmd/migrate.py
@@ -0,0 +1,613 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# TODO(mordred):
+# * Read and apply filters from the jobs: section
+# * Figure out shared job queues
+# * Emit job definitions
+# * figure out from builders whether or not it's a normal job or a
+# a devstack-legacy job
+# * Handle emitting arbitrary tox jobs (see tox-py27dj18)
+
+import argparse
+import collections
+import copy
+import itertools
+import logging
+import os
+import re
+from typing import Any, Dict, List, Optional # flake8: noqa
+
+import jenkins_jobs.builder
+from jenkins_jobs.formatter import deep_format
+import jenkins_jobs.formatter
+import jenkins_jobs.parser
+import yaml
+
+DESCRIPTION = """Migrate zuul v2 and Jenkins Job Builder to Zuul v3.
+
+This program takes a zuul v2 layout.yaml and a collection of Jenkins Job
+Builder job definitions and transforms them into a Zuul v3 config. An
+optional mapping config can be given that defines how to map old jobs
+to new jobs.
+"""
+
+
+def project_representer(dumper, data):
+ return dumper.represent_mapping('tag:yaml.org,2002:map',
+ data.items())
+
+
+def construct_yaml_map(self, node):
+ data = collections.OrderedDict()
+ yield data
+ value = self.construct_mapping(node)
+
+ if isinstance(node, yaml.MappingNode):
+ self.flatten_mapping(node)
+ else:
+ raise yaml.constructor.ConstructorError(
+ None, None,
+ 'expected a mapping node, but found %s' % node.id,
+ node.start_mark)
+
+ mapping = collections.OrderedDict()
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=False)
+ try:
+ hash(key)
+ except TypeError as exc:
+ raise yaml.constructor.ConstructorError(
+ 'while constructing a mapping', node.start_mark,
+ 'found unacceptable key (%s)' % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=False)
+ mapping[key] = value
+ data.update(mapping)
+
+
+class IndentedEmitter(yaml.emitter.Emitter):
+ def expect_block_sequence(self):
+ self.increase_indent(flow=False, indentless=False)
+ self.state = self.expect_first_block_sequence_item
+
+
+class IndentedDumper(IndentedEmitter, yaml.serializer.Serializer,
+ yaml.representer.Representer, yaml.resolver.Resolver):
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ IndentedEmitter.__init__(
+ self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break)
+ yaml.serializer.Serializer.__init__(
+ self, encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version, tags=tags)
+ yaml.representer.Representer.__init__(
+ self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ yaml.resolver.Resolver.__init__(self)
+
+
+def ordered_load(stream, *args, **kwargs):
+ return yaml.load(stream=stream, *args, **kwargs)
+
+
+def ordered_dump(data, stream=None, *args, **kwargs):
+ return yaml.dump(data, stream=stream, default_flow_style=False,
+ Dumper=IndentedDumper, width=80, *args, **kwargs)
+
+
+def get_single_key(var):
+ if isinstance(var, str):
+ return var
+ elif isinstance(var, list):
+ return var[0]
+ return list(var.keys())[0]
+
+
+def has_single_key(var):
+ if isinstance(var, list):
+ return len(var) == 1
+ if isinstance(var, str):
+ return True
+ dict_keys = list(var.keys())
+ if len(dict_keys) != 1:
+ return False
+    if var[get_single_key(var)]:
+ return False
+ return True
+
+
+def combination_matches(combination, match_combinations):
+ """
+    Checks whether the given combination matches any of the given
+    combination globs. A glob is a combination in which any missing key
+    is considered to match.
+
+ (key1=2, key2=3)
+
+ would match the combination match:
+ (key2=3)
+
+ but not:
+ (key1=2, key2=2)
+ """
+ for cmatch in match_combinations:
+ for key, val in combination.items():
+ if cmatch.get(key, val) != val:
+ break
+ else:
+ return True
+ return False
+
+
+def expandYamlForTemplateJob(self, project, template, jobs_glob=None):
+ dimensions = []
+ template_name = template['name']
+ orig_template = copy.deepcopy(template)
+
+ # reject keys that are not useful during yaml expansion
+ for k in ['jobs']:
+ project.pop(k)
+ excludes = project.pop('exclude', [])
+ for (k, v) in project.items():
+ tmpk = '{{{0}}}'.format(k)
+ if tmpk not in template_name:
+ continue
+ if type(v) == list:
+ dimensions.append(zip([k] * len(v), v))
+ # XXX somewhat hackish to ensure we actually have a single
+ # pass through the loop
+ if len(dimensions) == 0:
+ dimensions = [(("", ""),)]
+
+ for values in itertools.product(*dimensions):
+ params = copy.deepcopy(project)
+ params = self.applyDefaults(params, template)
+
+ expanded_values = {}
+ for (k, v) in values:
+ if isinstance(v, dict):
+ inner_key = next(iter(v))
+ expanded_values[k] = inner_key
+ expanded_values.update(v[inner_key])
+ else:
+ expanded_values[k] = v
+
+ params.update(expanded_values)
+ params = deep_format(params, params)
+ if combination_matches(params, excludes):
+ log = logging.getLogger("zuul.Migrate.YamlParser")
+ log.debug('Excluding combination %s', str(params))
+ continue
+
+ allow_empty_variables = self.config \
+ and self.config.has_section('job_builder') \
+ and self.config.has_option(
+ 'job_builder', 'allow_empty_variables') \
+ and self.config.getboolean(
+ 'job_builder', 'allow_empty_variables')
+
+ for key in template.keys():
+ if key not in params:
+ params[key] = template[key]
+
+ params['template-name'] = template_name
+ expanded = deep_format(template, params, allow_empty_variables)
+
+ job_name = expanded.get('name')
+        if jobs_glob and not jenkins_jobs.parser.matches(
+                job_name, jobs_glob):
+ continue
+
+ self.formatDescription(expanded)
+ expanded['orig_template'] = orig_template
+ expanded['template_name'] = template_name
+ self.jobs.append(expanded)
+
+
+jenkins_jobs.parser.YamlParser.expandYamlForTemplateJob = expandYamlForTemplateJob
+
+
+class JJB(jenkins_jobs.builder.Builder):
+ def __init__(self):
+ self.global_config = None
+ self._plugins_list = []
+
+ def expandComponent(self, component_type, component, template_data):
+ component_list_type = component_type + 's'
+ new_components = []
+ if isinstance(component, dict):
+ name, component_data = next(iter(component.items()))
+ if template_data:
+ component_data = jenkins_jobs.formatter.deep_format(
+ component_data, template_data, True)
+ else:
+ name = component
+ component_data = {}
+
+ new_component = self.parser.data.get(component_type, {}).get(name)
+ if new_component:
+ for new_sub_component in new_component[component_list_type]:
+ new_components.extend(
+ self.expandComponent(component_type,
+ new_sub_component, component_data))
+ else:
+ new_components.append({name: component_data})
+ return new_components
+
+ def expandMacros(self, job):
+ for component_type in ['builder', 'publisher', 'wrapper']:
+ component_list_type = component_type + 's'
+ new_components = []
+ for new_component in job.get(component_list_type, []):
+ new_components.extend(self.expandComponent(component_type,
+ new_component, {}))
+ job[component_list_type] = new_components
+
+
+class Job:
+
+ def __init__(self,
+ orig: str,
+ name: str=None,
+ content: Dict[str, Any]=None,
+ vars: Dict[str, str]=None,
+ required_projects: List[str]=None,
+ nodes: List[str]=None,
+ parent=None) -> None:
+ self.orig = orig
+ self.voting = True
+ self.name = name
+ self.content = content.copy() if content else None
+ self.vars = vars or {}
+ self.required_projects = required_projects or []
+ self.nodes = nodes or []
+ self.parent = parent
+
+ if self.content and not self.name:
+ self.name = get_single_key(content)
+ if not self.name:
+ self.name = self.orig.replace('-{name}', '').replace('{name}-', '')
+ if self.orig.endswith('-nv'):
+ self.voting = False
+ if self.name.endswith('-nv'):
+ # NOTE(mordred) This MIGHT not be safe - it's possible, although
+ # silly, for someone to have -nv and normal versions of the same
+ # job in the same pipeline. Let's deal with that if we find it
+ # though.
+ self.name = self.name.replace('-nv', '')
+
+ def _stripNodeName(self, node):
+ node_key = '-{node}'.format(node=node)
+ self.name = self.name.replace(node_key, '')
+
+ def setVars(self, vars):
+ self.vars = vars
+
+ def setRequiredProjects(self, required_projects):
+ self.required_projects = required_projects
+
+ def setParent(self, parent):
+ self.parent = parent
+
+ def extractNode(self, default_node, labels):
+ matching_label = None
+ for label in labels:
+ if label in self.orig:
+ if not matching_label:
+ matching_label = label
+ elif len(label) > len(matching_label):
+ matching_label = label
+
+ if matching_label:
+ if matching_label == default_node:
+ self._stripNodeName(matching_label)
+ else:
+ self.nodes.append(matching_label)
+
+ def getDepends(self):
+ return [self.parent.name]
+
+ def getNodes(self):
+ return self.nodes
+
+ def toDict(self):
+ if self.content:
+ output = self.content
+ else:
+ output = collections.OrderedDict()
+ output[self.name] = collections.OrderedDict()
+
+ if self.parent:
+ output[self.name].setdefault('dependencies', self.getDepends())
+
+ if not self.voting:
+ output[self.name].setdefault('voting', False)
+
+ if self.nodes:
+ output[self.name].setdefault('nodes', self.getNodes())
+
+ if self.required_projects:
+ output[self.name].setdefault(
+ 'required-projects', self.required_projects)
+
+ if self.vars:
+            job_vars = output[self.name].setdefault(
+                'vars', collections.OrderedDict())
+            job_vars.update(self.vars)
+
+ if not output[self.name]:
+ return self.name
+ return output
+
+
+class JobMapping:
+ log = logging.getLogger("zuul.Migrate.JobMapping")
+
+ def __init__(self, nodepool_config, mapping_file=None):
+ self.job_direct = {}
+ self.labels = []
+ self.job_mapping = []
+ self.template_mapping = {}
+        with open(nodepool_config, 'r') as f:
+            nodepool_data = ordered_load(f)
+ for label in nodepool_data['labels']:
+ self.labels.append(label['name'])
+ if not mapping_file:
+ self.default_node = 'ubuntu-xenial'
+ else:
+            with open(mapping_file, 'r') as f:
+                mapping_data = ordered_load(f)
+ self.default_node = mapping_data['default-node']
+ for map_info in mapping_data.get('job-mapping', []):
+ if map_info['old'].startswith('^'):
+ map_info['pattern'] = re.compile(map_info['old'])
+ self.job_mapping.append(map_info)
+ else:
+ self.job_direct[map_info['old']] = map_info['new']
+
+ for map_info in mapping_data.get('template-mapping', []):
+ self.template_mapping[map_info['old']] = map_info['new']
+
+ def makeNewName(self, new_name, match_dict):
+ return new_name.format(**match_dict)
+
+ def hasProjectTemplate(self, old_name):
+ return old_name in self.template_mapping
+
+ def getNewTemplateName(self, old_name):
+ return self.template_mapping.get(old_name, old_name)
+
+ def mapNewJob(self, name, info) -> Optional[Job]:
+ matches = info['pattern'].search(name)
+ if not matches:
+ return None
+ match_dict = matches.groupdict()
+ if isinstance(info['new'], dict):
+ job = Job(orig=name, content=info['new'])
+ else:
+ job = Job(orig=name, name=info['new'].format(**match_dict))
+
+ if 'vars' in info:
+ job.setVars(self._expandVars(info, match_dict))
+
+ if 'required-projects' in info:
+ job.setRequiredProjects(
+ self._expandRequiredProjects(info, match_dict))
+ return job
+
+ def _expandVars(self, info, match_dict):
+ job_vars = info['vars'].copy()
+ for key in job_vars.keys():
+ job_vars[key] = job_vars[key].format(**match_dict)
+ return job_vars
+
+ def _expandRequiredProjects(self, info, match_dict):
+ required_projects = []
+ job_projects = info['required-projects'].copy()
+ for project in job_projects:
+ required_projects.append(project.format(**match_dict))
+ return required_projects
+
+ def getNewJob(self, job_name, remove_gate):
+ if job_name in self.job_direct:
+ if isinstance(self.job_direct[job_name], dict):
+ return Job(job_name, content=self.job_direct[job_name])
+ else:
+ return Job(job_name, name=self.job_direct[job_name])
+
+ new_job = None
+ for map_info in self.job_mapping:
+ new_job = self.mapNewJob(job_name, map_info)
+ if new_job:
+ break
+ if not new_job:
+ if remove_gate:
+ job_name = job_name.replace('gate-', '', 1)
+ job_name = 'legacy-{job_name}'.format(job_name=job_name)
+ new_job = Job(orig=job_name, name=job_name)
+
+ new_job.extractNode(self.default_node, self.labels)
+ return new_job
+
+
+class ZuulMigrate:
+
+ log = logging.getLogger("zuul.Migrate")
+
+ def __init__(self, layout, job_config, nodepool_config,
+ outdir, mapping, move):
+        with open(layout, 'r') as f:
+            self.layout = ordered_load(f)
+ self.job_config = job_config
+ self.outdir = outdir
+ self.mapping = JobMapping(nodepool_config, mapping)
+ self.move = move
+
+ self.jobs = {}
+
+ def run(self):
+ self.loadJobs()
+ self.convertJobs()
+ self.writeJobs()
+
+ def loadJobs(self):
+ self.log.debug("Loading jobs")
+ builder = JJB()
+ builder.load_files([self.job_config])
+ builder.parser.expandYaml()
+ unseen = set(self.jobs.keys())
+ for job in builder.parser.jobs:
+ builder.expandMacros(job)
+ self.jobs[job['name']] = job
+ unseen.discard(job['name'])
+ for name in unseen:
+ del self.jobs[name]
+
+ def convertJobs(self):
+ pass
+
+ def setupDir(self):
+ zuul_yaml = os.path.join(self.outdir, 'zuul.yaml')
+ zuul_d = os.path.join(self.outdir, 'zuul.d')
+ orig = os.path.join(zuul_d, '01zuul.yaml')
+ outfile = os.path.join(zuul_d, '99converted.yaml')
+ if not os.path.exists(self.outdir):
+ os.makedirs(self.outdir)
+ if not os.path.exists(zuul_d):
+ os.makedirs(zuul_d)
+ if os.path.exists(zuul_yaml) and self.move:
+ os.rename(zuul_yaml, orig)
+ return outfile
+
+ def makeNewJobs(self, old_job, parent: Job=None):
+ self.log.debug("makeNewJobs(%s)", old_job)
+ if isinstance(old_job, str):
+ remove_gate = True
+ if old_job.startswith('gate-'):
+ # Check to see if gate- and bare versions exist
+ if old_job.replace('gate-', '', 1) in self.jobs:
+ remove_gate = False
+ job = self.mapping.getNewJob(old_job, remove_gate)
+ if parent:
+ job.setParent(parent)
+ return [job]
+
+ new_list = [] # type: ignore
+ if isinstance(old_job, list):
+ for job in old_job:
+ new_list.extend(self.makeNewJobs(job, parent=parent))
+
+ elif isinstance(old_job, dict):
+ parent_name = get_single_key(old_job)
+ parent = Job(orig=parent_name, parent=parent)
+
+ jobs = self.makeNewJobs(old_job[parent_name], parent=parent)
+ for job in jobs:
+ new_list.append(job)
+ new_list.append(parent)
+ return new_list
+
+ def writeProjectTemplate(self, template):
+ new_template = collections.OrderedDict()
+ if 'name' in template:
+ new_template['name'] = template['name']
+ for key, value in template.items():
+ if key == 'name':
+ continue
+ jobs = [job.toDict() for job in self.makeNewJobs(value)]
+ new_template[key] = dict(jobs=jobs)
+
+ return new_template
+
+ def writeProject(self, project):
+ new_project = collections.OrderedDict()
+ if 'name' in project:
+ new_project['name'] = project['name']
+ if 'template' in project:
+ new_project['template'] = []
+ for template in project['template']:
+ new_project['template'].append(dict(
+ name=self.mapping.getNewTemplateName(template['name'])))
+ for key, value in project.items():
+ if key in ('name', 'template'):
+ continue
+ else:
+ jobs = [job.toDict() for job in self.makeNewJobs(value)]
+ new_project[key] = dict(jobs=jobs)
+
+ return new_project
+
+ def writeJobs(self):
+ outfile = self.setupDir()
+ config = []
+
+ for template in self.layout.get('project-templates', []):
+ self.log.debug("Processing template: %s", template)
+ if not self.mapping.hasProjectTemplate(template['name']):
+ config.append(
+ {'project-template': self.writeProjectTemplate(template)})
+
+ for project in self.layout.get('projects', []):
+ config.append(
+ {'project': self.writeProject(project)})
+
+ with open(outfile, 'w') as yamlout:
+ # Insert an extra space between top-level list items
+ yamlout.write(ordered_dump(config).replace('\n-', '\n\n-'))
+
+
+def main():
+ yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+ construct_yaml_map)
+
+ yaml.add_representer(collections.OrderedDict, project_representer,
+ Dumper=IndentedDumper)
+
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
+ parser.add_argument(
+ 'layout',
+ help="The Zuul v2 layout.yaml file to read.")
+ parser.add_argument(
+ 'job_config',
+ help="Directory containing Jenkins Job Builder job definitions.")
+ parser.add_argument(
+ 'nodepool_config',
+ help="Nodepool config file containing complete set of node names")
+ parser.add_argument(
+ 'outdir',
+ help="A directory into which the Zuul v3 config will be written.")
+ parser.add_argument(
+ '--mapping',
+ default=None,
+ help="A filename with a yaml mapping of old name to new name.")
+ parser.add_argument(
+ '-v', dest='verbose', action='store_true', help='verbose output')
+ parser.add_argument(
+ '-m', dest='move', action='store_true',
+ help='Move zuul.yaml to zuul.d if it exists')
+
+ args = parser.parse_args()
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ ZuulMigrate(args.layout, args.job_config, args.nodepool_config,
+ args.outdir, args.mapping, args.move).run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 94c0d2a..13fc310 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -76,6 +76,15 @@
super(MaxNodeError, self).__init__(message)
+class MaxTimeoutError(Exception):
+ def __init__(self, job, tenant):
+ message = textwrap.dedent("""\
+ The job "{job}" exceeds tenant max-job-timeout {maxtimeout}.""")
+ message = textwrap.fill(message.format(
+ job=job.name, maxtimeout=tenant.max_job_timeout))
+ super(MaxTimeoutError, self).__init__(message)
+
+
class DuplicateGroupError(Exception):
def __init__(self, nodeset, group):
message = textwrap.dedent("""\
@@ -120,6 +129,30 @@
@contextmanager
+def early_configuration_exceptions(context):
+ try:
+ yield
+ except ConfigurationSyntaxError:
+ raise
+ except Exception as e:
+ intro = textwrap.fill(textwrap.dedent("""\
+ Zuul encountered a syntax error while parsing its configuration in the
+ repo {repo} on branch {branch}. The error was:""".format(
+ repo=context.project.name,
+ branch=context.branch,
+ )))
+
+ m = textwrap.dedent("""\
+ {intro}
+
+ {error}""")
+
+ m = m.format(intro=intro,
+ error=indent(str(e)))
+ raise ConfigurationSyntaxError(m)
+
+
+@contextmanager
def configuration_exceptions(stanza, conf):
try:
yield
@@ -481,6 +514,10 @@
if secrets and not conf['_source_context'].trusted:
job.post_review = True
+ if conf.get('timeout') and tenant.max_job_timeout != -1 and \
+ int(conf['timeout']) > tenant.max_job_timeout:
+ raise MaxTimeoutError(job, tenant)
+
if 'post-review' in conf:
if conf['post-review']:
job.post_review = True
@@ -1035,6 +1072,7 @@
def getSchema(connections=None):
tenant = {vs.Required('name'): str,
'max-nodes-per-job': int,
+ 'max-job-timeout': int,
'source': TenantParser.validateTenantSources(connections),
'exclude-unprotected-branches': bool,
'default-parent': str,
@@ -1048,6 +1086,8 @@
tenant = model.Tenant(conf['name'])
if conf.get('max-nodes-per-job') is not None:
tenant.max_nodes_per_job = conf['max-nodes-per-job']
+ if conf.get('max-job-timeout') is not None:
+ tenant.max_job_timeout = int(conf['max-job-timeout'])
if conf.get('exclude-unprotected-branches') is not None:
tenant.exclude_unprotected_branches = \
conf['exclude-unprotected-branches']
@@ -1318,6 +1358,8 @@
continue
TenantParser.log.debug("Waiting for cat job %s" % (job,))
job.wait()
+ if not job.updated:
+ raise Exception("Cat job %s failed" % (job,))
TenantParser.log.debug("Cat job %s got files %s" %
(job, job.files))
loaded = False
@@ -1367,13 +1409,15 @@
def _parseConfigProjectLayout(data, source_context):
# This is the top-level configuration for a tenant.
config = model.UnparsedTenantConfig()
- config.extend(safe_load_yaml(data, source_context))
+ with early_configuration_exceptions(source_context):
+ config.extend(safe_load_yaml(data, source_context))
return config
@staticmethod
def _parseUntrustedProjectLayout(data, source_context):
config = model.UnparsedTenantConfig()
- config.extend(safe_load_yaml(data, source_context))
+ with early_configuration_exceptions(source_context):
+ config.extend(safe_load_yaml(data, source_context))
if config.pipelines:
with configuration_exceptions('pipeline', config.pipelines[0]):
raise PipelineNotPermittedError()
@@ -1471,6 +1515,8 @@
@staticmethod
def _parseLayout(base, tenant, data, scheduler, connections):
+ # Don't call this method from dynamic reconfiguration because
+ # it interacts with drivers and connections.
layout = model.Layout(tenant)
TenantParser._parseLayoutItems(layout, tenant, data,
@@ -1582,7 +1628,8 @@
config.extend(incdata)
def createDynamicLayout(self, tenant, files,
- include_config_projects=False):
+ include_config_projects=False,
+ scheduler=None, connections=None):
if include_config_projects:
config = model.UnparsedTenantConfig()
for project in tenant.config_projects:
@@ -1594,22 +1641,29 @@
self._loadDynamicProjectData(config, project, files, False, tenant)
layout = model.Layout(tenant)
- # NOTE: the actual pipeline objects (complete with queues and
- # enqueued items) are copied by reference here. This allows
- # our shadow dynamic configuration to continue to interact
- # with all the other changes, each of which may have their own
- # version of reality. We do not support creating, updating,
- # or deleting pipelines in dynamic layout changes.
- layout.pipelines = tenant.layout.pipelines
+ if not include_config_projects:
+ # NOTE: the actual pipeline objects (complete with queues
+ # and enqueued items) are copied by reference here. This
+ # allows our shadow dynamic configuration to continue to
+ # interact with all the other changes, each of which may
+ # have their own version of reality. We do not support
+ # creating, updating, or deleting pipelines in dynamic
+ # layout changes.
+ layout.pipelines = tenant.layout.pipelines
- # NOTE: the semaphore definitions are copied from the static layout
- # here. For semaphores there should be no per patch max value but
- # exactly one value at any time. So we do not support dynamic semaphore
- # configuration changes.
- layout.semaphores = tenant.layout.semaphores
+ # NOTE: the semaphore definitions are copied from the
+ # static layout here. For semaphores there should be no
+ # per patch max value but exactly one value at any
+ # time. So we do not support dynamic semaphore
+ # configuration changes.
+ layout.semaphores = tenant.layout.semaphores
+ skip_pipelines = skip_semaphores = True
+ else:
+ skip_pipelines = skip_semaphores = False
- TenantParser._parseLayoutItems(layout, tenant, config, None, None,
- skip_pipelines=True,
- skip_semaphores=True)
+ TenantParser._parseLayoutItems(layout, tenant, config,
+ scheduler, connections,
+ skip_pipelines=skip_pipelines,
+ skip_semaphores=skip_semaphores)
return layout
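
A standalone sketch of the max-job-timeout check added above (names hypothetical, not part of the patch): a job's configured timeout may not exceed the tenant limit unless the limit is disabled with -1.

def check_timeout(job_timeout, max_job_timeout=10800):
    # Mirrors the validation added to JobParser: -1 disables the limit.
    if job_timeout and max_job_timeout != -1 \
            and int(job_timeout) > max_job_timeout:
        raise ValueError("timeout %s exceeds tenant max-job-timeout %s"
                         % (job_timeout, max_job_timeout))

check_timeout(3600)        # within the default limit
check_timeout(86400, -1)   # limit disabled
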
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 35137c7..ecf5f94 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -133,25 +133,42 @@
event.branch_deleted = True
event.branch = event.ref
- if event.change_number:
- # TODO(jhesketh): Check if the project exists?
- # and self.connection.sched.getProject(event.project_name):
-
- # Call _getChange for the side effect of updating the
- # cache. Note that this modifies Change objects outside
- # the main thread.
- # NOTE(jhesketh): Ideally we'd just remove the change from the
- # cache to denote that it needs updating. However the change
- # object is already used by Items and hence BuildSets etc. and
- # we need to update those objects by reference so that they have
- # the correct/new information and also avoid hitting gerrit
- # multiple times.
- self.connection._getChange(event.change_number,
- event.patch_number,
- refresh=True)
+ self._getChange(event)
self.connection.logEvent(event)
self.connection.sched.addEvent(event)
+ def _getChange(self, event):
+ # Grab the change if we are managing the project or if it exists in the
+ # cache as it may be a dependency
+ if event.change_number:
+ refresh = True
+ if event.change_number not in self.connection._change_cache:
+ refresh = False
+ for tenant in self.connection.sched.abide.tenants.values():
+ # TODO(fungi): it would be better to have some simple means
+ # of inferring the hostname from the connection, or at
+ # least split this into separate method arguments, rather
+ # than assembling and passing in a baked string.
+ if (None, None) != tenant.getProject('/'.join((
+ self.connection.canonical_hostname,
+ event.project_name))):
+ refresh = True
+ break
+
+ if refresh:
+ # Call _getChange for the side effect of updating the
+ # cache. Note that this modifies Change objects outside
+ # the main thread.
+ # NOTE(jhesketh): Ideally we'd just remove the change from the
+ # cache to denote that it needs updating. However the change
+ # object is already used by Items and hence BuildSets etc. and
+ # we need to update those objects by reference so that they
+ # have the correct/new information and also avoid hitting
+ # gerrit multiple times.
+ self.connection._getChange(event.change_number,
+ event.patch_number,
+ refresh=True)
+
def run(self):
while True:
if self._stopped:
@@ -298,12 +315,17 @@
# This lets the user supply a list of change objects that are
# still in use. Anything in our cache that isn't in the supplied
# list should be safe to remove from the cache.
- remove = []
- for key, change in self._change_cache.items():
- if change not in relevant:
- remove.append(key)
- for key in remove:
- del self._change_cache[key]
+ remove = {}
+ for change_number, patchsets in self._change_cache.items():
+ for patchset, change in patchsets.items():
+ if change not in relevant:
+ remove.setdefault(change_number, [])
+ remove[change_number].append(patchset)
+ for change_number, patchsets in remove.items():
+ for patchset in patchsets:
+ del self._change_cache[change_number][patchset]
+ if not self._change_cache[change_number]:
+ del self._change_cache[change_number]
def getChange(self, event, refresh=False):
if event.change_number:
@@ -349,21 +371,22 @@
return change
def _getChange(self, number, patchset, refresh=False, history=None):
- key = '%s,%s' % (number, patchset)
- change = self._change_cache.get(key)
+ change = self._change_cache.get(number, {}).get(patchset)
if change and not refresh:
return change
if not change:
change = GerritChange(None)
change.number = number
change.patchset = patchset
- key = '%s,%s' % (change.number, change.patchset)
- self._change_cache[key] = change
+ self._change_cache.setdefault(change.number, {})
+ self._change_cache[change.number][change.patchset] = change
try:
self._updateChange(change, history)
except Exception:
- if key in self._change_cache:
- del self._change_cache[key]
+ if self._change_cache.get(change.number, {}).get(change.patchset):
+ del self._change_cache[change.number][change.patchset]
+ if not self._change_cache[change.number]:
+ del self._change_cache[change.number]
raise
return change
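
The cache restructuring above, in miniature: keys move from flat '<number>,<patchset>' strings to a two-level dict, so the event handler can test membership by change number alone, and pruning removes empty per-change dicts (a sketch, not part of the patch).

cache = {}
cache.setdefault(123456, {})[2] = object()   # cache patchset 2 of change 123456

assert 123456 in cache        # membership test by change number alone

del cache[123456][2]          # prune one patchset...
if not cache[123456]:
    del cache[123456]         # ...and the now-empty change entry
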
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 0ce6ef5..fca36c8 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -17,6 +17,8 @@
import logging
import hmac
import hashlib
+import queue
+import threading
import time
import re
@@ -80,11 +82,10 @@
delivery=delivery))
self._validate_signature(request)
+ # TODO(jlk): Validate project in the request is a project we know
try:
self.__dispatch_event(request)
- except webob.exc.HTTPNotFound:
- raise
except:
self.log.exception("Exception handling Github event:")
@@ -98,20 +99,58 @@
'header.')
try:
- method = getattr(self, '_event_' + event)
- except AttributeError:
- message = "Unhandled X-Github-Event: {0}".format(event)
- self.log.debug(message)
- # Returns empty 200 on unhandled events
- raise webob.exc.HTTPOk()
-
- try:
json_body = request.json_body
+ self.connection.addEvent(json_body, event)
except:
message = 'Exception deserializing JSON body'
self.log.exception(message)
raise webob.exc.HTTPBadRequest(message)
+ def _validate_signature(self, request):
+ secret = self.connection.connection_config.get('webhook_token', None)
+ if secret is None:
+ raise RuntimeError("webhook_token is required")
+
+ body = request.body
+ try:
+ request_signature = request.headers['X-Hub-Signature']
+ except KeyError:
+ raise webob.exc.HTTPUnauthorized(
+ 'Please specify an X-Hub-Signature header with secret.')
+
+ payload_signature = _sign_request(body, secret)
+
+ self.log.debug("Payload Signature: {0}".format(str(payload_signature)))
+ self.log.debug("Request Signature: {0}".format(str(request_signature)))
+ if not hmac.compare_digest(
+ str(payload_signature), str(request_signature)):
+ raise webob.exc.HTTPUnauthorized(
+ 'Request signature does not match calculated payload '
+ 'signature. Check that secret is correct.')
+
+ return True
+
+
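
_sign_request is not shown in this hunk; assuming it follows GitHub's webhook scheme, the comparison above checks an HMAC-SHA1 of the raw request body, hex-encoded and prefixed with 'sha1=' (a sketch of the assumed helper, not Zuul's exact code).

import hashlib
import hmac

def sign_payload(body: bytes, secret: str) -> str:
    # GitHub sends this value in the X-Hub-Signature header.
    digest = hmac.new(secret.encode('utf-8'), body, hashlib.sha1).hexdigest()
    return 'sha1=' + digest
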
+class GithubEventConnector(threading.Thread):
+ """Move events from GitHub into the scheduler"""
+
+ log = logging.getLogger("zuul.GithubEventConnector")
+
+ def __init__(self, connection):
+ super(GithubEventConnector, self).__init__()
+ self.daemon = True
+ self.connection = connection
+ self._stopped = False
+
+ def stop(self):
+ self._stopped = True
+ self.connection.addEvent(None)
+
+ def _handleEvent(self):
+ json_body, event_type = self.connection.getEvent()
+ if self._stopped:
+ return
+
# If there's any installation mapping information in the body then
# update the project mapping before any requests are made.
installation_id = json_body.get('installation', {}).get('id')
@@ -127,9 +166,17 @@
self.connection.installation_map[project_name] = installation_id
try:
+ method = getattr(self, '_event_' + event_type)
+ except AttributeError:
+ # TODO(jlk): Gracefully handle event types we don't care about
+ # instead of logging an exception.
+ message = "Unhandled X-Github-Event: {0}".format(event_type)
+ self.log.debug(message)
+ # Return without further processing on unhandled events
+ return
+
+ try:
event = method(json_body)
- except webob.exc.HTTPNotFound:
- raise
except:
self.log.exception('Exception when handling event:')
event = None
@@ -240,14 +287,6 @@
event.action = body.get('action')
return event
- def _event_ping(self, body):
- project_name = body['repository']['full_name']
- if not self.connection.getProject(project_name):
- self.log.warning("Ping received for unknown project %s" %
- project_name)
- raise webob.exc.HTTPNotFound("Sorry, this project is not "
- "registered")
-
def _event_status(self, body):
action = body.get('action')
if action == 'pending':
@@ -277,30 +316,6 @@
(number, project_name))
return pr_body
- def _validate_signature(self, request):
- secret = self.connection.connection_config.get('webhook_token', None)
- if secret is None:
- raise RuntimeError("webhook_token is required")
-
- body = request.body
- try:
- request_signature = request.headers['X-Hub-Signature']
- except KeyError:
- raise webob.exc.HTTPUnauthorized(
- 'Please specify a X-Hub-Signature header with secret.')
-
- payload_signature = _sign_request(body, secret)
-
- self.log.debug("Payload Signature: {0}".format(str(payload_signature)))
- self.log.debug("Request Signature: {0}".format(str(request_signature)))
- if not hmac.compare_digest(
- str(payload_signature), str(request_signature)):
- raise webob.exc.HTTPUnauthorized(
- 'Request signature does not match calculated payload '
- 'signature. Check that secret is correct.')
-
- return True
-
def _pull_request_to_event(self, pr_body):
event = GithubTriggerEvent()
event.trigger_name = 'github'
@@ -327,6 +342,17 @@
if login:
return self.connection.getUser(login)
+ def run(self):
+ while True:
+ if self._stopped:
+ return
+ try:
+ self._handleEvent()
+ except:
+ self.log.exception("Exception moving GitHub event:")
+ finally:
+ self.connection.eventDone()
+
class GithubUser(collections.Mapping):
log = logging.getLogger('zuul.GithubUser')
@@ -376,6 +402,7 @@
self.canonical_hostname = self.connection_config.get(
'canonical_hostname', self.server)
self.source = driver.getSource(self)
+ self.event_queue = queue.Queue()
# ssl verification must default to true
verify_ssl = self.connection_config.get('verify_ssl', 'true')
@@ -408,9 +435,20 @@
self.registerHttpHandler(self.payload_path,
webhook_listener.handle_request)
self._authenticateGithubAPI()
+ self._start_event_connector()
def onStop(self):
self.unregisterHttpHandler(self.payload_path)
+ self._stop_event_connector()
+
+ def _start_event_connector(self):
+ self.github_event_connector = GithubEventConnector(self)
+ self.github_event_connector.start()
+
+ def _stop_event_connector(self):
+ if self.github_event_connector:
+ self.github_event_connector.stop()
+ self.github_event_connector.join()
def _createGithubClient(self):
if self.server != 'github.com':
@@ -504,6 +542,15 @@
return token
+ def addEvent(self, data, event=None):
+ return self.event_queue.put((data, event))
+
+ def getEvent(self):
+ return self.event_queue.get()
+
+ def eventDone(self):
+ self.event_queue.task_done()
+
def getGithubClient(self,
project=None,
user_id=None,
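
The refactor above is a producer/consumer split: the webhook handler only enqueues the JSON body, and a daemon thread drains the queue, so slow event processing can no longer delay or fail the HTTP response. A minimal sketch of the pattern (not part of the patch); the (None, None) sentinel mirrors stop() calling addEvent(None).

import queue
import threading

events = queue.Queue()

def worker():
    while True:
        body, event_type = events.get()
        try:
            if body is None:      # stop sentinel wakes the blocked get()
                return
            print('handling', event_type)
        finally:
            events.task_done()

t = threading.Thread(target=worker, daemon=True)
t.start()
events.put(({'action': 'opened'}, 'pull_request'))
events.put((None, None))
t.join()
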
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index d76fafd..afdc747 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -28,7 +28,7 @@
class SQLConnection(BaseConnection):
driver_name = 'sql'
- log = logging.getLogger("connection.sql")
+ log = logging.getLogger("zuul.SQLConnection")
def __init__(self, driver, connection_name, connection_config):
diff --git a/zuul/driver/sql/sqlreporter.py b/zuul/driver/sql/sqlreporter.py
index 7c79176..ca35577 100644
--- a/zuul/driver/sql/sqlreporter.py
+++ b/zuul/driver/sql/sqlreporter.py
@@ -23,7 +23,7 @@
"""Sends off reports to a database."""
name = 'sql'
- log = logging.getLogger("zuul.reporter.mysql.SQLReporter")
+ log = logging.getLogger("zuul.SQLReporter")
def report(self, item):
"""Create an entry into a database."""
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index be41186..f97d286 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -175,6 +175,8 @@
zuul_params['tag'] = item.change.tag
if hasattr(item.change, 'number'):
zuul_params['change'] = str(item.change.number)
+ if hasattr(item.change, 'url'):
+ zuul_params['change_url'] = item.change.url
if hasattr(item.change, 'patchset'):
zuul_params['patchset'] = str(item.change.patchset)
if (hasattr(item.change, 'oldrev') and item.change.oldrev
@@ -196,6 +198,8 @@
)
if hasattr(i.change, 'number'):
d['change'] = str(i.change.number)
+ if hasattr(i.change, 'url'):
+ d['change_url'] = i.change.url
if hasattr(i.change, 'patchset'):
d['patchset'] = str(i.change.patchset)
if hasattr(i.change, 'branch'):
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index b7c2f7f..62b9716 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -36,7 +36,7 @@
import gear
import zuul.merger.merger
-import zuul.ansible
+import zuul.ansible.logconfig
from zuul.lib import commandsocket
BUFFER_LINES_FOR_SYNTAX = 200
@@ -946,7 +946,7 @@
repos += playbook['roles']
for repo in repos:
- self.log.debug("Updating playbook or role %s" % (repo,))
+ self.log.debug("Updating playbook or role %s" % (repo['project'],))
key = (repo['connection'], repo['project'])
if key not in projects:
tasks.append(self.executor_server.update(*key))
@@ -1162,6 +1162,7 @@
ansible_user=self.executor_server.default_username,
ansible_port=port,
nodepool=dict(
+ label=node.get('label'),
az=node.get('az'),
cloud=node.get('cloud'),
provider=node.get('provider'),
@@ -1617,7 +1618,8 @@
now=datetime.datetime.now()))
for line in syntax_buffer:
job_output.write("{now} | {line}\n".format(
- now=datetime.datetime.now(), line=line))
+ now=datetime.datetime.now(),
+ line=line.decode('utf-8').rstrip()))
return (self.RESULT_NORMAL, ret)
@@ -1633,7 +1635,7 @@
cmd.extend(['-e', '@' + playbook.secrets])
if success is not None:
- cmd.extend(['-e', 'success=%s' % str(bool(success))])
+ cmd.extend(['-e', 'zuul_success=%s' % str(bool(success))])
if phase:
cmd.extend(['-e', 'zuul_execution_phase=%s' % phase])
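
The rename above means post-run playbooks receive the run phase's outcome as zuul_success rather than the easily-shadowed name success. A sketch of the resulting invocation:

cmd = ['ansible-playbook', 'post.yaml']
success = True
cmd.extend(['-e', 'zuul_success=%s' % str(bool(success))])
print(cmd)  # playbooks then test it with: when: zuul_success | bool
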
diff --git a/zuul/lib/commandsocket.py b/zuul/lib/commandsocket.py
index 901291a..2836999 100644
--- a/zuul/lib/commandsocket.py
+++ b/zuul/lib/commandsocket.py
@@ -44,9 +44,9 @@
# First, wake up our listener thread with a connection and
# tell it to stop running.
self.running = False
- s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- s.connect(self.path)
- s.sendall(b'_stop\n')
+ with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
+ s.connect(self.path)
+ s.sendall(b'_stop\n')
# The command '_stop' will be ignored by our listener, so
# directly inject it into the queue so that consumers of this
# class which are waiting in .get() are awakened. They can
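
The change above only adds a context manager, but the pattern is worth noting: stop() wakes the listener blocked in accept() by connecting to its own Unix socket and sending a '_stop' token, and the with-block now guarantees the client side is closed even if connect() or sendall() raises. Sketched (not part of the patch):

import socket

def wake_listener(path):
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
        s.connect(path)
        s.sendall(b'_stop\n')
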
diff --git a/zuul/lib/log_streamer.py b/zuul/lib/log_streamer.py
index 57afef9..3ecaf4d 100644
--- a/zuul/lib/log_streamer.py
+++ b/zuul/lib/log_streamer.py
@@ -168,6 +168,8 @@
'''
Custom version that allows us to drop privileges after port binding.
'''
+ address_family = socket.AF_INET6
+
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
self.jobdir_root = kwargs.pop('jobdir_root')
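
Overriding address_family makes the streamer bind an IPv6 socket; on typical hosts (net.ipv6.bindv6only=0), binding '::' yields a dual-stack listener that also accepts IPv4 clients as IPv4-mapped addresses. A minimal sketch with a hypothetical handler class:

import socket
import socketserver

class DualStackServer(socketserver.ThreadingTCPServer):
    address_family = socket.AF_INET6

# server = DualStackServer(('::', 7900), SomeHandler)  # SomeHandler is hypothetical
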
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 8282f86..98c7350 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -444,7 +444,9 @@
loader.createDynamicLayout(
item.pipeline.layout.tenant,
build_set.files,
- include_config_projects=True)
+ include_config_projects=True,
+ scheduler=self.sched,
+ connections=self.sched.connections)
# Then create the config a second time but without changes
# to config repos so that we actually use this config.
@@ -527,11 +529,12 @@
if not item.job_graph:
try:
+ self.log.debug("Freezing job graph for %s" % (item,))
item.freezeJobGraph()
except Exception as e:
# TODOv3(jeblair): nicify this exception as it will be reported
self.log.exception("Error freezing job graph for %s" %
- item)
+ (item,))
item.setConfigError("Unable to freeze job graph: %s" %
(str(e)))
return False
@@ -540,6 +543,7 @@
def _processOneItem(self, item, nnfi):
changed = False
ready = False
+ dequeued = False
failing_reasons = [] # Reasons this item is failing
item_ahead = item.item_ahead
@@ -594,8 +598,14 @@
item.reported_start = True
if item.current_build_set.unable_to_merge:
failing_reasons.append("it has a merge conflict")
+ if (not item.live) and (not dequeued):
+ self.dequeueItem(item)
+ changed = dequeued = True
if item.current_build_set.config_error:
failing_reasons.append("it has an invalid configuration")
+ if (not item.live) and (not dequeued):
+ self.dequeueItem(item)
+ changed = dequeued = True
if ready and self.provisionNodes(item):
changed = True
if ready and self.executeJobs(item):
@@ -603,10 +613,10 @@
if item.didAnyJobFail():
failing_reasons.append("at least one job failed")
- if (not item.live) and (not item.items_behind):
+ if (not item.live) and (not item.items_behind) and (not dequeued):
failing_reasons.append("is a non-live item with no items behind")
self.dequeueItem(item)
- changed = True
+ changed = dequeued = True
if ((not item_ahead) and item.areAllJobsComplete() and item.live):
try:
self.reportItem(item)
@@ -618,7 +628,7 @@
(item_behind.change, item))
self.cancelJobs(item_behind)
self.dequeueItem(item)
- changed = True
+ changed = dequeued = True
elif not failing_reasons and item.live:
nnfi = item
item.current_build_set.failing_reasons = failing_reasons
@@ -743,9 +753,12 @@
layout = (item.current_build_set.layout or
self.pipeline.layout)
- if not layout.hasProject(item.change.project):
+ project_in_pipeline = True
+ if not layout.getProjectPipelineConfig(item.change.project,
+ self.pipeline):
self.log.debug("Project %s not in pipeline %s for change %s" % (
item.change.project, self.pipeline, item.change))
+ project_in_pipeline = False
actions = []
elif item.getConfigError():
self.log.debug("Invalid config for change %s" % item.change)
@@ -771,7 +784,7 @@
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
self.pipeline._consecutive_failures += 1
- if layout.hasProject(item.change.project) and self.pipeline._disabled:
+ if project_in_pipeline and self.pipeline._disabled:
actions = self.pipeline.disabled_actions
# Check here if we should disable so that we only use the disabled
# reporters /after/ the last disable_at failure is still reported as
diff --git a/zuul/merger/client.py b/zuul/merger/client.py
index 5191a44..2614e58 100644
--- a/zuul/merger/client.py
+++ b/zuul/merger/client.py
@@ -134,18 +134,18 @@
def onBuildCompleted(self, job):
data = getJobData(job)
merged = data.get('merged', False)
- updated = data.get('updated', False)
+ job.updated = data.get('updated', False)
commit = data.get('commit')
files = data.get('files', {})
repo_state = data.get('repo_state', {})
job.files = files
self.log.info("Merge %s complete, merged: %s, updated: %s, "
"commit: %s" %
- (job, merged, updated, commit))
+ (job, merged, job.updated, commit))
job.setComplete()
if job.build_set:
self.sched.onMergeCompleted(job.build_set,
- merged, updated, commit, files,
+ merged, job.updated, commit, files,
repo_state)
# The test suite expects the job to be removed from the
# internal account after the wake flag is set.
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index ed98696..8b98bfb 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -191,11 +191,14 @@
def checkout(self, ref):
repo = self.createRepoObject()
self.log.debug("Checking out %s" % ref)
- repo.head.reference = ref
+ # Perform a hard reset before checking out so that we clean up
+ # anything that might be left over from a merge.
reset_repo_to_head(repo)
+ repo.git.checkout(ref)
return repo.head.commit
def checkoutLocalBranch(self, branch):
+ # TODO(jeblair): retire in favor of checkout
repo = self.createRepoObject()
# Perform a hard reset before checking out so that we clean up
# anything that might be left over from a merge.
@@ -341,10 +344,6 @@
return self._addProject(hostname, project_name, url, sshkey)
def updateRepo(self, connection_name, project_name):
- # TODOv3(jhesketh): Reimplement
- # da90a50b794f18f74de0e2c7ec3210abf79dda24 after merge..
- # Likely we'll handle connection context per projects differently.
- # self._setGitSsh()
repo = self.getRepo(connection_name, project_name)
try:
self.log.info("Updating local repository %s/%s",
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index fc599c1..881209d 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -111,7 +111,7 @@
def refstate(self, job):
args = json.loads(job.arguments)
- success, repo_state = self.merger.getItemRepoState(args['items'])
+ success, repo_state = self.merger.getRepoState(args['items'])
result = dict(updated=success,
repo_state=repo_state)
job.sendWorkComplete(json.dumps(result))
diff --git a/zuul/model.py b/zuul/model.py
index 850bbe2..429a0c3 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -21,6 +21,7 @@
import time
from uuid import uuid4
import urllib.parse
+import textwrap
MERGER_MERGE = 1 # "git merge"
MERGER_MERGE_RESOLVE = 2 # "git merge -s resolve"
@@ -1374,6 +1375,7 @@
self.quiet = False
self.active = False # Whether an item is within an active window
self.live = True # Whether an item is intended to be processed at all
+ # TODO(jeblair): move job_graph to buildset
self.job_graph = None
def __repr__(self):
@@ -1391,6 +1393,7 @@
old.next_build_set = self.current_build_set
self.current_build_set.previous_build_set = old
self.build_sets.append(self.current_build_set)
+ self.job_graph = None
def addBuild(self, build):
self.current_build_set.addBuild(build)
@@ -2093,6 +2096,83 @@
self.private_key_file = None
+class ConfigItemNotListError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration file is not a list. Each zuul.yaml configuration
+ file must be a list of items, for example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ Ensure that every item starts with "- " so that it is parsed as a
+ YAML list.
+ """)
+ super(ConfigItemNotListError, self).__init__(message)
+
+
+class ConfigItemNotDictError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration item is not a dictionary. Each zuul.yaml
+ configuration file must be a list of dictionaries, for
+ example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ Ensure that every item in the list is a dictionary with one
+ key (in this example, 'job' and 'project').
+ """)
+ super(ConfigItemNotDictError, self).__init__(message)
+
+
+class ConfigItemMultipleKeysError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration item has more than one key. Each zuul.yaml
+ configuration file must be a list of dictionaries with a
+ single key, for example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ Ensure that every item in the list is a dictionary with only
+ one key (in this example, 'job' and 'project'). This error
+ may be caused by insufficient indentation of the keys under
+ the configuration item ('name' in this example).
+ """)
+ super(ConfigItemMultipleKeysError, self).__init__(message)
+
+
+class ConfigItemUnknownError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration item not recognized. Each zuul.yaml
+ configuration file must be a list of dictionaries, for
+ example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ The dictionary keys must match one of the configuration item
+ types recognized by zuul (for example, 'job' or 'project').
+ """)
+ super(ConfigItemUnknownError, self).__init__(message)
+
+
class UnparsedAbideConfig(object):
"""A collection of yaml lists that has not yet been parsed into objects.
@@ -2109,25 +2189,18 @@
return
if not isinstance(conf, list):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotListError()
+
for item in conf:
if not isinstance(item, dict):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotDictError()
if len(item.keys()) > 1:
- raise Exception("Configuration item dictionaries must have "
- "a single key (when parsing %s)" %
- (conf,))
+ raise ConfigItemMultipleKeysError()
key, value = list(item.items())[0]
if key == 'tenant':
self.tenants.append(value)
else:
- raise Exception("Configuration item not recognized "
- "(when parsing %s)" %
- (conf,))
+ raise ConfigItemUnknownError()
class UnparsedTenantConfig(object):
@@ -2166,19 +2239,13 @@
return
if not isinstance(conf, list):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotListError()
for item in conf:
if not isinstance(item, dict):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotDictError()
if len(item.keys()) > 1:
- raise Exception("Configuration item dictionaries must have "
- "a single key (when parsing %s)" %
- (conf,))
+ raise ConfigItemMultipleKeysError()
key, value = list(item.items())[0]
if key == 'project':
name = value['name']
@@ -2196,9 +2263,7 @@
elif key == 'semaphore':
self.semaphores.append(value)
else:
- raise Exception("Configuration item `%s` not recognized "
- "(when parsing %s)" %
- (item, conf,))
+ raise ConfigItemUnknownError()
class Layout(object):
@@ -2331,19 +2396,21 @@
job_graph.addJob(frozen_job)
def createJobGraph(self, item):
- project_config = self.project_configs.get(
- item.change.project.canonical_name, None)
- ret = JobGraph()
# NOTE(pabelanger): It is possible for a foreign project not to have a
# configured pipeline, if so return an empty JobGraph.
- if project_config and item.pipeline.name in project_config.pipelines:
- project_job_list = \
- project_config.pipelines[item.pipeline.name].job_list
- self._createJobGraph(item, project_job_list, ret)
+ ret = JobGraph()
+ ppc = self.getProjectPipelineConfig(item.change.project,
+ item.pipeline)
+ if ppc:
+ self._createJobGraph(item, ppc.job_list, ret)
return ret
- def hasProject(self, project):
- return project.canonical_name in self.project_configs
+ def getProjectPipelineConfig(self, project, pipeline):
+ project_config = self.project_configs.get(
+ project.canonical_name, None)
+ if not project_config:
+ return None
+ return project_config.pipelines.get(pipeline.name, None)
class Semaphore(object):
@@ -2439,6 +2506,7 @@
def __init__(self, name):
self.name = name
self.max_nodes_per_job = 5
+ self.max_job_timeout = 10800
self.exclude_unprotected_branches = False
self.default_base_job = None
self.layout = None