Merge "Log exceptions for module failure cases" into feature/zuulv3
diff --git a/.zuul.yaml b/.zuul.yaml
index 1912eb5..27f2ca1 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -19,9 +19,25 @@
nodes: zuul-functional
pre-run: playbooks/zuul-stream/pre
run: playbooks/zuul-stream/functional
- post-run: playbooks/zuul-stream/post
+ post-run:
+ - playbooks/zuul-stream/post
+ - playbooks/zuul-stream/post-ara
required-projects:
- openstack/ara
+ files:
+ - "zuul/ansible/callback/.*"
+ - "playbooks/zuul-stream/.*"
+
+- job:
+ name: zuul-migrate
+ parent: unittests
+ run: playbooks/zuul-migrate
+ # We're adding zuul to the required-projects so that we can also trigger
+ # this from project-config changes
+ required-projects:
+ - openstack-infra/project-config
+ - name: openstack-infra/zuul
+ override-branch: feature/zuulv3
- project:
name: openstack-infra/zuul
@@ -33,14 +49,19 @@
- tox-pep8
- tox-py35
- zuul-stream-functional
+ - zuul-migrate:
+ files:
+ - zuul/cmd/migrate.py
+ - playbooks/zuul-migrate.yaml
gate:
jobs:
- tox-docs
- tox-pep8
- tox-py35
+ - zuul-stream-functional
post:
jobs:
- - publish-openstack-python-docs:
+ - publish-openstack-python-docs-infra:
vars:
- afs_target: 'infra/zuul'
+ afs_publisher_target: 'infra/zuul'
- publish-openstack-python-branch-tarball
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
index 54bc10a..4722750 100644
--- a/doc/source/admin/tenants.rst
+++ b/doc/source/admin/tenants.rst
@@ -163,6 +163,11 @@
The maximum number of nodes a job can request. A value of
'-1' value removes the limit.
+ .. attr:: max-job-timeout
+ :default: 10800
+
+ The maximum timeout for jobs. A value of '-1' value removes the limit.
+
.. attr:: exclude-unprotected-branches
:default: false
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 973470d..025ea71 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -497,9 +497,10 @@
- job:
name: run-tests
parent: base
- nodes:
- - name: test-node
- label: fedora
+ nodeset:
+ nodes:
+ - name: test-node
+ label: fedora
.. attr:: job
@@ -630,12 +631,12 @@
- job:
name: run-tests
- nodes: current-release
+ nodeset: current-release
- job:
name: run-tests
branch: stable/2.0
- nodes: old-release
+ nodeset: old-release
In some cases, Zuul uses an implied value for the branch
specifier if none is supplied:
@@ -722,18 +723,19 @@
ssh_key:
key: descrypted-secret-key-data
- .. attr:: nodes
+ .. attr:: nodeset
- A list of nodes which should be supplied to the job. This
- parameter may be supplied either as a string, in which case it
- references a :ref:`nodeset` definition which appears elsewhere
- in the configuration, or a list, in which case it is interpreted
- in the same way as a Nodeset definition (in essence, it is an
- anonymous Node definition unique to this job). See the
- :ref:`nodeset` reference for the syntax to use in that case.
+ The nodes which should be supplied to the job. This parameter
+ may be supplied either as a string, in which case it references
+ a :ref:`nodeset` definition which appears elsewhere in the
+ configuration, or a dictionary, in which case it is interpreted
+ in the same way as a Nodeset definition, though the ``name``
+ attribute should be omitted (in essence, it is an anonymous
+ Nodeset definition unique to this job). See the :ref:`nodeset`
+ reference for the syntax to use in that case.
- If a job has an empty or no node definition, it will still run
- and may be able to perform actions on the Zuul executor.
+ If a job has an empty or no nodeset definition, it will still
+ run and may be able to perform actions on the Zuul executor.
.. attr:: override-branch
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 837fb17..3d24f5d 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -303,11 +303,24 @@
The identifier for the change.
+ .. var:: change_url
+
+ The URL to the source location of the given change.
+ E.g., `https://review.example.org/#/c/123456/` or
+ `https://github.com/example/example/pull/1234`.
+
.. var:: patchset
The patchset identifier for the change. If a change is
revised, this will have a different value.
+.. var:: zuul_success
+
+ Post run playbook(s) will be passed this variable to indicate if the run
+ phase of the job was successful or not. This variable is meant to be used
+ with the `boolean` filter.
+
+
Change Items
++++++++++++
@@ -332,6 +345,12 @@
The patchset identifier for the change. If a change is revised,
this will have a different value.
+ .. var:: change_url
+
+ The URL to the source location of the given change.
+ E.g., `https://review.example.org/#/c/123456/` or
+ `https://github.com/example/example/pull/1234`.
+
Branch Items
++++++++++++
diff --git a/etc/status/public_html/jquery.zuul.js b/etc/status/public_html/jquery.zuul.js
index c2cf279..70e999e 100644
--- a/etc/status/public_html/jquery.zuul.js
+++ b/etc/status/public_html/jquery.zuul.js
@@ -554,14 +554,11 @@
}
$.each(changes, function (change_i, change) {
- // Only add a change when it has jobs
- if (change.jobs.length > 0) {
- var $change_box =
- format.change_with_status_tree(
- change, change_queue);
- $html.append($change_box);
- format.display_patchset($change_box);
- }
+ var $change_box =
+ format.change_with_status_tree(
+ change, change_queue);
+ $html.append($change_box);
+ format.display_patchset($change_box);
});
});
});
@@ -572,6 +569,11 @@
// Toggle showing/hiding the patchset when the header is
// clicked.
+ if (e.target.nodeName.toLowerCase() === 'a') {
+ // Ignore clicks from gerrit patch set link
+ return;
+ }
+
// Grab the patchset panel
var $panel = $(e.target).parents('.zuul-change');
var $body = $panel.children('.zuul-patchset-body');
@@ -655,7 +657,7 @@
setTimeout(function() {app.schedule(app);}, 5000);
return;
}
- app.update().complete(function () {
+ app.update().always(function () {
setTimeout(function() {app.schedule(app);}, 5000);
});
@@ -725,7 +727,7 @@
.removeClass('zuul-msg-wrap-off')
.show();
})
- .complete(function () {
+ .always(function () {
xhr = undefined;
app.emit('update-end');
});
diff --git a/playbooks/zuul-migrate.yaml b/playbooks/zuul-migrate.yaml
new file mode 100644
index 0000000..7ed6626
--- /dev/null
+++ b/playbooks/zuul-migrate.yaml
@@ -0,0 +1,26 @@
+- hosts: all
+ tasks:
+
+ - name: Install migration dependencies
+ command: "python3 -m pip install --user src/git.openstack.org/openstack-infra/zuul[migrate]"
+
+ - name: Migrate the data
+ command: "python3 ../zuul/zuul/cmd/migrate.py zuul/layout.yaml jenkins/jobs nodepool/nodepool.yaml . --mapping=zuul/mapping.yaml -v -m"
+ args:
+ chdir: src/git.openstack.org/openstack-infra/project-config
+
+ - name: Collect generated job config
+ synchronize:
+ dest: "{{ zuul.executor.log_root }}"
+ mode: pull
+ src: "src/git.openstack.org/openstack-infra/project-config/zuul.d"
+ verify_host: true
+ no_log: true
+
+ - name: Collect generated playbooks
+ synchronize:
+ dest: "{{ zuul.executor.log_root }}/playbooks"
+ mode: pull
+ src: "src/git.openstack.org/openstack-infra/project-config/playbooks/legacy"
+ verify_host: true
+ no_log: true
diff --git a/playbooks/zuul-stream/fixtures/test-stream.yaml b/playbooks/zuul-stream/fixtures/test-stream.yaml
index 6a31ff8..fd28757 100644
--- a/playbooks/zuul-stream/fixtures/test-stream.yaml
+++ b/playbooks/zuul-stream/fixtures/test-stream.yaml
@@ -10,6 +10,10 @@
debug:
var: setupvar
+ - name: Output a debug sentence
+ debug:
+ msg: This is a debug message
+
- name: Run a shell task
command: ip addr show
diff --git a/playbooks/zuul-stream/functional.yaml b/playbooks/zuul-stream/functional.yaml
index 7b5b84f..6b67b05 100644
--- a/playbooks/zuul-stream/functional.yaml
+++ b/playbooks/zuul-stream/functional.yaml
@@ -22,8 +22,8 @@
- name: Validate output - shell task
shell: |
- egrep "^.*\| node1 \| link/loopback" job-output.txt
- egrep "^.*\| node2 \| link/loopback" job-output.txt
+ egrep "^.*\| node1 \| link/loopback" job-output.txt
+ egrep "^.*\| node2 \| link/loopback" job-output.txt
- name: Validate output - loop with items
shell: |
@@ -58,9 +58,3 @@
shell: |
egrep "^.+\| node1 \| OSError.+\/failure-itemloop\/" job-output.txt
egrep "^.+\| node2 \| OSError.+\/failure-itemloop\/" job-output.txt
-
- - name: Generate ARA html
- command: ara generate html ara-output
-
- - name: Compress ARA html
- command: gzip --recursive --best ara-output
diff --git a/playbooks/zuul-stream/post-ara.yaml b/playbooks/zuul-stream/post-ara.yaml
new file mode 100644
index 0000000..e666d21
--- /dev/null
+++ b/playbooks/zuul-stream/post-ara.yaml
@@ -0,0 +1,14 @@
+- hosts: controller
+ tasks:
+
+ - name: Generate ARA html
+ command: ara generate html ara-output
+
+ - name: Compress ARA html
+ command: gzip --recursive --best ara-output
+
+ - name: Fetch ARA files
+ synchronize:
+ src: "{{ ansible_user_dir }}/ara-output"
+ dest: "{{ zuul.executor.log_root }}/stream-files"
+ mode: pull
diff --git a/playbooks/zuul-stream/post.yaml b/playbooks/zuul-stream/post.yaml
index f3d4f9c..2c717a8 100644
--- a/playbooks/zuul-stream/post.yaml
+++ b/playbooks/zuul-stream/post.yaml
@@ -23,4 +23,3 @@
- ansible.cfg
- stream-job-output.txt
- job-output.json
- - ara-output
diff --git a/requirements.txt b/requirements.txt
index eadb508..cdffda2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,6 +17,7 @@
PrettyTable>=0.6,<0.8
babel>=1.0
ansible>=2.3.0.0,<2.4
+netaddr
kazoo
sqlalchemy
alembic
diff --git a/setup.cfg b/setup.cfg
index ce7a40e..63ff562 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,6 +27,7 @@
zuul-executor = zuul.cmd.executor:main
zuul-bwrap = zuul.driver.bubblewrap:main
zuul-web = zuul.cmd.web:main
+ zuul-migrate = zuul.cmd.migrate:main
[build_sphinx]
source-dir = doc/source
@@ -37,3 +38,5 @@
[extras]
mysql_reporter=
PyMySQL
+migrate=
+ jenkins-job-builder==1.6.2
diff --git a/tests/base.py b/tests/base.py
index c49e1ce..c159865 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -195,9 +195,16 @@
if not large:
for fn, content in files.items():
fn = os.path.join(path, fn)
- with open(fn, 'w') as f:
- f.write(content)
- repo.index.add([fn])
+ if content is None:
+ os.unlink(fn)
+ repo.index.remove([fn])
+ else:
+ d = os.path.dirname(fn)
+ if not os.path.exists(d):
+ os.makedirs(d)
+ with open(fn, 'w') as f:
+ f.write(content)
+ repo.index.add([fn])
else:
for fni in range(100):
fn = os.path.join(path, str(fni))
@@ -1005,8 +1012,7 @@
def getGithubClient(self,
project=None,
- user_id=None,
- use_app=True):
+ user_id=None):
return self.github_client
def openFakePullRequest(self, project, branch, subject, files=[],
@@ -1188,7 +1194,7 @@
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ self.sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self.sock.bind(('', 0))
self.port = self.sock.getsockname()[1]
self.wake_read, self.wake_write = os.pipe()
@@ -2138,6 +2144,7 @@
def getGithubConnection(driver, name, config):
con = FakeGithubConnection(driver, name, config,
upstream_root=self.upstream_root)
+ self.event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
return con
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
index 9bfeb0e..e6bd5ef 100644
--- a/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
@@ -24,6 +24,12 @@
- zuul.project.canonical_name == 'review.example.com/org/project'
- zuul.project.src_dir == 'src/review.example.com/org/project'
+ - name: Assert legacy zuul vars are valid
+ assert:
+ that:
+ - zuul.project.name == '{{ (zuul | zuul_legacy_vars).ZUUL_PROJECT }}'
+ - zuul.branch == '{{ (zuul | zuul_legacy_vars).ZUUL_BRANCH }}'
+
- debug:
msg: "vartest secret {{ vartest_secret }}"
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index d34d5c4..67d1c70 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -98,9 +98,10 @@
- job:
parent: python27
name: check-vars
- nodes:
- - name: ubuntu-xenial
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: ubuntu-xenial
+ label: ubuntu-xenial
vars:
vartest_job: vartest_job
vartest_secret: vartest_job
@@ -112,9 +113,10 @@
- job:
parent: python27
name: check-secret-names
- nodes:
- - name: ubuntu-xenial
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: ubuntu-xenial
+ label: ubuntu-xenial
secrets:
- secret: vartest_secret
name: renamed_secret
diff --git a/tests/fixtures/config/in-repo-join/git/common-config/playbooks/common-config-test.yaml b/tests/fixtures/config/in-repo-join/git/common-config/playbooks/common-config-test.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/common-config/playbooks/common-config-test.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/in-repo-join/git/common-config/zuul.yaml b/tests/fixtures/config/in-repo-join/git/common-config/zuul.yaml
new file mode 100644
index 0000000..561fc39
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/common-config/zuul.yaml
@@ -0,0 +1,46 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (tenant-one-gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+
+- job:
+ name: common-config-test
+
+- project:
+ name: org/project
+ check:
+ jobs:
+ - common-config-test
diff --git a/tests/fixtures/config/in-repo-join/git/org_project/.zuul.yaml b/tests/fixtures/config/in-repo-join/git/org_project/.zuul.yaml
new file mode 100644
index 0000000..280342c
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/org_project/.zuul.yaml
@@ -0,0 +1,2 @@
+- job:
+ name: project-test1
diff --git a/tests/fixtures/config/in-repo-join/git/org_project/README b/tests/fixtures/config/in-repo-join/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/in-repo-join/git/org_project/playbooks/project-test1.yaml b/tests/fixtures/config/in-repo-join/git/org_project/playbooks/project-test1.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/git/org_project/playbooks/project-test1.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/in-repo-join/main.yaml b/tests/fixtures/config/in-repo-join/main.yaml
new file mode 100644
index 0000000..208e274
--- /dev/null
+++ b/tests/fixtures/config/in-repo-join/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
diff --git a/tests/fixtures/config/in-repo/git/common-config/zuul.yaml b/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
index ff4268b..5623467 100644
--- a/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
@@ -78,6 +78,8 @@
- project:
name: common-config
+ check:
+ jobs: []
tenant-one-gate:
jobs:
- common-config-test
diff --git a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
index 60cd434..e1c27bb 100644
--- a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
@@ -3,6 +3,8 @@
- project:
name: org/project
+ check:
+ jobs: []
tenant-one-gate:
jobs:
- project-test1
diff --git a/tests/fixtures/config/inventory/git/common-config/zuul.yaml b/tests/fixtures/config/inventory/git/common-config/zuul.yaml
index 7809c5d..e5727a2 100644
--- a/tests/fixtures/config/inventory/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/inventory/git/common-config/zuul.yaml
@@ -37,10 +37,11 @@
- job:
name: single-inventory
- nodes:
- - name: ubuntu-xenial
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: ubuntu-xenial
+ label: ubuntu-xenial
- job:
name: group-inventory
- nodes: nodeset1
+ nodeset: nodeset1
diff --git a/tests/fixtures/config/multi-tenant/git/common-config/zuul.yaml b/tests/fixtures/config/multi-tenant/git/common-config/zuul.yaml
index 27f2fd5..273469c 100644
--- a/tests/fixtures/config/multi-tenant/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/multi-tenant/git/common-config/zuul.yaml
@@ -17,6 +17,7 @@
- job:
name: python27
- nodes:
- - name: controller
- label: ubuntu-trusty
+ nodeset:
+ nodes:
+ - name: controller
+ label: ubuntu-trusty
diff --git a/tests/fixtures/config/multi-tenant/main.yaml b/tests/fixtures/config/multi-tenant/main.yaml
index 4916905..e667588 100644
--- a/tests/fixtures/config/multi-tenant/main.yaml
+++ b/tests/fixtures/config/multi-tenant/main.yaml
@@ -1,5 +1,6 @@
- tenant:
name: tenant-one
+ max-job-timeout: 1800
source:
gerrit:
config-projects:
diff --git a/tests/fixtures/config/openstack/git/project-config/zuul.yaml b/tests/fixtures/config/openstack/git/project-config/zuul.yaml
index 2506db0..de6321d 100644
--- a/tests/fixtures/config/openstack/git/project-config/zuul.yaml
+++ b/tests/fixtures/config/openstack/git/project-config/zuul.yaml
@@ -37,9 +37,10 @@
name: base
parent: null
timeout: 30
- nodes:
- - name: controller
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: controller
+ label: ubuntu-xenial
- job:
name: python27
@@ -49,9 +50,10 @@
name: python27
parent: base
branches: stable/mitaka
- nodes:
- - name: controller
- label: ubuntu-trusty
+ nodeset:
+ nodes:
+ - name: controller
+ label: ubuntu-trusty
- job:
name: python35
diff --git a/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
index 9796fe2..14f43f4 100644
--- a/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
@@ -47,41 +47,47 @@
- job:
name: project-merge
hold-following-changes: true
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- job:
name: project-test1
attempts: 4
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- job:
name: project-test1
branches: stable
- nodes:
- - name: controller
- label: label2
+ nodeset:
+ nodes:
+ - name: controller
+ label: label2
- job:
name: project-post
- nodes:
- - name: static
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: static
+ label: ubuntu-xenial
- job:
name: project-test2
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- job:
name: project1-project2-integration
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- job:
name: project-testfile
diff --git a/tests/fixtures/layouts/autohold.yaml b/tests/fixtures/layouts/autohold.yaml
index 515f79d..578f886 100644
--- a/tests/fixtures/layouts/autohold.yaml
+++ b/tests/fixtures/layouts/autohold.yaml
@@ -17,9 +17,10 @@
- job:
name: project-test2
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- project:
name: org/project
diff --git a/tests/fixtures/layouts/disable_at.yaml b/tests/fixtures/layouts/disable_at.yaml
index 7b1b8c8..8c24c1b 100644
--- a/tests/fixtures/layouts/disable_at.yaml
+++ b/tests/fixtures/layouts/disable_at.yaml
@@ -21,9 +21,10 @@
- job:
name: project-test1
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- project:
name: org/project
diff --git a/tests/fixtures/layouts/dont-ignore-ref-deletes.yaml b/tests/fixtures/layouts/dont-ignore-ref-deletes.yaml
index 6a92deb..bb98b57 100644
--- a/tests/fixtures/layouts/dont-ignore-ref-deletes.yaml
+++ b/tests/fixtures/layouts/dont-ignore-ref-deletes.yaml
@@ -13,9 +13,10 @@
- job:
name: project-post
- nodes:
- - name: static
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: static
+ label: ubuntu-xenial
- project:
name: org/project
diff --git a/tests/fixtures/layouts/idle.yaml b/tests/fixtures/layouts/idle.yaml
index ec31408..4cc07ae 100644
--- a/tests/fixtures/layouts/idle.yaml
+++ b/tests/fixtures/layouts/idle.yaml
@@ -11,9 +11,10 @@
- job:
name: project-bitrot
- nodes:
- - name: static
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: static
+ label: ubuntu-xenial
- project:
name: org/project
diff --git a/tests/fixtures/layouts/no-timer.yaml b/tests/fixtures/layouts/no-timer.yaml
index 3790ea7..7aaa1ed 100644
--- a/tests/fixtures/layouts/no-timer.yaml
+++ b/tests/fixtures/layouts/no-timer.yaml
@@ -29,9 +29,10 @@
- job:
name: project-bitrot
- nodes:
- - name: static
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: static
+ label: ubuntu-xenial
- project:
name: org/project
diff --git a/tests/fixtures/layouts/repo-deleted.yaml b/tests/fixtures/layouts/repo-deleted.yaml
index 6e6c301..3a7f6b3 100644
--- a/tests/fixtures/layouts/repo-deleted.yaml
+++ b/tests/fixtures/layouts/repo-deleted.yaml
@@ -42,16 +42,18 @@
- job:
name: project-test1
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- job:
name: project-test1
branches: stable
- nodes:
- - name: controller
- label: label2
+ nodeset:
+ nodes:
+ - name: controller
+ label: label2
- job:
name: project-test2
diff --git a/tests/fixtures/layouts/smtp.yaml b/tests/fixtures/layouts/smtp.yaml
index 5ea75ce..0654448 100644
--- a/tests/fixtures/layouts/smtp.yaml
+++ b/tests/fixtures/layouts/smtp.yaml
@@ -48,16 +48,18 @@
- job:
name: project-test1
- nodes:
- - name: controller
- label: label1
+ nodeset:
+ nodes:
+ - name: controller
+ label: label1
- job:
name: project-test1
branches: stable
- nodes:
- - name: controller
- label: label2
+ nodeset:
+ nodes:
+ - name: controller
+ label: label2
- job:
name: project-test2
diff --git a/tests/fixtures/layouts/timer.yaml b/tests/fixtures/layouts/timer.yaml
index e1c4e77..8c0cc2b 100644
--- a/tests/fixtures/layouts/timer.yaml
+++ b/tests/fixtures/layouts/timer.yaml
@@ -30,9 +30,10 @@
- job:
name: project-bitrot
- nodes:
- - name: static
- label: ubuntu-xenial
+ nodeset:
+ nodes:
+ - name: static
+ label: ubuntu-xenial
- project:
name: org/project
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index 3b5c206..f7d580c 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -304,7 +304,7 @@
def test_conflict_config(self):
tenant = self.sched.abide.tenants.get('tenant-one')
jobs = sorted(tenant.layout.jobs.keys())
- self.assertEquals(
+ self.assertEqual(
['base', 'noop', 'trusted-zuul.yaml-job',
'untrusted-zuul.yaml-job'],
jobs)
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
index 3793edc..9c45645 100755
--- a/tests/unit/test_executor.py
+++ b/tests/unit/test_executor.py
@@ -35,17 +35,17 @@
'Project %s commit for build %s #%s should '
'not have a detached HEAD' % (
project, build, number))
- self.assertEquals(repo.active_branch.name,
- state['branch'],
- 'Project %s commit for build %s #%s should '
- 'be on the correct branch' % (
- project, build, number))
+ self.assertEqual(repo.active_branch.name,
+ state['branch'],
+ 'Project %s commit for build %s #%s should '
+ 'be on the correct branch' % (
+ project, build, number))
if 'commit' in state:
- self.assertEquals(state['commit'],
- str(repo.commit('HEAD')),
- 'Project %s commit for build %s #%s should '
- 'be correct' % (
- project, build, number))
+ self.assertEqual(state['commit'],
+ str(repo.commit('HEAD')),
+ 'Project %s commit for build %s #%s should '
+ 'be correct' % (
+ project, build, number))
ref = repo.commit('HEAD')
repo_messages = set(
[c.message.strip() for c in repo.iter_commits(ref)])
@@ -93,7 +93,7 @@
self.waitUntilSettled()
- self.assertEquals(2, len(self.builds), "Two builds are running")
+ self.assertEqual(2, len(self.builds), "Two builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
@@ -133,7 +133,7 @@
self.waitUntilSettled()
- self.assertEquals(3, len(self.builds), "Three builds are running")
+ self.assertEqual(3, len(self.builds), "Three builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
@@ -194,7 +194,7 @@
self.waitUntilSettled()
- self.assertEquals(4, len(self.builds), "Four builds are running")
+ self.assertEqual(4, len(self.builds), "Four builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
@@ -283,7 +283,7 @@
time.sleep(1)
self.waitUntilSettled()
- self.assertEquals(1, len(self.builds), "One build is running")
+ self.assertEqual(1, len(self.builds), "One build is running")
upstream = self.getUpstreamRepos(projects)
states = [
@@ -326,7 +326,7 @@
time.sleep(1)
self.waitUntilSettled()
- self.assertEquals(2, len(self.builds), "Two builds are running")
+ self.assertEqual(2, len(self.builds), "Two builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
diff --git a/tests/unit/test_gerrit.py b/tests/unit/test_gerrit.py
index a369aff..5ce1aa3 100644
--- a/tests/unit/test_gerrit.py
+++ b/tests/unit/test_gerrit.py
@@ -63,11 +63,11 @@
result = gerrit.simpleQuery('project:openstack-infra/zuul')
_ssh_mock.assert_has_calls(calls)
- self.assertEquals(len(calls), _ssh_mock.call_count,
- '_ssh should be called %d times' % len(calls))
+ self.assertEqual(len(calls), _ssh_mock.call_count,
+ '_ssh should be called %d times' % len(calls))
self.assertIsNotNone(result, 'Result is not none')
- self.assertEquals(len(result), expected_patches,
- 'There must be %d patches.' % expected_patches)
+ self.assertEqual(len(result), expected_patches,
+ 'There must be %d patches.' % expected_patches)
def test_simple_query_pagination_new(self):
files = ['simple_query_pagination_new_1',
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index a088236..ebb5e1c 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -17,6 +17,7 @@
from testtools.matchers import MatchesRegex, StartsWith
import urllib
import time
+from unittest import skip
import git
@@ -685,6 +686,8 @@
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
+ # TODO(jlk): Make this a more generic test for unknown project
+ @skip("Skipped for rewrite of webhook handler")
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_ping_event(self):
# Test valid ping
diff --git a/tests/unit/test_log_streamer.py b/tests/unit/test_log_streamer.py
index f47a8c8..c808540 100644
--- a/tests/unit/test_log_streamer.py
+++ b/tests/unit/test_log_streamer.py
@@ -22,6 +22,7 @@
import os.path
import socket
import tempfile
+import testtools
import threading
import time
@@ -34,7 +35,7 @@
def setUp(self):
super(TestLogStreamer, self).setUp()
- self.host = '0.0.0.0'
+ self.host = '::'
def startStreamer(self, port, root=None):
if not root:
@@ -46,16 +47,13 @@
streamer = self.startStreamer(port)
self.addCleanup(streamer.stop)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.addCleanup(s.close)
- self.assertEqual(0, s.connect_ex((self.host, port)))
+ s = socket.create_connection((self.host, port))
s.close()
streamer.stop()
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.addCleanup(s.close)
- self.assertNotEqual(0, s.connect_ex((self.host, port)))
+ with testtools.ExpectedException(ConnectionRefusedError):
+ s = socket.create_connection((self.host, port))
s.close()
@@ -66,7 +64,7 @@
def setUp(self):
super(TestStreaming, self).setUp()
- self.host = '0.0.0.0'
+ self.host = '::'
self.streamer = None
self.stop_streamer = False
self.streaming_data = ''
@@ -80,8 +78,7 @@
root = tempfile.gettempdir()
self.streamer = zuul.lib.log_streamer.LogStreamer(None, self.host,
port, root)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect((self.host, port))
+ s = socket.create_connection((self.host, port))
self.addCleanup(s.close)
req = '%s\n' % build_uuid
@@ -161,7 +158,7 @@
def runWSClient(self, build_uuid, event):
async def client(loop, build_uuid, event):
- uri = 'http://127.0.0.1:9000/console-stream'
+ uri = 'http://[::1]:9000/console-stream'
try:
session = aiohttp.ClientSession(loop=loop)
async with session.ws_connect(uri) as ws:
@@ -226,7 +223,7 @@
# Start the web server
web_server = zuul.web.ZuulWeb(
- listen_address='127.0.0.1', listen_port=9000,
+ listen_address='::', listen_port=9000,
gear_server='127.0.0.1', gear_port=self.gearman_server.port)
loop = asyncio.new_event_loop()
loop.set_debug(True)
@@ -237,8 +234,11 @@
self.addCleanup(web_server.stop)
# Wait until web server is started
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
- while s.connect_ex((self.host, 9000)):
+ while True:
+ try:
+ with socket.create_connection((self.host, 9000)):
+ break
+ except ConnectionRefusedError:
time.sleep(0.1)
# Start a thread with the websocket client
diff --git a/tests/unit/test_merger_repo.py b/tests/unit/test_merger_repo.py
index f815344..8aafabf 100644
--- a/tests/unit/test_merger_repo.py
+++ b/tests/unit/test_merger_repo.py
@@ -65,12 +65,12 @@
os.path.join(self.workspace_root, 'subdir', '.git')),
msg='Cloned over the submodule placeholder')
- self.assertEquals(
+ self.assertEqual(
os.path.join(self.upstream_root, 'org/project1'),
work_repo.createRepoObject().remotes[0].url,
message="Parent clone still point to upstream project1")
- self.assertEquals(
+ self.assertEqual(
os.path.join(self.upstream_root, 'org/project2'),
sub_repo.createRepoObject().remotes[0].url,
message="Sub repository points to upstream project2")
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 6dd8333..c457ff0 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -190,10 +190,12 @@
'timeout': 30,
'pre-run': 'base-pre',
'post-run': 'base-post',
- 'nodes': [{
- 'name': 'controller',
- 'label': 'base',
- }],
+ 'nodeset': {
+ 'nodes': [{
+ 'name': 'controller',
+ 'label': 'base',
+ }],
+ },
})
layout.addJob(base)
python27 = configloader.JobParser.fromYaml(tenant, layout, {
@@ -203,10 +205,12 @@
'parent': 'base',
'pre-run': 'py27-pre',
'post-run': ['py27-post-a', 'py27-post-b'],
- 'nodes': [{
- 'name': 'controller',
- 'label': 'new',
- }],
+ 'nodeset': {
+ 'nodes': [{
+ 'name': 'controller',
+ 'label': 'new',
+ }],
+ },
'timeout': 40,
})
layout.addJob(python27)
@@ -220,10 +224,12 @@
'pre-run': 'py27-diablo-pre',
'run': 'py27-diablo',
'post-run': 'py27-diablo-post',
- 'nodes': [{
- 'name': 'controller',
- 'label': 'old',
- }],
+ 'nodeset': {
+ 'nodes': [{
+ 'name': 'controller',
+ 'label': 'old',
+ }],
+ },
'timeout': 50,
})
layout.addJob(python27diablo)
@@ -800,14 +806,22 @@
self.db = model.TimeDataBase(self.tmp_root)
def test_timedatabase(self):
- self.assertEqual(self.db.getEstimatedTime('job-name'), 0)
- self.db.update('job-name', 50, 'SUCCESS')
- self.assertEqual(self.db.getEstimatedTime('job-name'), 50)
- self.db.update('job-name', 100, 'SUCCESS')
- self.assertEqual(self.db.getEstimatedTime('job-name'), 75)
+ pipeline = Dummy(layout=Dummy(tenant=Dummy(name='test-tenant')))
+ change = Dummy(project=Dummy(canonical_name='git.example.com/foo/bar'))
+ job = Dummy(name='job-name')
+ item = Dummy(pipeline=pipeline,
+ change=change)
+ build = Dummy(build_set=Dummy(item=item),
+ job=job)
+
+ self.assertEqual(self.db.getEstimatedTime(build), 0)
+ self.db.update(build, 50, 'SUCCESS')
+ self.assertEqual(self.db.getEstimatedTime(build), 50)
+ self.db.update(build, 100, 'SUCCESS')
+ self.assertEqual(self.db.getEstimatedTime(build), 75)
for x in range(10):
- self.db.update('job-name', 100, 'SUCCESS')
- self.assertEqual(self.db.getEstimatedTime('job-name'), 100)
+ self.db.update(build, 100, 'SUCCESS')
+ self.assertEqual(self.db.getEstimatedTime(build), 100)
class TestGraph(BaseTestCase):
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index f33d964..2dcd9bf 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -617,7 +617,6 @@
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
- @skip("Disabled for early v3 development")
def _test_time_database(self, iteration):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -626,7 +625,7 @@
self.waitUntilSettled()
time.sleep(2)
- data = json.loads(self.sched.formatStatusJSON())
+ data = json.loads(self.sched.formatStatusJSON('tenant-one'))
found_job = None
for pipeline in data['pipelines']:
if pipeline['name'] != 'gate':
@@ -652,7 +651,6 @@
self.executor_server.release()
self.waitUntilSettled()
- @skip("Disabled for early v3 development")
def test_time_database(self):
"Test the time database"
@@ -2436,6 +2434,35 @@
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
+ def test_live_reconfiguration_abort(self):
+ # Raise an exception during reconfiguration and verify we
+ # still function.
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ pipeline = tenant.layout.pipelines['gate']
+ change = pipeline.getAllItems()[0].change
+ # Set this to an invalid value to cause an exception during
+ # reconfiguration.
+ change.branch = None
+
+ self.sched.reconfigure(self.config)
+ self.waitUntilSettled()
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+
+ self.waitUntilSettled()
+ self.assertEqual(self.getJobFromHistory('project-merge').result,
+ 'ABORTED')
+ self.assertEqual(A.data['status'], 'NEW')
+ # The final report fails because of the invalid value set above.
+ self.assertEqual(A.reported, 1)
+
def test_live_reconfiguration_merge_conflict(self):
# A real-world bug: a change in a gate queue has a merge
# conflict and a job is added to its project while it's
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 9d695aa..94f169a 100755
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -371,55 +371,6 @@
dict(name='project-test1', result='SUCCESS', changes='1,2'),
dict(name='project-test2', result='SUCCESS', changes='1,2')])
- def test_dynamic_dependent_pipeline(self):
- # Test dynamically adding a project to a
- # dependent pipeline for the first time
- self.executor_server.hold_jobs_in_build = True
-
- tenant = self.sched.abide.tenants.get('tenant-one')
- gate_pipeline = tenant.layout.pipelines['gate']
-
- in_repo_conf = textwrap.dedent(
- """
- - job:
- name: project-test1
-
- - job:
- name: project-test2
-
- - project:
- name: org/project
- gate:
- jobs:
- - project-test2
- """)
-
- in_repo_playbook = textwrap.dedent(
- """
- - hosts: all
- tasks: []
- """)
-
- file_dict = {'.zuul.yaml': in_repo_conf,
- 'playbooks/project-test2.yaml': in_repo_playbook}
- A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
- files=file_dict)
- A.addApproval('Approved', 1)
- self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
- self.waitUntilSettled()
-
- items = gate_pipeline.getAllItems()
- self.assertEqual(items[0].change.number, '1')
- self.assertEqual(items[0].change.patchset, '1')
- self.assertTrue(items[0].live)
-
- self.executor_server.hold_jobs_in_build = False
- self.executor_server.release()
- self.waitUntilSettled()
-
- # Make sure the dynamic queue got cleaned up
- self.assertEqual(gate_pipeline.queues, [])
-
def test_in_repo_branch(self):
in_repo_conf = textwrap.dedent(
"""
@@ -544,6 +495,84 @@
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
])
+ def test_yaml_list_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ job: foo
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('not a list', A.messages[0],
+ "A should have a syntax error reported")
+
+ def test_yaml_dict_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('not a dictionary', A.messages[0],
+ "A should have a syntax error reported")
+
+ def test_yaml_key_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test2
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('has more than one key', A.messages[0],
+ "A should have a syntax error reported")
+
+ def test_yaml_unknown_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - foobar:
+ foo: bar
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('not recognized', A.messages[0],
+ "A should have a syntax error reported")
+
def test_untrusted_syntax_error(self):
in_repo_conf = textwrap.dedent(
"""
@@ -719,6 +748,48 @@
self.assertIn('appears multiple times', A.messages[0],
"A should have a syntax error reported")
+ def test_secret_not_found_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: test
+ secrets: does-not-exist
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('secret "does-not-exist" was not found', A.messages[0],
+ "A should have a syntax error reported")
+
+ def test_nodeset_not_found_error(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: test
+ nodeset: does-not-exist
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('nodeset "does-not-exist" was not found', A.messages[0],
+ "A should have a syntax error reported")
+
def test_multi_repo(self):
downstream_repo_conf = textwrap.dedent(
"""
@@ -775,6 +846,194 @@
# isn't this will raise an exception.
tenant.layout.getJob('project-test2')
+ def test_pipeline_error(self):
+ with open(os.path.join(FIXTURE_DIR,
+ 'config/in-repo/git/',
+ 'common-config/zuul.yaml')) as f:
+ base_common_config = f.read()
+
+ in_repo_conf_A = textwrap.dedent(
+ """
+ - pipeline:
+ name: periodic
+ foo: error
+ """)
+
+ file_dict = {'zuul.yaml': None,
+ 'zuul.d/main.yaml': base_common_config,
+ 'zuul.d/test1.yaml': in_repo_conf_A}
+ A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('syntax error',
+ A.messages[0],
+ "A should have an error reported")
+
+ def test_change_series_error(self):
+ with open(os.path.join(FIXTURE_DIR,
+ 'config/in-repo/git/',
+ 'common-config/zuul.yaml')) as f:
+ base_common_config = f.read()
+
+ in_repo_conf_A = textwrap.dedent(
+ """
+ - pipeline:
+ name: periodic
+ foo: error
+ """)
+
+ file_dict = {'zuul.yaml': None,
+ 'zuul.d/main.yaml': base_common_config,
+ 'zuul.d/test1.yaml': in_repo_conf_A}
+ A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
+ files=file_dict)
+
+ in_repo_conf_B = textwrap.dedent(
+ """
+ - job:
+ name: project-test2
+ foo: error
+ """)
+
+ file_dict = {'zuul.yaml': None,
+ 'zuul.d/main.yaml': base_common_config,
+ 'zuul.d/test1.yaml': in_repo_conf_A,
+ 'zuul.d/test2.yaml': in_repo_conf_B}
+ B = self.fake_gerrit.addFakeChange('common-config', 'master', 'B',
+ files=file_dict)
+ B.setDependsOn(A, 1)
+ C = self.fake_gerrit.addFakeChange('common-config', 'master', 'C')
+ C.setDependsOn(B, 1)
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(C.reported, 1,
+ "C should report failure")
+ self.assertIn('depends on a change that failed to merge',
+ C.messages[0],
+ "C should have an error reported")
+
+
+class TestInRepoJoin(ZuulTestCase):
+ # In this config, org/project is not a member of any pipelines, so
+ # that we may test the changes that cause it to join them.
+
+ tenant_config_file = 'config/in-repo-join/main.yaml'
+
+ def test_dynamic_dependent_pipeline(self):
+ # Test dynamically adding a project to a
+ # dependent pipeline for the first time
+ self.executor_server.hold_jobs_in_build = True
+
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ gate_pipeline = tenant.layout.pipelines['gate']
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test1
+
+ - job:
+ name: project-test2
+
+ - project:
+ name: org/project
+ gate:
+ jobs:
+ - project-test2
+ """)
+
+ in_repo_playbook = textwrap.dedent(
+ """
+ - hosts: all
+ tasks: []
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf,
+ 'playbooks/project-test2.yaml': in_repo_playbook}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ items = gate_pipeline.getAllItems()
+ self.assertEqual(items[0].change.number, '1')
+ self.assertEqual(items[0].change.patchset, '1')
+ self.assertTrue(items[0].live)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ # Make sure the dynamic queue got cleaned up
+ self.assertEqual(gate_pipeline.queues, [])
+
+ def test_dynamic_dependent_pipeline_failure(self):
+ # Test that a change behind a failing change adding a project
+ # to a dependent pipeline is dequeued.
+ self.executor_server.hold_jobs_in_build = True
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test1
+
+ - project:
+ name: org/project
+ gate:
+ jobs:
+ - project-test1
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.executor_server.failJob('project-test1', A)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.orderedRelease()
+ self.waitUntilSettled()
+ self.assertEqual(A.reported, 2,
+ "A should report start and failure")
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(B.reported, 1,
+ "B should report start")
+ self.assertHistory([
+ dict(name='project-test1', result='FAILURE', changes='1,1'),
+ dict(name='project-test1', result='ABORTED', changes='1,1 2,1'),
+ ], ordered=False)
+
+ def test_dynamic_dependent_pipeline_absent(self):
+ # Test that a series of dependent changes don't report merge
+ # failures to a pipeline they aren't in.
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.setDependsOn(A, 1)
+
+ A.addApproval('Code-Review', 2)
+ A.addApproval('Approved', 1)
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+ self.waitUntilSettled()
+ self.assertEqual(A.reported, 0,
+ "A should not report")
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(B.reported, 0,
+ "B should not report")
+ self.assertEqual(B.data['status'], 'NEW')
+ self.assertHistory([])
+
class TestAnsible(AnsibleZuulTestCase):
# A temporary class to hold new tests while others are disabled
@@ -1185,24 +1444,25 @@
class TestMaxNodesPerJob(AnsibleZuulTestCase):
tenant_config_file = 'config/multi-tenant/main.yaml'
def test_max_nodes_reached(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test-job
- nodes:
- - name: node01
- label: fake
- - name: node02
- label: fake
- - name: node03
- label: fake
- - name: node04
- label: fake
- - name: node05
- label: fake
- - name: node06
- label: fake
+ nodeset:
+ nodes:
+ - name: node01
+ label: fake
+ - name: node02
+ label: fake
+ - name: node03
+ label: fake
+ - name: node04
+ label: fake
+ - name: node05
+ label: fake
+ - name: node06
+ label: fake
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
@@ -1220,6 +1480,32 @@
"B should not fail because of nodes limit")
+class TestMaxTimeout(AnsibleZuulTestCase):
+ tenant_config_file = 'config/multi-tenant/main.yaml'
+
+    def test_max_timeout_exceeded(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: test-job
+ timeout: 3600
+ """)
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertIn('The job "test-job" exceeds tenant max-job-timeout',
+ A.messages[0], "A should fail because of timeout limit")
+
+        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B',
+ files=file_dict)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertNotIn("exceeds tenant max-job-timeout", B.messages[0],
+ "B should not fail because of timeout limit")
+
+
class TestBaseJobs(ZuulTestCase):
tenant_config_file = 'config/base-jobs/main.yaml'
diff --git a/tools/run-migration.sh b/tools/run-migration.sh
new file mode 100755
index 0000000..6c7e250
--- /dev/null
+++ b/tools/run-migration.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stupid script I'm using to test migration script locally
+# Assumes project-config is adjacent to zuul and has the mapping file
+
+BASE_DIR=$(cd $(dirname $0)/../..; pwd)
+cd $BASE_DIR/project-config
+python3 $BASE_DIR/zuul/zuul/cmd/migrate.py --mapping=zuul/mapping.yaml \
+ zuul/layout.yaml jenkins/jobs nodepool/nodepool.yaml .
diff --git a/zuul/ansible/action/normal.py b/zuul/ansible/action/normal.py
index 34df21d..152f13f 100644
--- a/zuul/ansible/action/normal.py
+++ b/zuul/ansible/action/normal.py
@@ -40,7 +40,7 @@
or self._play_context.remote_addr.startswith('127.')
or self._task.delegate_to == 'localhost'
or (self._task.delegate_to
- and self._task.delegate_to.startswtih('127.'))):
+ and self._task.delegate_to.startswith('127.'))):
if not self.dispatch_handler():
raise AnsibleError("Executing local code is prohibited")
return super(ActionModule, self).run(tmp, task_vars)
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index 2a960a2..0a266df 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -36,23 +36,6 @@
LOG_STREAM_PORT = 19885
-def linesplit(socket):
- buff = socket.recv(4096).decode("utf-8")
- buffering = True
- while buffering:
- if "\n" in buff:
- (line, buff) = buff.split("\n", 1)
- yield line + "\n"
- else:
- more = socket.recv(4096).decode("utf-8")
- if not more:
- buffering = False
- else:
- buff += more
- if buff:
- yield buff
-
-
def zuul_filter_result(result):
"""Remove keys from shell/command output.
@@ -122,6 +105,7 @@
self._logger = logging.getLogger('zuul.executor.ansible')
def _log(self, msg, ts=None, job=True, executor=False, debug=False):
+ msg = msg.rstrip()
if job:
now = ts or datetime.datetime.now()
self._logger.info("{now} | {msg}".format(now=now, msg=msg))
@@ -134,10 +118,9 @@
def _read_log(self, host, ip, log_id, task_name, hosts):
self._log("[%s] Starting to log %s for task %s"
% (host, log_id, task_name), job=False, executor=True)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
- s.connect((ip, LOG_STREAM_PORT))
+ s = socket.create_connection((ip, LOG_STREAM_PORT))
except Exception:
self._log("[%s] Waiting on logger" % host,
executor=True, debug=True)
@@ -145,19 +128,36 @@
continue
msg = "%s\n" % log_id
s.send(msg.encode("utf-8"))
- for line in linesplit(s):
- if "[Zuul] Task exit code" in line:
- return
- elif self._streamers_stop and "[Zuul] Log not found" in line:
- return
- elif "[Zuul] Log not found" in line:
- # don't output this line
- pass
+ buff = s.recv(4096).decode("utf-8")
+ buffering = True
+ while buffering:
+ if "\n" in buff:
+ (line, buff) = buff.split("\n", 1)
+ done = self._log_streamline(host, line)
+ if done:
+ return
else:
- ts, ln = line.split(' | ', 1)
- ln = ln.strip()
+ more = s.recv(4096).decode("utf-8")
+ if not more:
+ buffering = False
+ else:
+ buff += more
+ if buff:
+                        self._log_streamline(host, buff)
- self._log("%s | %s " % (host, ln), ts=ts)
+ def _log_streamline(self, host, line):
+ if "[Zuul] Task exit code" in line:
+ return True
+ elif self._streamers_stop and "[Zuul] Log not found" in line:
+ return True
+ elif "[Zuul] Log not found" in line:
+ # don't output this line
+ return False
+ else:
+ ts, ln = line.split(' | ', 1)
+
+ self._log("%s | %s " % (host, ln), ts=ts)
+ return False
def v2_playbook_on_start(self, playbook):
self._playbook_name = os.path.splitext(playbook._file_name)[0]
@@ -202,10 +202,11 @@
msg = u"PLAY [{name}]".format(name=name)
self._log(msg)
- # Log an extra blank line to get space after each play
- self._log("")
def v2_playbook_on_task_start(self, task, is_conditional):
+ # Log an extra blank line to get space before each task
+ self._log("")
+
self._task = task
if self._play.strategy != 'free':
@@ -276,7 +277,7 @@
if is_localhost:
for line in stdout_lines:
hostname = self._get_hostname(result)
- self._log("%s | %s " % (hostname, line.strip()))
+ self._log("%s | %s " % (hostname, line))
def v2_runner_on_failed(self, result, ignore_errors=False):
result_dict = dict(result._result)
@@ -311,8 +312,6 @@
result=result, status='ERROR', result_dict=result_dict)
if ignore_errors:
self._log_message(result, "Ignoring Errors", status="ERROR")
- # Log an extra blank line to get space after each task
- self._log("")
def v2_runner_on_skipped(self, result):
if result._task.loop:
@@ -323,8 +322,6 @@
if reason:
# No reason means it's an item, which we'll log differently
self._log_message(result, status='skipping', msg=reason)
- # Log an extra blank line to get space after each skip
- self._log("")
def v2_runner_item_on_skipped(self, result):
reason = result._result.get('skip_reason')
@@ -393,9 +390,20 @@
for key in [k for k in result_dict.keys()
if k.startswith('_ansible')]:
del result_dict[key]
- self._log_message(
- msg=json.dumps(result_dict, indent=2, sort_keys=True),
- status=status, result=result)
+ keyname = next(iter(result_dict.keys()))
+ # If it has msg, that means it was like:
+ #
+ # debug:
+ # msg: Some debug text the user was looking for
+ #
+ # So we log it with self._log to get just the raw string the
+ # user provided.
+ if keyname == 'msg':
+ self._log(msg=result_dict['msg'])
+ else:
+ self._log_message(
+ msg=json.dumps(result_dict, indent=2, sort_keys=True),
+ status=status, result=result)
elif result._task.action not in ('command', 'shell'):
if 'msg' in result_dict:
self._log_message(msg=result_dict['msg'],
@@ -408,16 +416,14 @@
for res in result_dict['results']:
self._log_message(
result,
- "Runtime: {delta} Start: {start} End: {end}".format(**res))
+ "Runtime: {delta}".format(**res))
elif result_dict.get('msg') == 'All items completed':
self._log_message(result, result_dict['msg'])
else:
self._log_message(
result,
- "Runtime: {delta} Start: {start} End: {end}".format(
+ "Runtime: {delta}".format(
**result_dict))
- # Log an extra blank line to get space after each task
- self._log("")
def v2_runner_item_on_ok(self, result):
result_dict = dict(result._result)
@@ -447,12 +453,11 @@
if isinstance(result_dict['item'], str):
self._log_message(
result,
- "Item: {item} Runtime: {delta}"
- " Start: {start} End: {end}".format(**result_dict))
+ "Item: {item} Runtime: {delta}".format(**result_dict))
else:
self._log_message(
result,
- "Item: Runtime: {delta} Start: {start} End: {end}".format(
+ "Item: Runtime: {delta}".format(
**result_dict))
if self._deferred_result:
@@ -479,10 +484,11 @@
if self._deferred_result:
self._process_deferred(result)
- # Log an extra blank line to get space after each task
- self._log("")
def v2_playbook_on_stats(self, stats):
+ # Add a spacer line before the stats so that there will be a line
+ # between the last task and the recap
+ self._log("")
self._log("PLAY RECAP")
@@ -571,7 +577,7 @@
msg = result_dict['msg']
result_dict = None
if msg:
- msg_lines = msg.strip().split('\n')
+ msg_lines = msg.rstrip().split('\n')
if len(msg_lines) > 1:
self._log("{host} | {status}:".format(
host=hostname, status=status))
diff --git a/zuul/ansible/filter/zuul_filters.py b/zuul/ansible/filter/zuul_filters.py
new file mode 100644
index 0000000..4304d51
--- /dev/null
+++ b/zuul/ansible/filter/zuul_filters.py
@@ -0,0 +1,63 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def zuul_legacy_vars(zuul):
+ # omitted:
+ # ZUUL_URL
+ # ZUUL_REF
+ # ZUUL_COMMIT
+
+ short_name = zuul['project']['name'].split('/')[-1]
+ params = dict(ZUUL_UUID=zuul['build'],
+ ZUUL_PROJECT=zuul['project']['name'],
+ ZUUL_SHORT_PROJECT_NAME=short_name,
+ ZUUL_PIPELINE=zuul['pipeline'],
+ ZUUL_VOTING=zuul['voting'],
+ WORKSPACE='/home/zuul')
+ if 'branch' in zuul:
+ params['ZUUL_BRANCH'] = zuul['branch']
+
+ if 'change' in zuul:
+ changes_str = '^'.join(
+ ['%s:%s:refs/changes/%s/%s/%s' % (
+ i['project']['name'],
+ i['branch'],
+            str(i['change'])[-2:],
+ i['change'],
+ i['patchset'])
+ for i in zuul['items']])
+ params['ZUUL_CHANGES'] = changes_str
+
+ change_ids = ' '.join(['%s,%s' % (i['change'], i['patchset'])
+ for i in zuul['items']])
+ params['ZUUL_CHANGE_IDS'] = change_ids
+ params['ZUUL_CHANGE'] = str(zuul['change'])
+ params['ZUUL_PATCHSET'] = str(zuul['patchset'])
+
+ if 'newrev' in zuul or 'oldrev' in zuul:
+ params['ZUUL_REFNAME'] = zuul['ref']
+ params['ZUUL_OLDREV'] = zuul.get('oldrev', '0' * 40)
+ params['ZUUL_NEWREV'] = zuul.get('newrev', '0' * 40)
+
+ params['TOX_TESTENV_PASSENV'] = ' '.join(params.keys())
+ return params
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'zuul_legacy_vars': zuul_legacy_vars,
+ }
diff --git a/zuul/ansible/library/command.py b/zuul/ansible/library/command.py
index f701b48..0fc6129 100644
--- a/zuul/ansible/library/command.py
+++ b/zuul/ansible/library/command.py
@@ -159,9 +159,14 @@
# Jenkins format but with microsecond resolution instead of
# millisecond. It is kept so log parsing/formatting remains
# consistent.
- ts = datetime.datetime.now()
- outln = '%s | %s' % (ts, ln)
- self.logfile.write(outln.encode('utf-8'))
+ ts = str(datetime.datetime.now()).encode('utf-8')
+ if not isinstance(ln, bytes):
+ try:
+ ln = ln.encode('utf-8')
+ except Exception:
+ ln = repr(ln).encode('utf-8') + b'\n'
+ outln = b'%s | %s' % (ts, ln)
+ self.logfile.write(outln)
def follow(fd, log_uuid):
diff --git a/zuul/ansible/logconfig.py b/zuul/ansible/logconfig.py
index 7c3507b..7ef43a8 100644
--- a/zuul/ansible/logconfig.py
+++ b/zuul/ansible/logconfig.py
@@ -13,6 +13,7 @@
# under the License.
import abc
+import copy
import logging.config
import json
import os
@@ -161,14 +162,15 @@
logging.config.dictConfig(self._config)
def writeJson(self, filename: str):
- open(filename, 'w').write(json.dumps(self._config, indent=2))
+ with open(filename, 'w') as f:
+ f.write(json.dumps(self._config, indent=2))
class JobLoggingConfig(DictLoggingConfig):
def __init__(self, config=None, job_output_file=None):
if not config:
- config = _DEFAULT_JOB_LOGGING_CONFIG.copy()
+ config = copy.deepcopy(_DEFAULT_JOB_LOGGING_CONFIG)
super(JobLoggingConfig, self).__init__(config=config)
if job_output_file:
self.job_output_file = job_output_file
@@ -190,7 +192,7 @@
def __init__(self, config=None, server=None):
if not config:
- config = _DEFAULT_SERVER_LOGGING_CONFIG.copy()
+ config = copy.deepcopy(_DEFAULT_SERVER_LOGGING_CONFIG)
super(ServerLoggingConfig, self).__init__(config=config)
if server:
self.server = server
@@ -206,7 +208,7 @@
# config above because we're templating out the filename. Also, we
# only want to add the handler if we're actually going to use it.
for name, handler in _DEFAULT_SERVER_FILE_HANDLERS.items():
- server_handler = handler.copy()
+ server_handler = copy.deepcopy(handler)
server_handler['filename'] = server_handler['filename'].format(
server=server)
self._config['handlers'][name] = server_handler
diff --git a/zuul/cmd/executor.py b/zuul/cmd/executor.py
index 06ef0ba..63c621d 100755
--- a/zuul/cmd/executor.py
+++ b/zuul/cmd/executor.py
@@ -82,7 +82,7 @@
self.log.info("Starting log streamer")
streamer = zuul.lib.log_streamer.LogStreamer(
- self.user, '0.0.0.0', self.finger_port, self.job_dir)
+ self.user, '::', self.finger_port, self.job_dir)
# Keep running until the parent dies:
pipe_read = os.fdopen(pipe_read)
diff --git a/zuul/cmd/migrate.py b/zuul/cmd/migrate.py
new file mode 100644
index 0000000..05278aa
--- /dev/null
+++ b/zuul/cmd/migrate.py
@@ -0,0 +1,1478 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# TODO(mordred):
+# * Read and apply filters from the jobs: section
+# * Figure out shared job queues
+# * Emit job definitions
+ # * figure out from builders whether or not it's a normal job or
+ #   a devstack-legacy job
+# * Handle emitting arbitrary tox jobs (see tox-py27dj18)
+
+import argparse
+import collections
+import copy
+import itertools
+import getopt
+import logging
+import os
+import subprocess
+import tempfile
+import re
+from typing import Any, Dict, List, Optional # flake8: noqa
+
+import jenkins_jobs.builder
+from jenkins_jobs.formatter import deep_format
+import jenkins_jobs.formatter
+from jenkins_jobs.parser import matches
+import jenkins_jobs.parser
+import yaml
+
# Registry of expanded JJB jobs keyed by their still-templated name; filled
# in by the patched expandYamlForTemplateJob below.
JOBS_BY_ORIG_TEMPLATE = {}  # type: ignore
# Job-name suffixes to strip; populated from the mapping file by JobMapping.
SUFFIXES = []  # type: ignore
# Environment attached to every emitted shell task.
ENVIRONMENT = '{{ host_vars[inventory_hostname] | zuul_legacy_vars }}'
DESCRIPTION = """Migrate zuul v2 and Jenkins Job Builder to Zuul v3.

This program takes a zuul v2 layout.yaml and a collection of Jenkins Job
Builder job definitions and transforms them into a Zuul v3 config. An
optional mapping config can be given that defines how to map old jobs
to new jobs.
"""
+
def deal_with_shebang(data):
    """Convert a leading shebang line into Ansible-friendly content.

    Ansible shell blocks do not honor shebang lines, but many scripts
    start with e.g. ``#!/bin/bash -xe``.  The shebang is removed, each of
    the -u / -e / -x flags becomes an explicit ``set`` command prepended
    to the script, and the interpreter path is returned separately.

    Returns a (executable, data) tuple; executable is None when there is
    no shebang or the interpreter is the Ansible default /bin/sh.
    """
    if not data.startswith('#!'):
        return (None, data)
    shebang, newline, remainder = data.partition('\n')
    words = shebang.split()
    # Strip the leading '#!' to get the interpreter path.
    executable = words[0][2:]
    if executable == '/bin/sh':
        # Ansible default; no need to pass it explicitly.
        executable = None
    prefix = []
    if len(words) > 1:
        optlist, _ = getopt.getopt(words[1:], 'uex')
        flags = {opt for opt, _ in optlist}
        # Emit the set commands in a fixed -u, -e, -x order.
        for opt, command in (('-u', 'set -u'),
                             ('-e', 'set -e'),
                             ('-x', 'set -x')):
            if opt in flags:
                prefix.append(command)
    body_lines = remainder.split('\n') if newline else []
    data = '\n'.join(prefix + body_lines).lstrip()
    return (executable, data)
+
+
+def _extract_from_vars(line):
+ # export PROJECTS="openstack/blazar $PROJECTS"
+ # export DEVSTACK_PROJECT_FROM_GIT=python-swiftclient
+ # export DEVSTACK_PROJECT_FROM_GIT="python-octaviaclient"
+ # export DEVSTACK_PROJECT_FROM_GIT+=",glean"
+ projects = []
+ line = line.replace('"', '').replace('+', '').replace(',', ' ')
+ if (line.startswith('export PROJECTS') or
+ line.startswith('export DEVSTACK_PROJECT_FROM_GIT')):
+ nothing, project_string = line.split('=')
+ project_string = project_string.replace('$PROJECTS', '').strip()
+ projects = project_string.split()
+ return projects
+
+
def extract_projects(data):
    """Scan a shell builder's text for the projects it needs.

    Picks up project names from zuul-cloner clonemap heredocs, e.g.::

        clonemap:
          - name: openstack/windmill
            dest: .
        EOF

    from continued zuul-cloner command lines, and from devstack-gate
    style PROJECTS / DEVSTACK_PROJECT_FROM_GIT exports.
    """
    projects = []
    in_map = False
    in_cli = False
    for raw_line in data.split('\n'):
        line = raw_line.strip()
        if line == 'clonemap:':
            in_map = True
            continue
        if line == 'EOF':
            in_map = False
            continue
        if line.startswith('/usr/zuul-env/bin/zuul-cloner'):
            in_cli = True
            continue
        if in_cli and not line.endswith('\\'):
            # A line without a continuation ends the cloner command.
            in_cli = False
            continue
        if in_map:
            if line.startswith('- name:'):
                _, project = line.split(':')
                project = project.strip().replace("'", '').replace('"', '')
                if project == '$ZUUL_PROJECT':
                    continue
                projects.append(project)
        elif in_cli and line.startswith('openstack/'):
            projects.append(line.replace('\\', '').strip())
        elif in_cli:
            continue
        else:
            projects.extend(_extract_from_vars(line))
    return projects
+
+
def expand_project_names(required, full):
    """Expand short or full project names into known full names.

    required -- a container (or string) tested with the ``in`` operator,
        so passing a job-name string matches embedded repo names.
    full -- the complete list of known full names like 'openstack/nova'.

    A known project is included when either its full name or its bare
    repo name appears in ``required``.
    """
    projects = []
    for name in full:
        # BUG FIX: a plain split('/') raised ValueError for names with
        # more (or fewer) than one slash; take everything after the
        # first slash as the repo name instead.
        repo = name.split('/', 1)[-1]
        if repo in required or name in required:
            projects.append(name)
    return projects
+
+
def should_use_block(value):
    """Return True when *value* must be dumped in YAML block style.

    True for any value containing a line-break-ish character that plain
    scalars cannot round-trip.  From:
    http://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data flake8: noqa
    """
    return any(c in value
               for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029")
+
+
def my_represent_scalar(self, tag, value, style=None):
    """Represent a scalar, forcing literal block style ('|') for
    multi-line values so the dumped YAML stays readable."""
    if style is None:
        style = '|' if should_use_block(value) else self.default_style

    node = yaml.representer.ScalarNode(tag, value, style=style)
    if self.alias_key is not None:
        self.represented_objects[self.alias_key] = node
    return node
+
def project_representer(dumper, data):
    """Represent an OrderedDict project entry as a plain YAML map,
    preserving the original key order."""
    items = data.items()
    return dumper.represent_mapping('tag:yaml.org,2002:map', items)
+
+
def construct_yaml_map(self, node):
    # Loader constructor that builds OrderedDicts instead of plain dicts
    # so loaded YAML preserves key order when re-emitted.
    data = collections.OrderedDict()
    # Yield the (still empty) container first so PyYAML can resolve
    # recursive/aliased references, then fill it in below.
    yield data
    # NOTE(review): this result is unused; the mapping is rebuilt
    # key-by-key below so that ordering is preserved — confirm the call
    # isn't needed for a side effect.
    value = self.construct_mapping(node)

    if isinstance(node, yaml.MappingNode):
        self.flatten_mapping(node)
    else:
        raise yaml.constructor.ConstructorError(
            None, None,
            'expected a mapping node, but found %s' % node.id,
            node.start_mark)

    mapping = collections.OrderedDict()
    for key_node, value_node in node.value:
        key = self.construct_object(key_node, deep=False)
        try:
            # Reject unhashable keys with a useful error location.
            hash(key)
        except TypeError as exc:
            raise yaml.constructor.ConstructorError(
                'while constructing a mapping', node.start_mark,
                'found unacceptable key (%s)' % exc, key_node.start_mark)
        value = self.construct_object(value_node, deep=False)
        mapping[key] = value
    data.update(mapping)
+
+
class IndentedEmitter(yaml.emitter.Emitter):
    """Emitter that indents block sequences (the stock emitter writes
    list items flush with their parent key)."""
    def expect_block_sequence(self):
        self.increase_indent(flow=False, indentless=False)
        self.state = self.expect_first_block_sequence_item
+
+
class IndentedDumper(IndentedEmitter, yaml.serializer.Serializer,
                     yaml.representer.Representer, yaml.resolver.Resolver):
    """Equivalent of yaml.Dumper assembled from the stock mixins, but
    using IndentedEmitter so nested sequences are indented."""
    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None):
        # Mirror yaml.Dumper.__init__: initialize each mixin in turn.
        IndentedEmitter.__init__(
            self, stream, canonical=canonical,
            indent=indent, width=width,
            allow_unicode=allow_unicode,
            line_break=line_break)
        yaml.serializer.Serializer.__init__(
            self, encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version, tags=tags)
        yaml.representer.Representer.__init__(
            self, default_style=default_style,
            default_flow_style=default_flow_style)
        yaml.resolver.Resolver.__init__(self)
+
+
def ordered_load(stream, *args, **kwargs):
    """Load YAML from *stream*; named wrapper so callers get ordered
    mappings once the OrderedDict constructors are registered."""
    return yaml.load(*args, stream=stream, **kwargs)
+
+
def ordered_dump(data, stream=None, *args, **kwargs):
    """Dump *data* as YAML with the indentation-aware dumper.

    Writes to *stream* when one is given, otherwise returns the string.
    """
    dumper = IndentedDumper
    # We need to do this because of how template expansion into a project
    # works. Without it, we end up with YAML references to the expanded jobs.
    dumper.ignore_aliases = lambda self, data: True

    # NOTE(review): the replace() separator literals may have lost
    # whitespace in transit — confirm against git history.
    output = yaml.dump(
        data, default_flow_style=False,
        Dumper=dumper, width=80, *args, **kwargs).replace(
            '\n -', '\n\n -')
    if stream:
        stream.write(output)
    else:
        return output
+
+
def get_single_key(var):
    """Return the single "interesting" key of *var*: the string itself,
    the first element of a list, or the first key of a dict."""
    if isinstance(var, str):
        return var
    if isinstance(var, list):
        return var[0]
    return next(iter(var))
+
+
def has_single_key(var):
    """True when *var* is a bare string, a one-element list, or a dict
    with exactly one key whose value is empty/falsy."""
    if isinstance(var, list):
        return len(var) == 1
    if isinstance(var, str):
        return True
    keys = list(var.keys())
    # A dict counts only when it has one key with no real payload under it.
    return len(keys) == 1 and not var[keys[0]]
+
+
def combination_matches(combination, match_combinations):
    """Check whether *combination* matches any of the given combination
    globs.  A glob is a combination in which any missing key is treated
    as matching, so

        (key1=2, key2=3)

    matches the glob (key2=3) but not (key1=2, key2=2).
    """
    return any(
        all(glob.get(key, value) == value
            for key, value in combination.items())
        for glob in match_combinations)
+
+
def expandYamlForTemplateJob(self, project, template, jobs_glob=None):
    # Monkeypatch replacement for jenkins_jobs' template expansion.  In
    # addition to the stock behavior it records every expanded job in
    # JOBS_BY_ORIG_TEMPLATE keyed by its still-templated name so the
    # migration can later map template references to job content.
    dimensions = []
    template_name = template['name']
    # Keep a pristine copy before expansion mutates things.
    orig_template = copy.deepcopy(template)

    # reject keys that are not useful during yaml expansion
    for k in ['jobs']:
        project.pop(k)
    excludes = project.pop('exclude', [])
    for (k, v) in project.items():
        tmpk = '{{{0}}}'.format(k)
        if tmpk not in template_name:
            continue
        if type(v) == list:
            # Each list-valued axis contributes one expansion dimension.
            dimensions.append(zip([k] * len(v), v))
    # XXX somewhat hackish to ensure we actually have a single
    # pass through the loop
    if len(dimensions) == 0:
        dimensions = [(("", ""),)]

    for values in itertools.product(*dimensions):
        params = copy.deepcopy(project)
        params = self.applyDefaults(params, template)

        expanded_values = {}
        for (k, v) in values:
            if isinstance(v, dict):
                # A dict axis value carries extra parameters under it.
                inner_key = next(iter(v))
                expanded_values[k] = inner_key
                expanded_values.update(v[inner_key])
            else:
                expanded_values[k] = v

        params.update(expanded_values)
        params = deep_format(params, params)
        if combination_matches(params, excludes):
            log = logging.getLogger("zuul.Migrate.YamlParser")
            log.debug('Excluding combination %s', str(params))
            continue

        allow_empty_variables = self.config \
            and self.config.has_section('job_builder') \
            and self.config.has_option(
                'job_builder', 'allow_empty_variables') \
            and self.config.getboolean(
                'job_builder', 'allow_empty_variables')

        for key in template.keys():
            if key not in params:
                params[key] = template[key]

        # Expand with a placeholder project name so the resulting name
        # can be re-templated per-project later.
        params['template-name'] = template_name
        project_name = params['name']
        params['name'] = '$ZUUL_SHORT_PROJECT_NAME'
        expanded = deep_format(template, params, allow_empty_variables)

        job_name = expanded.get('name')
        templated_job_name = job_name
        if job_name:
            job_name = job_name.replace(
                '$ZUUL_SHORT_PROJECT_NAME', project_name)
            expanded['name'] = job_name
        if jobs_glob and not matches(job_name, jobs_glob):
            continue

        self.formatDescription(expanded)
        # Remember where this job came from so project templates can be
        # reconstructed during migration.
        expanded['orig_template'] = orig_template
        expanded['template_name'] = template_name
        self.jobs.append(expanded)
        JOBS_BY_ORIG_TEMPLATE[templated_job_name] = expanded

# Install the patched expansion into jenkins_jobs' YAML parser.
jenkins_jobs.parser.YamlParser.expandYamlForTemplateJob = \
    expandYamlForTemplateJob
+
+
class JJB(jenkins_jobs.builder.Builder):
    """Stripped-down jenkins_jobs Builder used only to parse and expand
    job definitions (no Jenkins connection, no plugin list)."""

    def __init__(self):
        self.global_config = None
        self._plugins_list = []

    def expandComponent(self, component_type, component, template_data):
        # Recursively expand a macro reference (builder/publisher/wrapper)
        # into the list of concrete components it stands for.
        component_list_type = component_type + 's'
        new_components = []
        if isinstance(component, dict):
            name, component_data = next(iter(component.items()))
            if template_data:
                component_data = jenkins_jobs.formatter.deep_format(
                    component_data, template_data, True)
        else:
            name = component
            component_data = {}

        new_component = self.parser.data.get(component_type, {}).get(name)
        if new_component:
            # It's a macro: expand each of its sub-components in turn.
            for new_sub_component in new_component[component_list_type]:
                new_components.extend(
                    self.expandComponent(component_type,
                                         new_sub_component, component_data))
        else:
            new_components.append({name: component_data})
        return new_components

    def expandMacros(self, job):
        # Replace macro references in *job* with their expanded contents.
        for component_type in ['builder', 'publisher', 'wrapper']:
            component_list_type = component_type + 's'
            new_components = []
            for new_component in job.get(component_list_type, []):
                new_components.extend(self.expandComponent(component_type,
                                                           new_component, {}))
            job[component_list_type] = new_components
+
+
class OldProject:
    """A v2 project: its name plus the set of jobs it runs in gate."""

    def __init__(self, name, gate_jobs):
        self.name = name
        self.gate_jobs = gate_jobs
+
+
class OldJob:
    """A v2 job name, optionally pinned to a shared change queue."""

    def __init__(self, name):
        self.name = name
        self.queue_name = None

    def __repr__(self):
        return self.name
+
+
class Job:
    """A Zuul v3 job being assembled from a v2 / JJB definition."""

    log = logging.getLogger("zuul.Migrate")

    def __init__(self,
                 orig: str,
                 name: str=None,
                 content: Dict[str, Any]=None,
                 vars: Dict[str, str]=None,
                 nodes: List[str]=None,
                 parent=None) -> None:
        # orig is the v2 job name, possibly still containing '{name}'
        # style template placeholders.
        self.orig = orig
        self.voting = True
        self.name = name
        self.content = content.copy() if content else None
        self.vars = vars or {}
        self.required_projects = []  # type: ignore
        self.nodes = nodes or []
        self.parent = parent
        self.branch = None
        self.files = None
        # Expanded JJB job content; attached later via addJJBJob().
        self.jjb_job = None
        # When False this job is only referenced, not written out.
        self.emit = True

        if self.content and not self.name:
            self.name = get_single_key(content)
        if not self.name:
            self.name = self.orig
        # Drop template placeholders from the derived name.
        self.name = self.name.replace('-{name}', '').replace('{name}-', '')

        # Strip any suffixes configured in the mapping file.
        for suffix in SUFFIXES:
            suffix = '-{suffix}'.format(suffix=suffix)

            if self.name.endswith(suffix):
                self.name = self.name.replace(suffix, '')
+
+ def _stripNodeName(self, node):
+ node_key = '-{node}'.format(node=node)
+ self.name = self.name.replace(node_key, '')
+
    def setNoEmit(self):
        """Mark this job as reference-only; no definition is emitted."""
        self.emit = False

    def setVars(self, vars):
        """Replace this job's variables dict."""
        self.vars = vars

    def setParent(self, parent):
        """Record the job this one depends on in a pipeline."""
        self.parent = parent
+
+ def extractNode(self, default_node, labels):
+ matching_label = None
+ for label in labels:
+ if label in self.orig:
+ if not matching_label:
+ matching_label = label
+ elif len(label) > len(matching_label):
+ matching_label = label
+
+ if matching_label:
+ if matching_label == default_node:
+ self._stripNodeName(matching_label)
+ else:
+ self.nodes.append(matching_label)
+
    def getDepends(self):
        """Return the pipeline dependencies (just the parent's name)."""
        return [self.parent.name]

    def getNodes(self):
        """Return the node labels this job requests."""
        return self.nodes
+
    def addJJBJob(self, jobs):
        """Attach the expanded JJB job definition for this job.

        Templated names are looked up in the global expansion registry;
        concrete names are looked up in *jobs* directly.
        """
        if '{name}' in self.orig:
            self.jjb_job = JOBS_BY_ORIG_TEMPLATE[self.orig.format(
                name='$ZUUL_SHORT_PROJECT_NAME')]
        else:
            self.jjb_job = jobs[self.orig]
+
+ def getTimeout(self):
+ if self.jjb_job:
+ for wrapper in self.jjb_job.get('wrappers', []):
+ if isinstance(wrapper, dict):
+ build_timeout = wrapper.get('timeout')
+ if isinstance(build_timeout, dict):
+ timeout = build_timeout.get('timeout')
+ if timeout is not None:
+ timeout = int(timeout) * 60
+
    @property
    def short_name(self):
        """Job name without the 'legacy-' prefix."""
        return self.name.replace('legacy-', '')

    @property
    def job_path(self):
        """Relative directory holding this job's generated playbooks."""
        return 'playbooks/legacy/{name}'.format(name=self.short_name)
+
    def _getRsyncOptions(self, source):
        """Build rsync filter options that pull only *source* (plus the
        directories needed to reach it) from the node."""
        # If the source starts with ** then we want to match any
        # number of directories, so don't anchor the include filter.
        # If it does not start with **, then the intent is likely to
        # at least start by matching an immediate file or subdirectory
        # (even if later we have a ** in the middle), so in this case,
        # anchor it to the root of the transfer (the workspace).
        if not source.startswith('**'):
            source = os.path.join('/', source)
        # These options mean: include the thing we want, include any
        # directories (so that we continue to search for the thing we
        # want no matter how deep it is), exclude anything that
        # doesn't match the thing we want or is a directory, then get
        # rid of empty directories left over at the end.
        rsync_opts = ['--include="%s"' % source,
                      '--include="*/"',
                      '--exclude="*"',
                      '--prune-empty-dirs']
        return rsync_opts
+
+ def _makeSCPTask(self, publisher):
+ # NOTE(mordred) About docs-draft manipulation:
+ # The target of html/ was chosen to put the node contents into the
+ # html dir inside of logs such that if the node's contents have an
+ # index.html in them setting the success-url to html/ will render
+ # things as expected. Existing builder macros look like:
+ #
+ # - publisher:
+ # name: upload-sphinx-draft
+ # publishers:
+ # - scp:
+ # site: 'static.openstack.org'
+ # files:
+ # - target: 'docs-draft/$LOG_PATH'
+ # source: 'doc/build/html/**'
+ # keep-hierarchy: true
+ # copy-after-failure: true
+ #
+ # Which is pulling the tree of the remote html directory starting with
+ # doc/build/html and putting that whole thing into
+ # docs-draft/$LOG_PATH.
+ #
+ # Then there is a success-pattern in layout.yaml that looks like:
+ #
+ # http://{url}/{log_path}/doc/build/html/
+ #
+ # Which gets reports. There are many variations on that URL. So rather
+ # than needing to figure out varying success-urls to report in v3,
+ # we'll remote the ** and not process this through the rsync_opts
+ # processing we use for the other publishers, but instead will just
+ # pass doc/build/html/ to get the contents of doc/build/html/ and we'll
+ # put those in {{ log_root }}/html/ locally meaning the success-url
+ # can always be html/. This should work for all values of source
+ # from v2.
+ tasks = []
+ artifacts = False
+ draft = False
+ site = publisher['scp']['site']
+ for scpfile in publisher['scp']['files']:
+ if 'ZUUL_PROJECT' in scpfile.get('source', ''):
+ self.log.error(
+ "Job {name} uses ZUUL_PROJECT in source".format(
+ name=self.name))
+ continue
+
+ if scpfile.get('copy-console'):
+ continue
+ else:
+ src = "{{ ansible_user_dir }}"
+ rsync_opts = self._getRsyncOptions(scpfile['source'])
+
+ target = scpfile['target']
+ # TODO(mordred) Generalize this next section, it's SUPER
+ # openstack specific. We can likely do this in mapping.yaml
+ if site == 'static.openstack.org':
+ for f in ('service-types', 'specs'):
+ if target.startswith(f):
+ self.log.error(
+ "Job {name} uses {f} publishing".format(
+ name=self.name, f=f))
+ continue
+ if target.startswith('docs-draft'):
+ target = "{{ zuul.executor.log_root }}/html/"
+ src = scpfile['source'].replace('**', '')
+ rsync_opts = None
+ draft = True
+ elif site == 'tarballs.openstack.org':
+ if not target.startswith('tarballs'):
+ self.log.error(
+ 'Job {name} wants to publish artifacts to non'
+ ' tarballs dir'.format(name=self.name))
+ continue
+ if target.startswith('tarballs/ci'):
+ target = target.split('/', 3)[-1]
+ else:
+ target = target.split('/', 2)[-1]
+ target = "{{ zuul.executor.work_root }}/artifacts/" + target
+ artifacts = True
+ elif site == 'yaml2ical':
+ self.log.error('Job {name} uses yaml2ical publisher')
+ continue
+
+ syncargs = collections.OrderedDict()
+ syncargs['src'] = src
+ syncargs['dest'] = target
+ syncargs['copy_links'] = 'yes'
+ syncargs['mode'] = 'pull'
+ syncargs['verify_host'] = True
+ if rsync_opts:
+ syncargs['rsync_opts'] = rsync_opts
+ task = collections.OrderedDict()
+ task['name'] = 'copy files from {src} on node to'.format(src=src)
+ task['synchronize'] = syncargs
+ # We don't use retry_args here because there is a bug in
+ # the synchronize module that breaks subsequent attempts at
+ # retrying. Better to try once and get an accurate error
+ # message if it fails.
+ # https://github.com/ansible/ansible/issues/18281
+ tasks.append(task)
+
+ if artifacts:
+ ensure_task = collections.OrderedDict()
+ ensure_task['name'] = 'Ensure artifacts directory exists'
+ ensure_task['file'] = collections.OrderedDict(
+ path="{{ zuul.executor.work_root }}/artifacts",
+ state='directory')
+ ensure_task['delegate_to'] = 'localhost'
+ tasks.insert(0, ensure_task)
+ return dict(tasks=tasks, artifacts=artifacts, draft=draft)
+
    def _emitShellTask(self, data, syntax_check):
        """Turn a JJB shell builder's script into an Ansible shell task.

        Returns None (meaning: fall back to a script file) when
        *syntax_check* is set and ansible-playbook rejects the result.
        """
        shell, data = deal_with_shebang(data)
        task = collections.OrderedDict()
        task['shell'] = data
        if shell:
            # A non-default interpreter came from the shebang line.
            task['args'] = dict(executable=shell)

        if syntax_check:
            # Emit a test playbook with this shell task in it then run
            # ansible-playbook --syntax-check on it. This will fail if there
            # are embedding issues, such as with unbalanced single quotes
            # The end result should be less scripts and more shell
            play = dict(hosts='all', tasks=[task])
            (fd, tmp_path) = tempfile.mkstemp()
            try:
                f = os.fdopen(fd, 'w')
                ordered_dump([play], f)
                f.close()
                proc = subprocess.run(
                    ['ansible-playbook', '--syntax-check', tmp_path],
                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                if proc.returncode != 0:
                    # Return of None means we must emit a script
                    self.log.error(
                        "Job {name} had an ansible syntax error, using script"
                        " instead of shell".format(name=self.name))
                    return None
            finally:
                os.unlink(tmp_path)
        return task
+
+ def _emitScriptContent(self, data, playbook_dir, seq):
+ script_fn = '%s-%02d.sh' % (self.short_name, seq)
+ script_path = os.path.join(playbook_dir, script_fn)
+
+ with open(script_path, 'w') as script:
+ if not data.startswith('#!'):
+ data = '#!/bin/bash -x\n %s' % (data,)
+ script.write(data)
+
+ task = collections.OrderedDict()
+ task['name'] = 'Running playbooks/legacy/{playbook}'.format(
+ playbook=script_fn)
+ task['script'] = script_fn
+ return task
+
    def _makeBuilderTask(self, playbook_dir, builder, sequence, syntax_check):
        """Convert one JJB shell builder into a task (shell or script).

        Returns None for builders that are pure log noise.
        """
        # Don't write a script to echo the template line
        # TODO(mordred) Put these into mapping.yaml
        if builder['shell'].startswith('echo JJB template: '):
            return
        if 'echo "Detailed logs:' in builder['shell']:
            return

        task = self._emitShellTask(builder['shell'], syntax_check)
        if not task:
            # Shell embedding failed the syntax check; use a script file.
            task = self._emitScriptContent(
                builder['shell'], playbook_dir, sequence)
        task['environment'] = ENVIRONMENT
        return task
+
    def _transformPublishers(self, jjb_job):
        """Split the job's publishers into early and late groups.

        scp publishers that copy the console log must run after all the
        others (so the captured log is complete); everything else keeps
        its original relative order.
        """
        early_publishers = []
        late_publishers = []
        old_publishers = jjb_job.get('publishers', [])
        for publisher in old_publishers:
            early_scpfiles = []
            late_scpfiles = []
            if 'scp' not in publisher:
                early_publishers.append(publisher)
                continue
            copy_console = False
            for scpfile in publisher['scp']['files']:
                if scpfile.get('copy-console'):
                    # Console copies must keep hierarchy and run last.
                    scpfile['keep-hierarchy'] = True
                    late_scpfiles.append(scpfile)
                    copy_console = True
                else:
                    early_scpfiles.append(scpfile)
            publisher['scp']['files'] = early_scpfiles + late_scpfiles
            if copy_console:
                late_publishers.append(publisher)
            else:
                early_publishers.append(publisher)
        publishers = early_publishers + late_publishers
        if old_publishers != publishers:
            self.log.debug("Transformed job publishers")
        return early_publishers, late_publishers
+
+ def emitPlaybooks(self, jobsdir, syntax_check=False):
+ has_artifacts = False
+ has_draft = False
+ if not self.jjb_job:
+ if self.emit:
+ self.log.error(
+ 'Job {name} has no job content'.format(name=self.name))
+ return False, False, False
+
+ playbook_dir = os.path.join(jobsdir, self.job_path)
+ if not os.path.exists(playbook_dir):
+ os.makedirs(playbook_dir)
+
+ run_playbook = os.path.join(self.job_path, 'run.yaml')
+ post_playbook = os.path.join(self.job_path, 'post.yaml')
+ tasks = []
+ sequence = 0
+ for builder in self.jjb_job.get('builders', []):
+ if 'shell' in builder:
+ self.required_projects.extend(
+ extract_projects(builder['shell']))
+ task = self._makeBuilderTask(
+ playbook_dir, builder, sequence, syntax_check)
+ if task:
+ if 'script' in task:
+ sequence += 1
+ tasks.append(task)
+ play = collections.OrderedDict()
+ play['hosts'] = 'all'
+ play['name'] = 'Autoconverted job {name} from old job {old}'.format(
+ name=self.name, old=self.orig)
+ play['tasks'] = tasks
+
+ with open(run_playbook, 'w') as run_playbook_out:
+ ordered_dump([play], run_playbook_out)
+
+ has_post = False
+ tasks = []
+ early_publishers, late_publishers = self._transformPublishers(
+ self.jjb_job)
+ for publishers in [early_publishers, late_publishers]:
+ for publisher in publishers:
+ if 'scp' in publisher:
+ ret = self._makeSCPTask(publisher)
+ if ret['artifacts']:
+ has_artifacts = True
+ if ret['draft']:
+ has_draft = True
+ tasks.extend(ret['tasks'])
+ if 'afs' in builder:
+ self.log.error(
+ "Job {name} uses AFS publisher".format(name=self.name))
+ if tasks:
+ has_post = True
+ play = collections.OrderedDict()
+ play['hosts'] = 'all'
+ play['tasks'] = tasks
+ with open(post_playbook, 'w') as post_playbook_out:
+ ordered_dump([play], post_playbook_out)
+ return has_artifacts, has_post, has_draft
+
+ def toJobDict(
+ self, has_artifacts=False, has_post=False, has_draft=False,
+ project_names=[]):
+ output = collections.OrderedDict()
+ output['name'] = self.name
+
+ expanded_projects = []
+ if self.required_projects:
+ expanded_projects = expand_project_names(
+ self.required_projects, project_names)
+ # Look for project names in the job name. Lookie there - the
+ # python in operator works on lists and strings.
+ expanded_projects.extend(expand_project_names(
+ self.name, project_names))
+
+ if 'dsvm' in self.name:
+ output['parent'] = 'legacy-dsvm-base'
+ elif 'puppet-openstack-integration' in self.name:
+ output['parent'] = 'legacy-puppet-openstack-integration'
+ elif 'openstack/puppet-openstack-integration' in expanded_projects:
+ output['parent'] = 'legacy-puppet-openstack-integration'
+ elif has_artifacts:
+ output['parent'] = 'publish-openstack-artifacts'
+ elif has_draft:
+ output['success-url'] = 'html/'
+ output['run'] = os.path.join(self.job_path, 'run.yaml')
+ if has_post:
+ output['post-run'] = os.path.join(self.job_path, 'post.yaml')
+
+ if self.vars:
+ output['vars'] = self.vars.copy()
+ timeout = self.getTimeout()
+ if timeout:
+ output['timeout'] = timeout
+ output['vars']['BUILD_TIMEOUT'] = str(timeout * 1000)
+
+ if self.nodes:
+ output['nodes'] = self.getNodes()
+
+ if expanded_projects:
+ output['required-projects'] = list(set(expanded_projects))
+
+ return output
+
    def toPipelineDict(self):
        """Render this job as it appears in a project's pipeline list.

        Returns a bare string when the job needs no extra attributes.
        """
        if self.content:
            # Explicit content from the mapping file wins.
            output = self.content
        else:
            output = collections.OrderedDict()
            output[self.name] = collections.OrderedDict()

        if self.parent:
            output[self.name].setdefault('dependencies', self.getDepends())

        if not self.voting:
            output[self.name].setdefault('voting', False)

        if self.vars:
            # NOTE(review): when 'vars' is not already present, get()
            # returns a fresh dict that is updated but never assigned
            # back into output — confirm this drop is intended.
            job_vars = output[self.name].get('vars', collections.OrderedDict())
            job_vars.update(self.vars)

        if self.branch:
            output[self.name]['branch'] = self.branch

        if self.files:
            output[self.name]['files'] = self.files

        if not output[self.name]:
            return self.name

        return output
+
+
class JobMapping:
    """Maps v2 job and template names to their v3 equivalents.

    Mappings come from an optional mapping file (direct renames, regex
    renames with captured variables, template renames), with sensible
    defaults when no file is given.
    """

    log = logging.getLogger("zuul.Migrate.JobMapping")

    def __init__(self, nodepool_config, layout, mapping_file=None):
        self.layout = layout
        self.job_direct = {}
        self.labels = []
        self.job_mapping = []
        self.template_mapping = {}
        self.jjb_jobs = {}
        self.seen_new_jobs = []
        self.unshare = []
        # BUG FIX: close the config files instead of leaking the handles.
        with open(nodepool_config, 'r') as nodepool_fh:
            nodepool_data = ordered_load(nodepool_fh)
        for label in nodepool_data['labels']:
            self.labels.append(label['name'])
        if not mapping_file:
            self.default_node = 'ubuntu-xenial'
        else:
            with open(mapping_file, 'r') as mapping_fh:
                mapping_data = ordered_load(mapping_fh)
            self.default_node = mapping_data['default-node']
            global SUFFIXES
            # Module-level so Job.__init__ can strip these suffixes.
            SUFFIXES = mapping_data.get('strip-suffixes', [])
            self.unshare = mapping_data.get('unshare', [])
            for map_info in mapping_data.get('job-mapping', []):
                if map_info['old'].startswith('^'):
                    # Regex rules are tried in order at lookup time.
                    map_info['pattern'] = re.compile(map_info['old'])
                    self.job_mapping.append(map_info)
                else:
                    self.job_direct[map_info['old']] = map_info['new']

            for map_info in mapping_data.get('template-mapping', []):
                self.template_mapping[map_info['old']] = map_info['new']
+
    def makeNewName(self, new_name, match_dict):
        """Fill regex capture groups into a new-style job name."""
        return new_name.format(**match_dict)

    def hasProjectTemplate(self, old_name):
        """True when a v2 template has a v3 replacement."""
        return old_name in self.template_mapping

    def setJJBJobs(self, jjb_jobs):
        """Provide the expanded JJB job dict used for later lookups."""
        self.jjb_jobs = jjb_jobs

    def getNewTemplateName(self, old_name):
        """Map a v2 template name to its v3 name (identity if unmapped)."""
        return self.template_mapping.get(old_name, old_name)
+
    def mapNewJob(self, name, info) -> Optional[Job]:
        """Apply one regex mapping rule to *name*.

        Returns the new Job (with substituted vars) or None when the
        rule's pattern does not match.
        """
        matches = info['pattern'].search(name)
        if not matches:
            return None
        match_dict = matches.groupdict()
        if isinstance(info['new'], dict):
            # The rule carries full job content keyed by a templated name.
            new_job = info['new']
            old_name = get_single_key(new_job)
            new_name = old_name.format(**match_dict)
            job = Job(orig=name, content={new_name: new_job[old_name]})
        else:
            job = Job(orig=name, name=info['new'].format(**match_dict))

        if 'vars' in info:
            job.setVars(self._expandVars(info, match_dict))

        return job
+
+ def _expandVars(self, info, match_dict):
+ job_vars = info['vars'].copy()
+ for key in job_vars.keys():
+ job_vars[key] = job_vars[key].format(**match_dict)
+ return job_vars
+
    def getNewJob(self, job_name, remove_gate):
        """Map a v2 job name to its v3 Job object.

        Precedence: direct mappings, then regex mappings, then an
        autogenerated 'legacy-' job.  Layout matchers (voting, branch,
        files) and the node label are applied afterwards.
        """
        if job_name in self.job_direct:
            if isinstance(self.job_direct[job_name], dict):
                job = Job(job_name, content=self.job_direct[job_name])
            else:
                job = Job(job_name, name=self.job_direct[job_name])
            if job_name not in self.seen_new_jobs:
                self.seen_new_jobs.append(self.job_direct[job_name])
            # Mapped jobs already exist; don't emit a definition for them.
            job.setNoEmit()
            return job

        new_job = None
        for map_info in self.job_mapping:
            new_job = self.mapNewJob(job_name, map_info)
            if new_job:
                if job_name not in self.seen_new_jobs:
                    self.seen_new_jobs.append(new_job.name)
                new_job.setNoEmit()
                break
        if not new_job:
            orig_name = job_name
            if remove_gate:
                # Only strip the leading 'gate-'.
                job_name = job_name.replace('gate-', '', 1)
            job_name = 'legacy-{job_name}'.format(job_name=job_name)
            new_job = Job(orig=orig_name, name=job_name)

        new_job.extractNode(self.default_node, self.labels)

        # Handle matchers
        for layout_job in self.layout.get('jobs', []):
            if re.search(layout_job['name'], new_job.orig):
                # Matchers that can apply to templates must be processed first
                # since project-specific matchers can cause the template to
                # be expanded into a project.
                if not layout_job.get('voting', True):
                    new_job.voting = False
                if layout_job.get('branch'):
                    new_job.branch = layout_job['branch']
                if layout_job.get('files'):
                    new_job.files = layout_job['files']

        new_job.addJJBJob(self.jjb_jobs)
        return new_job
+
+
class ChangeQueue:
    """A shared change queue: projects whose gate jobs overlap."""

    def __init__(self):
        self.name = ''
        self.assigned_name = None
        self.generated_name = None
        self.projects = []
        self._jobs = set()

    def getJobs(self):
        """Return the set of OldJobs run by any project in this queue."""
        return self._jobs

    def getProjects(self):
        """Return the names of the projects in this queue."""
        return [p.name for p in self.projects]

    def addProject(self, project):
        """Add *project* and recompute the queue's name.

        The generated name is the alphabetically-first project's short
        name; an explicit queue-name on any job overrides it, and
        conflicting explicit names raise.
        """
        if project not in self.projects:
            self.projects.append(project)
            self._jobs |= project.gate_jobs

            names = [x.name for x in self.projects]
            names.sort()
            self.generated_name = names[0].split('/')[-1]

            for job in self._jobs:
                if job.queue_name:
                    if (self.assigned_name and
                            job.queue_name != self.assigned_name):
                        raise Exception("More than one name assigned to "
                                        "change queue: %s != %s" %
                                        (self.assigned_name,
                                         job.queue_name))
                    self.assigned_name = job.queue_name
            self.name = self.assigned_name or self.generated_name

    def mergeChangeQueue(self, other):
        """Absorb all of *other*'s projects into this queue."""
        for project in other.projects:
            self.addProject(project)
+
+
class ZuulMigrate:
    """Driver object: loads the v2 config and emits the v3 config."""

    log = logging.getLogger("zuul.Migrate")

    def __init__(self, layout, job_config, nodepool_config,
                 outdir, mapping, move, syntax_check):
        # BUG FIX: close the layout file instead of leaking the handle.
        with open(layout, 'r') as layout_fh:
            self.layout = ordered_load(layout_fh)
        self.job_config = job_config
        self.outdir = outdir
        self.mapping = JobMapping(nodepool_config, self.layout, mapping)
        self.move = move
        self.syntax_check = syntax_check

        self.jobs = {}
        self.old_jobs = {}
        self.job_objects = []
        self.new_templates = {}
+
    def run(self):
        """Execute the full migration pipeline."""
        self.loadJobs()
        self.buildChangeQueues()
        self.convertJobs()
        self.writeJobs()
+
    def loadJobs(self):
        """Parse and expand all JJB jobs, then hand them to the mapping."""
        self.log.debug("Loading jobs")
        builder = JJB()
        builder.load_files([self.job_config])
        builder.parser.expandYaml()
        unseen = set(self.jobs.keys())
        for job in builder.parser.jobs:
            builder.expandMacros(job)
            self.jobs[job['name']] = job
            unseen.discard(job['name'])
        # Drop jobs that were not produced by this parse.
        for name in unseen:
            del self.jobs[name]
        self.mapping.setJJBJobs(self.jobs)
+
+ def getOldJob(self, name):
+ if name not in self.old_jobs:
+ self.old_jobs[name] = OldJob(name)
+ return self.old_jobs[name]
+
    def flattenOldJobs(self, tree, name=None):
        """Flatten a v2 job tree (string / list / dict) into OldJobs.

        *name* fills '{name}' placeholders; jobs listed in the mapping's
        'unshare' section are dropped.
        """
        if isinstance(tree, str):
            n = tree.format(name=name)
            if n in self.mapping.unshare:
                return []
            return [self.getOldJob(n)]

        new_list = []  # type: ignore
        if isinstance(tree, list):
            for job in tree:
                new_list.extend(self.flattenOldJobs(job, name))
        elif isinstance(tree, dict):
            parent_name = get_single_key(tree)
            jobs = self.flattenOldJobs(tree[parent_name], name)
            for job in jobs:
                # NOTE(review): *job* is already an OldJob here, so the
                # unshare test (a list of strings) never matches and
                # getOldJob is keyed by an OldJob instance rather than a
                # name — appending job directly may be the intent;
                # confirm before changing.
                if job not in self.mapping.unshare:
                    new_list.append(self.getOldJob(job))
            if parent_name not in self.mapping.unshare:
                new_list.append(self.getOldJob(parent_name))
        return new_list
+
+ def buildChangeQueues(self):
+ self.log.debug("Building shared change queues")
+
+ for j in self.layout['jobs']:
+ if '^' in j['name'] or '$' in j['name']:
+ continue
+ job = self.getOldJob(j['name'])
+ job.queue_name = j.get('queue-name')
+
+ change_queues = []
+
+ for project in self.layout.get('projects'):
+ if 'gate' not in project:
+ continue
+ gate_jobs = set()
+ for template in project['template']:
+ for pt in self.layout.get('project-templates'):
+ if pt['name'] != template['name']:
+ continue
+ if 'gate' not in pt['name']:
+ continue
+ gate_jobs |= set(self.flattenOldJobs(pt['gate'],
+ project['name']))
+ gate_jobs |= set(self.flattenOldJobs(project['gate']))
+ old_project = OldProject(project['name'], gate_jobs)
+ change_queue = ChangeQueue()
+ change_queue.addProject(old_project)
+ change_queues.append(change_queue)
+ self.log.debug("Created queue: %s" % change_queue)
+
+        # Iterate over all queues trying to combine them, and keep doing
+        # so until they cannot be combined further.
+ last_change_queues = change_queues
+ while True:
+ new_change_queues = self.combineChangeQueues(last_change_queues)
+ if len(last_change_queues) == len(new_change_queues):
+ break
+ last_change_queues = new_change_queues
+
+ self.log.debug(" Shared change queues:")
+ for queue in new_change_queues:
+ self.log.debug(" %s containing %s" % (
+ queue, queue.generated_name))
+ self.change_queues = new_change_queues
+
+ def combineChangeQueues(self, change_queues):
+ self.log.debug("Combining shared queues")
+ new_change_queues = []
+ for a in change_queues:
+ merged_a = False
+ for b in new_change_queues:
+ if not a.getJobs().isdisjoint(b.getJobs()):
+ self.log.debug("Merging queue %s into %s" % (a, b))
+ b.mergeChangeQueue(a)
+ merged_a = True
+ break # this breaks out of 'for b' and continues 'for a'
+ if not merged_a:
+ self.log.debug("Keeping queue %s" % (a))
+ new_change_queues.append(a)
+ return new_change_queues
+
+ def convertJobs(self):
+ pass
+
+ def setupDir(self):
+ zuul_yaml = os.path.join(self.outdir, 'zuul.yaml')
+ zuul_d = os.path.join(self.outdir, 'zuul.d')
+ orig = os.path.join(zuul_d, '01zuul.yaml')
+ job_outfile = os.path.join(zuul_d, '99converted-jobs.yaml')
+ project_outfile = os.path.join(zuul_d, '99converted-projects.yaml')
+ if not os.path.exists(self.outdir):
+ os.makedirs(self.outdir)
+ if not os.path.exists(zuul_d):
+ os.makedirs(zuul_d)
+ if os.path.exists(zuul_yaml) and self.move:
+ os.rename(zuul_yaml, orig)
+ return job_outfile, project_outfile
+
+ def makeNewJobs(self, old_job, parent: Job=None):
+ self.log.debug("makeNewJobs(%s)", old_job)
+ if isinstance(old_job, str):
+ remove_gate = True
+ if old_job.startswith('gate-'):
+ # Check to see if gate- and bare versions exist
+ if old_job.replace('gate-', '', 1) in self.jobs:
+ remove_gate = False
+ job = self.mapping.getNewJob(old_job, remove_gate)
+ if parent:
+ job.setParent(parent)
+ return [job]
+
+ new_list = [] # type: ignore
+ if isinstance(old_job, list):
+ for job in old_job:
+ new_list.extend(self.makeNewJobs(job, parent=parent))
+
+ elif isinstance(old_job, dict):
+ parent_name = get_single_key(old_job)
+ parent = self.makeNewJobs(parent_name, parent=parent)[0]
+
+ jobs = self.makeNewJobs(old_job[parent_name], parent=parent)
+ for job in jobs:
+ new_list.append(job)
+ new_list.append(parent)
+ return new_list
+
+ def writeProjectTemplate(self, template):
+ new_template = collections.OrderedDict()
+ if 'name' in template:
+ new_template['name'] = template['name']
+ for key, value in template.items():
+ if key == 'name':
+ continue
+
+            # keep a cache of the Job objects so we can map old job
+            # names to new job names when expanding templates into projects.
+ tmp = [job for job in self.makeNewJobs(value)]
+ self.job_objects.extend(tmp)
+ jobs = [job.toPipelineDict() for job in tmp]
+ new_template[key] = dict(jobs=jobs)
+
+ return new_template
+
+ def scanForProjectMatchers(self, project_name):
+ ''' Get list of job matchers that reference the given project name '''
+ job_matchers = []
+ for matcher in self.layout.get('jobs', []):
+ for skipper in matcher.get('skip-if', []):
+ if skipper.get('project'):
+ if re.search(skipper['project'], project_name):
+ job_matchers.append(matcher)
+ return job_matchers
+
+ def findReferencedTemplateNames(self, job_matchers, project_name):
+ ''' Search templates in the layout file for matching jobs '''
+ template_names = []
+
+ def search_jobs(template):
+ def _search(job):
+ if isinstance(job, str):
+ for matcher in job_matchers:
+ if re.search(matcher['name'],
+ job.format(name=project_name)):
+ template_names.append(template['name'])
+ return True
+ elif isinstance(job, list):
+ for i in job:
+ if _search(i):
+ return True
+ elif isinstance(job, dict):
+ for k, v in job.items():
+ if _search(k) or _search(v):
+ return True
+ return False
+
+ for key, value in template.items():
+ if key == 'name':
+ continue
+ for job in template[key]:
+ if _search(job):
+ return
+
+ for template in self.layout.get('project-templates', []):
+ search_jobs(template)
+ return template_names
+
+ def expandTemplateIntoProject(self, template_name, project):
+ self.log.debug("EXPAND template %s into project %s",
+ template_name, project['name'])
+ # find the new template since that's the thing we're expanding
+ if template_name not in self.new_templates:
+ self.log.error(
+ "Template %s not found for expansion into project %s",
+ template_name, project['name'])
+ return
+
+ template = self.new_templates[template_name]
+
+ for pipeline, value in template.items():
+ if pipeline == 'name':
+ continue
+ if pipeline not in project:
+ project[pipeline] = dict(jobs=[])
+ project[pipeline]['jobs'].extend(value['jobs'])
+
+ def getOldJobName(self, new_job_name):
+ for job in self.job_objects:
+ if job.name == new_job_name:
+ return job.orig
+ return None
+
+ def applyProjectMatchers(self, matchers, project):
+ '''
+ Apply per-project job matchers to the given project.
+
+ :param matchers: Job matchers that referenced the given project.
+ :param project: The new project object.
+ '''
+
+ def processPipeline(pipeline_jobs, job_name_regex, files):
+ for job in pipeline_jobs:
+ if isinstance(job, str):
+ old_job_name = self.getOldJobName(job)
+ if not old_job_name:
+ continue
+ if re.search(job_name_regex, old_job_name):
+ self.log.debug(
+ "Applied irrelevant-files to job %s in project %s",
+ job, project['name'])
+ job = dict(job={'irrelevant-files': files})
+ elif isinstance(job, dict):
+ # should really only be one key (job name)
+ job_name = list(job.keys())[0]
+ extras = job[job_name]
+ old_job_name = self.getOldJobName(job_name)
+ if not old_job_name:
+ continue
+ if re.search(job_name_regex, old_job_name):
+ self.log.debug(
+ "Applied irrelevant-files to complex job "
+ "%s in project %s", job_name, project['name'])
+ if 'irrelevant-files' not in extras:
+ extras['irrelevant-files'] = []
+ extras['irrelevant-files'].extend(files)
+
+ def applyIrrelevantFiles(job_name_regex, files):
+ for k, v in project.items():
+ if k in ('template', 'name'):
+ continue
+ processPipeline(project[k]['jobs'], job_name_regex, files)
+
+ for matcher in matchers:
+ # find the project-specific section
+ for skipper in matcher.get('skip-if', []):
+ if skipper.get('project'):
+ if re.search(skipper['project'], project['name']):
+ if 'all-files-match-any' in skipper:
+ applyIrrelevantFiles(
+ matcher['name'],
+ skipper['all-files-match-any'])
+
+ def writeProject(self, project):
+ '''
+ Create a new v3 project definition.
+
+ As part of creating the project, scan for project-specific job matchers
+ referencing this project and remove the templates matching the job
+ regex for that matcher. Expand the matched template(s) into the project
+ so we can apply the project-specific matcher to the job(s).
+ '''
+ new_project = collections.OrderedDict()
+ if 'name' in project:
+ new_project['name'] = project['name']
+
+ job_matchers = self.scanForProjectMatchers(project['name'])
+ if job_matchers:
+ exp_template_names = self.findReferencedTemplateNames(
+ job_matchers, project['name'])
+ else:
+ exp_template_names = []
+
+ templates_to_expand = []
+ if 'template' in project:
+ new_project['template'] = []
+ for template in project['template']:
+ if template['name'] in exp_template_names:
+ templates_to_expand.append(template['name'])
+ continue
+ new_project['template'].append(dict(
+ name=self.mapping.getNewTemplateName(template['name'])))
+
+ for key, value in project.items():
+ if key in ('name', 'template'):
+ continue
+ else:
+ new_project[key] = collections.OrderedDict()
+ if key == 'gate':
+ for queue in self.change_queues:
+ if project['name'] not in queue.getProjects():
+ continue
+ if len(queue.getProjects()) == 1:
+ continue
+ new_project[key]['queue'] = queue.name
+ tmp = [job for job in self.makeNewJobs(value)]
+ self.job_objects.extend(tmp)
+ jobs = [job.toPipelineDict() for job in tmp]
+ new_project[key]['jobs'] = jobs
+
+ for name in templates_to_expand:
+ self.expandTemplateIntoProject(name, new_project)
+
+ # Need a deep copy after expansion, else our templates end up
+ # also getting this change.
+ new_project = copy.deepcopy(new_project)
+ self.applyProjectMatchers(job_matchers, new_project)
+
+ return new_project
+
+ def writeJobs(self):
+ job_outfile, project_outfile = self.setupDir()
+ job_config = []
+ project_config = []
+
+ for template in self.layout.get('project-templates', []):
+ self.log.debug("Processing template: %s", template)
+ new_template = self.writeProjectTemplate(template)
+ self.new_templates[new_template['name']] = new_template
+ if not self.mapping.hasProjectTemplate(template['name']):
+ job_config.append({'project-template': new_template})
+
+ project_names = []
+ for project in self.layout.get('projects', []):
+ project_names.append(project['name'])
+ project_config.append(
+ {'project': self.writeProject(project)})
+
+ seen_jobs = []
+ for job in sorted(self.job_objects, key=lambda job: job.name):
+ if (job.name not in seen_jobs and
+ job.name not in self.mapping.seen_new_jobs and
+ job.emit):
+ has_artifacts, has_post, has_draft = job.emitPlaybooks(
+ self.outdir, self.syntax_check)
+ job_config.append({'job': job.toJobDict(
+ has_artifacts, has_post, has_draft, project_names)})
+ seen_jobs.append(job.name)
+
+ with open(job_outfile, 'w') as yamlout:
+ # Insert an extra space between top-level list items
+ yamlout.write(ordered_dump(job_config).replace('\n-', '\n\n-'))
+
+ with open(project_outfile, 'w') as yamlout:
+ # Insert an extra space between top-level list items
+ yamlout.write(ordered_dump(project_config).replace('\n-', '\n\n-'))
+
+
+def main():
+ yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+ construct_yaml_map)
+
+ yaml.add_representer(collections.OrderedDict, project_representer,
+ Dumper=IndentedDumper)
+ yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
+
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
+ parser.add_argument(
+ 'layout',
+ help="The Zuul v2 layout.yaml file to read.")
+ parser.add_argument(
+ 'job_config',
+ help="Directory containing Jenkins Job Builder job definitions.")
+ parser.add_argument(
+ 'nodepool_config',
+ help="Nodepool config file containing complete set of node names")
+ parser.add_argument(
+ 'outdir',
+ help="A directory into which the Zuul v3 config will be written.")
+ parser.add_argument(
+ '--mapping',
+ default=None,
+ help="A filename with a yaml mapping of old name to new name.")
+ parser.add_argument(
+ '-v', dest='verbose', action='store_true', help='verbose output')
+ parser.add_argument(
+ '--syntax-check', dest='syntax_check', action='store_true',
+ help='Run ansible-playbook --syntax-check on generated playbooks')
+ parser.add_argument(
+ '-m', dest='move', action='store_true',
+ help='Move zuul.yaml to zuul.d if it exists')
+
+ args = parser.parse_args()
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ ZuulMigrate(args.layout, args.job_config, args.nodepool_config,
+ args.outdir, args.mapping, args.move, args.syntax_check).run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 94c0d2a..b70ea59 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -76,6 +76,15 @@
super(MaxNodeError, self).__init__(message)
+class MaxTimeoutError(Exception):
+ def __init__(self, job, tenant):
+ message = textwrap.dedent("""\
+ The job "{job}" exceeds tenant max-job-timeout {maxtimeout}.""")
+ message = textwrap.fill(message.format(
+ job=job.name, maxtimeout=tenant.max_job_timeout))
+ super(MaxTimeoutError, self).__init__(message)
+
+
class DuplicateGroupError(Exception):
def __init__(self, nodeset, group):
message = textwrap.dedent("""\
@@ -97,6 +106,24 @@
super(ProjectNotFoundError, self).__init__(message)
+class SecretNotFoundError(Exception):
+ def __init__(self, secret):
+ message = textwrap.dedent("""\
+ The secret "{secret}" was not found.
+ """)
+ message = textwrap.fill(message.format(secret=secret))
+ super(SecretNotFoundError, self).__init__(message)
+
+
+class NodesetNotFoundError(Exception):
+ def __init__(self, nodeset):
+ message = textwrap.dedent("""\
+ The nodeset "{nodeset}" was not found.
+ """)
+ message = textwrap.fill(message.format(nodeset=nodeset))
+ super(NodesetNotFoundError, self).__init__(message)
+
+
class PipelineNotPermittedError(Exception):
def __init__(self):
message = textwrap.dedent("""\
@@ -120,6 +147,30 @@
@contextmanager
+def early_configuration_exceptions(context):
+ try:
+ yield
+ except ConfigurationSyntaxError:
+ raise
+ except Exception as e:
+ intro = textwrap.fill(textwrap.dedent("""\
+ Zuul encountered a syntax error while parsing its configuration in the
+ repo {repo} on branch {branch}. The error was:""".format(
+ repo=context.project.name,
+ branch=context.branch,
+ )))
+
+ m = textwrap.dedent("""\
+ {intro}
+
+ {error}""")
+
+ m = m.format(intro=intro,
+ error=indent(str(e)))
+ raise ConfigurationSyntaxError(m)
+
+
+@contextmanager
def configuration_exceptions(stanza, conf):
try:
yield
@@ -254,7 +305,7 @@
class NodeSetParser(object):
@staticmethod
- def getSchema():
+ def getSchema(anonymous=False):
node = {vs.Required('name'): str,
vs.Required('label'): str,
}
@@ -263,19 +314,20 @@
vs.Required('nodes'): to_list(str),
}
- nodeset = {vs.Required('name'): str,
- vs.Required('nodes'): to_list(node),
+ nodeset = {vs.Required('nodes'): to_list(node),
'groups': to_list(group),
'_source_context': model.SourceContext,
'_start_mark': ZuulMark,
}
+ if not anonymous:
+ nodeset[vs.Required('name')] = str
return vs.Schema(nodeset)
@staticmethod
- def fromYaml(layout, conf):
- NodeSetParser.getSchema()(conf)
- ns = model.NodeSet(conf['name'])
+ def fromYaml(conf, anonymous=False):
+ NodeSetParser.getSchema(anonymous)(conf)
+ ns = model.NodeSet(conf.get('name'))
node_names = set()
group_names = set()
for conf_node in as_list(conf['nodes']):
@@ -324,10 +376,6 @@
@staticmethod
def getSchema():
- node = {vs.Required('name'): str,
- vs.Required('label'): str,
- }
-
zuul_role = {vs.Required('zuul'): str,
'name': str}
@@ -357,7 +405,8 @@
'files': to_list(str),
'secrets': to_list(vs.Any(secret, str)),
'irrelevant-files': to_list(str),
- 'nodes': vs.Any([node], str),
+ # validation happens in NodeSetParser
+ 'nodeset': vs.Any(dict, str),
'timeout': int,
'attempts': int,
'pre-run': to_list(str),
@@ -453,13 +502,15 @@
# Secrets are part of the playbook context so we must establish
# them earlier than playbooks.
secrets = []
- for secret_config in conf.get('secrets', []):
+ for secret_config in as_list(conf.get('secrets', [])):
if isinstance(secret_config, str):
secret_name = secret_config
- secret = layout.secrets[secret_name]
+ secret = layout.secrets.get(secret_name)
else:
secret_name = secret_config['name']
- secret = layout.secrets[secret_config['secret']]
+ secret = layout.secrets.get(secret_config['secret'])
+ if secret is None:
+ raise SecretNotFoundError(secret_name)
if secret_name == 'zuul':
raise Exception("Secrets named 'zuul' are not allowed.")
if secret.source_context != job.source_context:
@@ -481,6 +532,10 @@
if secrets and not conf['_source_context'].trusted:
job.post_review = True
+ if conf.get('timeout') and tenant.max_job_timeout != -1 and \
+ int(conf['timeout']) > tenant.max_job_timeout:
+ raise MaxTimeoutError(job, tenant)
+
if 'post-review' in conf:
if conf['post-review']:
job.post_review = True
@@ -532,16 +587,15 @@
a = k.replace('-', '_')
if k in conf:
setattr(job, a, conf[k])
- if 'nodes' in conf:
- conf_nodes = conf['nodes']
- if isinstance(conf_nodes, str):
+ if 'nodeset' in conf:
+ conf_nodeset = conf['nodeset']
+ if isinstance(conf_nodeset, str):
# This references an existing named nodeset in the layout.
- ns = layout.nodesets[conf_nodes]
+ ns = layout.nodesets.get(conf_nodeset)
+ if ns is None:
+ raise NodesetNotFoundError(conf_nodeset)
else:
- ns = model.NodeSet()
- for conf_node in conf_nodes:
- node = model.Node(conf_node['name'], conf_node['label'])
- ns.addNode(node)
+ ns = NodeSetParser.fromYaml(conf_nodeset, anonymous=True)
if tenant.max_nodes_per_job != -1 and \
len(ns) > tenant.max_nodes_per_job:
raise MaxNodeError(job, tenant)
@@ -1035,6 +1089,7 @@
def getSchema(connections=None):
tenant = {vs.Required('name'): str,
'max-nodes-per-job': int,
+ 'max-job-timeout': int,
'source': TenantParser.validateTenantSources(connections),
'exclude-unprotected-branches': bool,
'default-parent': str,
@@ -1048,6 +1103,8 @@
tenant = model.Tenant(conf['name'])
if conf.get('max-nodes-per-job') is not None:
tenant.max_nodes_per_job = conf['max-nodes-per-job']
+ if conf.get('max-job-timeout') is not None:
+ tenant.max_job_timeout = int(conf['max-job-timeout'])
if conf.get('exclude-unprotected-branches') is not None:
tenant.exclude_unprotected_branches = \
conf['exclude-unprotected-branches']
@@ -1318,6 +1375,8 @@
continue
TenantParser.log.debug("Waiting for cat job %s" % (job,))
job.wait()
+ if not job.updated:
+ raise Exception("Cat job %s failed" % (job,))
TenantParser.log.debug("Cat job %s got files %s" %
(job, job.files))
loaded = False
@@ -1367,13 +1426,15 @@
def _parseConfigProjectLayout(data, source_context):
# This is the top-level configuration for a tenant.
config = model.UnparsedTenantConfig()
- config.extend(safe_load_yaml(data, source_context))
+ with early_configuration_exceptions(source_context):
+ config.extend(safe_load_yaml(data, source_context))
return config
@staticmethod
def _parseUntrustedProjectLayout(data, source_context):
config = model.UnparsedTenantConfig()
- config.extend(safe_load_yaml(data, source_context))
+ with early_configuration_exceptions(source_context):
+ config.extend(safe_load_yaml(data, source_context))
if config.pipelines:
with configuration_exceptions('pipeline', config.pipelines[0]):
raise PipelineNotPermittedError()
@@ -1404,7 +1465,7 @@
continue
with configuration_exceptions('nodeset', config_nodeset):
layout.addNodeSet(NodeSetParser.fromYaml(
- layout, config_nodeset))
+ config_nodeset))
for config_secret in data.secrets:
classes = TenantParser._getLoadClasses(tenant, config_secret)
@@ -1471,6 +1532,8 @@
@staticmethod
def _parseLayout(base, tenant, data, scheduler, connections):
+ # Don't call this method from dynamic reconfiguration because
+ # it interacts with drivers and connections.
layout = model.Layout(tenant)
TenantParser._parseLayoutItems(layout, tenant, data,
@@ -1582,7 +1645,8 @@
config.extend(incdata)
def createDynamicLayout(self, tenant, files,
- include_config_projects=False):
+ include_config_projects=False,
+ scheduler=None, connections=None):
if include_config_projects:
config = model.UnparsedTenantConfig()
for project in tenant.config_projects:
@@ -1594,22 +1658,29 @@
self._loadDynamicProjectData(config, project, files, False, tenant)
layout = model.Layout(tenant)
- # NOTE: the actual pipeline objects (complete with queues and
- # enqueued items) are copied by reference here. This allows
- # our shadow dynamic configuration to continue to interact
- # with all the other changes, each of which may have their own
- # version of reality. We do not support creating, updating,
- # or deleting pipelines in dynamic layout changes.
- layout.pipelines = tenant.layout.pipelines
+ if not include_config_projects:
+ # NOTE: the actual pipeline objects (complete with queues
+ # and enqueued items) are copied by reference here. This
+ # allows our shadow dynamic configuration to continue to
+ # interact with all the other changes, each of which may
+ # have their own version of reality. We do not support
+ # creating, updating, or deleting pipelines in dynamic
+ # layout changes.
+ layout.pipelines = tenant.layout.pipelines
- # NOTE: the semaphore definitions are copied from the static layout
- # here. For semaphores there should be no per patch max value but
- # exactly one value at any time. So we do not support dynamic semaphore
- # configuration changes.
- layout.semaphores = tenant.layout.semaphores
+ # NOTE: the semaphore definitions are copied from the
+ # static layout here. For semaphores there should be no
+ # per patch max value but exactly one value at any
+ # time. So we do not support dynamic semaphore
+ # configuration changes.
+ layout.semaphores = tenant.layout.semaphores
+ skip_pipelines = skip_semaphores = True
+ else:
+ skip_pipelines = skip_semaphores = False
- TenantParser._parseLayoutItems(layout, tenant, config, None, None,
- skip_pipelines=True,
- skip_semaphores=True)
+ TenantParser._parseLayoutItems(layout, tenant, config,
+ scheduler, connections,
+ skip_pipelines=skip_pipelines,
+ skip_semaphores=skip_semaphores)
return layout
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 35137c7..83871e3 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -133,25 +133,42 @@
event.branch_deleted = True
event.branch = event.ref
- if event.change_number:
- # TODO(jhesketh): Check if the project exists?
- # and self.connection.sched.getProject(event.project_name):
-
- # Call _getChange for the side effect of updating the
- # cache. Note that this modifies Change objects outside
- # the main thread.
- # NOTE(jhesketh): Ideally we'd just remove the change from the
- # cache to denote that it needs updating. However the change
- # object is already used by Items and hence BuildSets etc. and
- # we need to update those objects by reference so that they have
- # the correct/new information and also avoid hitting gerrit
- # multiple times.
- self.connection._getChange(event.change_number,
- event.patch_number,
- refresh=True)
+ self._getChange(event)
self.connection.logEvent(event)
self.connection.sched.addEvent(event)
+ def _getChange(self, event):
+ # Grab the change if we are managing the project or if it exists in the
+ # cache as it may be a dependency
+ if event.change_number:
+ refresh = True
+ if event.change_number not in self.connection._change_cache:
+ refresh = False
+ for tenant in self.connection.sched.abide.tenants.values():
+ # TODO(fungi): it would be better to have some simple means
+ # of inferring the hostname from the connection, or at
+ # least split this into separate method arguments, rather
+ # than assembling and passing in a baked string.
+ if (None, None) != tenant.getProject('/'.join((
+ self.connection.canonical_hostname,
+ event.project_name))):
+ refresh = True
+ break
+
+ if refresh:
+ # Call _getChange for the side effect of updating the
+ # cache. Note that this modifies Change objects outside
+ # the main thread.
+ # NOTE(jhesketh): Ideally we'd just remove the change from the
+ # cache to denote that it needs updating. However the change
+ # object is already used by Items and hence BuildSets etc. and
+ # we need to update those objects by reference so that they
+ # have the correct/new information and also avoid hitting
+ # gerrit multiple times.
+ self.connection._getChange(event.change_number,
+ event.patch_number,
+ refresh=True)
+
def run(self):
while True:
if self._stopped:
@@ -298,12 +315,17 @@
# This lets the user supply a list of change objects that are
# still in use. Anything in our cache that isn't in the supplied
# list should be safe to remove from the cache.
- remove = []
- for key, change in self._change_cache.items():
- if change not in relevant:
- remove.append(key)
- for key in remove:
- del self._change_cache[key]
+ remove = {}
+ for change_number, patchsets in self._change_cache.items():
+ for patchset, change in patchsets.items():
+ if change not in relevant:
+ remove.setdefault(change_number, [])
+ remove[change_number].append(patchset)
+ for change_number, patchsets in remove.items():
+ for patchset in patchsets:
+ del self._change_cache[change_number][patchset]
+ if not self._change_cache[change_number]:
+ del self._change_cache[change_number]
def getChange(self, event, refresh=False):
if event.change_number:
@@ -318,7 +340,7 @@
change.newrev = event.newrev
change.url = self._getGitwebUrl(project, sha=event.newrev)
elif event.ref and not event.ref.startswith('refs/'):
- # Gerrit ref-updated events don't have branch prefixes.
+            # Pre-2.13 Gerrit ref-updated events don't have branch prefixes.
project = self.source.getProject(event.project_name)
change = Branch(project)
change.branch = event.ref
@@ -327,11 +349,11 @@
change.newrev = event.newrev
change.url = self._getGitwebUrl(project, sha=event.newrev)
elif event.ref and event.ref.startswith('refs/heads/'):
- # From the timer trigger
+            # From the timer trigger or post-2.13 Gerrit
project = self.source.getProject(event.project_name)
change = Branch(project)
change.ref = event.ref
- change.branch = event.branch
+ change.branch = event.ref[len('refs/heads/'):]
change.oldrev = event.oldrev
change.newrev = event.newrev
change.url = self._getGitwebUrl(project, sha=event.newrev)
@@ -349,21 +371,22 @@
return change
def _getChange(self, number, patchset, refresh=False, history=None):
- key = '%s,%s' % (number, patchset)
- change = self._change_cache.get(key)
+ change = self._change_cache.get(number, {}).get(patchset)
if change and not refresh:
return change
if not change:
change = GerritChange(None)
change.number = number
change.patchset = patchset
- key = '%s,%s' % (change.number, change.patchset)
- self._change_cache[key] = change
+ self._change_cache.setdefault(change.number, {})
+ self._change_cache[change.number][change.patchset] = change
try:
self._updateChange(change, history)
except Exception:
- if key in self._change_cache:
- del self._change_cache[key]
+ if self._change_cache.get(change.number, {}).get(change.patchset):
+ del self._change_cache[change.number][change.patchset]
+ if not self._change_cache[change.number]:
+ del self._change_cache[change.number]
raise
return change
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 0ce6ef5..3d0eb37 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -17,6 +17,8 @@
import logging
import hmac
import hashlib
+import queue
+import threading
import time
import re
@@ -80,11 +82,10 @@
delivery=delivery))
self._validate_signature(request)
+ # TODO(jlk): Validate project in the request is a project we know
try:
self.__dispatch_event(request)
- except webob.exc.HTTPNotFound:
- raise
except:
self.log.exception("Exception handling Github event:")
@@ -98,20 +99,58 @@
'header.')
try:
- method = getattr(self, '_event_' + event)
- except AttributeError:
- message = "Unhandled X-Github-Event: {0}".format(event)
- self.log.debug(message)
- # Returns empty 200 on unhandled events
- raise webob.exc.HTTPOk()
-
- try:
json_body = request.json_body
+ self.connection.addEvent(json_body, event)
except:
message = 'Exception deserializing JSON body'
self.log.exception(message)
raise webob.exc.HTTPBadRequest(message)
+ def _validate_signature(self, request):
+ secret = self.connection.connection_config.get('webhook_token', None)
+ if secret is None:
+ raise RuntimeError("webhook_token is required")
+
+ body = request.body
+ try:
+ request_signature = request.headers['X-Hub-Signature']
+ except KeyError:
+ raise webob.exc.HTTPUnauthorized(
+ 'Please specify a X-Hub-Signature header with secret.')
+
+ payload_signature = _sign_request(body, secret)
+
+ self.log.debug("Payload Signature: {0}".format(str(payload_signature)))
+ self.log.debug("Request Signature: {0}".format(str(request_signature)))
+ if not hmac.compare_digest(
+ str(payload_signature), str(request_signature)):
+ raise webob.exc.HTTPUnauthorized(
+ 'Request signature does not match calculated payload '
+ 'signature. Check that secret is correct.')
+
+ return True
+
+
+class GithubEventConnector(threading.Thread):
+ """Move events from GitHub into the scheduler"""
+
+ log = logging.getLogger("zuul.GithubEventConnector")
+
+ def __init__(self, connection):
+ super(GithubEventConnector, self).__init__()
+ self.daemon = True
+ self.connection = connection
+ self._stopped = False
+
+ def stop(self):
+ self._stopped = True
+ self.connection.addEvent(None)
+
+ def _handleEvent(self):
+ json_body, event_type = self.connection.getEvent()
+ if self._stopped:
+ return
+
# If there's any installation mapping information in the body then
# update the project mapping before any requests are made.
installation_id = json_body.get('installation', {}).get('id')
@@ -127,9 +166,17 @@
self.connection.installation_map[project_name] = installation_id
try:
+ method = getattr(self, '_event_' + event_type)
+ except AttributeError:
+ # TODO(jlk): Gracefully handle event types we don't care about
+ # instead of logging an exception.
+ message = "Unhandled X-Github-Event: {0}".format(event_type)
+ self.log.debug(message)
+            # Silently ignore unhandled event types
+ return
+
+ try:
event = method(json_body)
- except webob.exc.HTTPNotFound:
- raise
except:
self.log.exception('Exception when handling event:')
event = None
@@ -240,14 +287,6 @@
event.action = body.get('action')
return event
- def _event_ping(self, body):
- project_name = body['repository']['full_name']
- if not self.connection.getProject(project_name):
- self.log.warning("Ping received for unknown project %s" %
- project_name)
- raise webob.exc.HTTPNotFound("Sorry, this project is not "
- "registered")
-
def _event_status(self, body):
action = body.get('action')
if action == 'pending':
@@ -277,30 +316,6 @@
(number, project_name))
return pr_body
- def _validate_signature(self, request):
- secret = self.connection.connection_config.get('webhook_token', None)
- if secret is None:
- raise RuntimeError("webhook_token is required")
-
- body = request.body
- try:
- request_signature = request.headers['X-Hub-Signature']
- except KeyError:
- raise webob.exc.HTTPUnauthorized(
- 'Please specify a X-Hub-Signature header with secret.')
-
- payload_signature = _sign_request(body, secret)
-
- self.log.debug("Payload Signature: {0}".format(str(payload_signature)))
- self.log.debug("Request Signature: {0}".format(str(request_signature)))
- if not hmac.compare_digest(
- str(payload_signature), str(request_signature)):
- raise webob.exc.HTTPUnauthorized(
- 'Request signature does not match calculated payload '
- 'signature. Check that secret is correct.')
-
- return True
-
def _pull_request_to_event(self, pr_body):
event = GithubTriggerEvent()
event.trigger_name = 'github'
@@ -327,6 +342,17 @@
if login:
return self.connection.getUser(login)
+ def run(self):
+ while True:
+ if self._stopped:
+ return
+ try:
+ self._handleEvent()
+ except:
+ self.log.exception("Exception moving GitHub event:")
+ finally:
+ self.connection.eventDone()
+
class GithubUser(collections.Mapping):
log = logging.getLogger('zuul.GithubUser')
@@ -376,6 +402,7 @@
self.canonical_hostname = self.connection_config.get(
'canonical_hostname', self.server)
self.source = driver.getSource(self)
+ self.event_queue = queue.Queue()
# ssl verification must default to true
verify_ssl = self.connection_config.get('verify_ssl', 'true')
@@ -408,9 +435,20 @@
self.registerHttpHandler(self.payload_path,
webhook_listener.handle_request)
self._authenticateGithubAPI()
+ self._start_event_connector()
def onStop(self):
self.unregisterHttpHandler(self.payload_path)
+ self._stop_event_connector()
+
+ def _start_event_connector(self):
+ self.github_event_connector = GithubEventConnector(self)
+ self.github_event_connector.start()
+
+ def _stop_event_connector(self):
+ if self.github_event_connector:
+ self.github_event_connector.stop()
+ self.github_event_connector.join()
def _createGithubClient(self):
if self.server != 'github.com':
@@ -504,15 +542,21 @@
return token
+ def addEvent(self, data, event=None):
+ return self.event_queue.put((data, event))
+
+ def getEvent(self):
+ return self.event_queue.get()
+
+ def eventDone(self):
+ self.event_queue.task_done()
+
def getGithubClient(self,
project=None,
- user_id=None,
- use_app=True):
+ user_id=None):
# if you're authenticating for a project and you're an integration then
- # you need to use the installation specific token. There are some
- # operations that are not yet supported by integrations so
- # use_app lets you use api_key auth.
- if use_app and project and self.app_id:
+ # you need to use the installation specific token.
+ if project and self.app_id:
github = self._createGithubClient()
github.login(token=self._get_installation_key(project, user_id))
github._zuul_project = project
diff --git a/zuul/driver/github/githubreporter.py b/zuul/driver/github/githubreporter.py
index 3b8f518..505757f 100644
--- a/zuul/driver/github/githubreporter.py
+++ b/zuul/driver/github/githubreporter.py
@@ -112,10 +112,10 @@
description = 'status: %s' % self._commit_status
self.log.debug(
- 'Reporting change %s, params %s, status:\n'
+ 'Reporting change %s, params %s, '
'context: %s, state: %s, description: %s, url: %s' %
- (item.change, self.config, context, state,
- description, url))
+ (item.change, self.config,
+ context, state, description, url))
self.connection.setCommitStatus(
project, sha, state, url, description, context)
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index d76fafd..afdc747 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -28,7 +28,7 @@
class SQLConnection(BaseConnection):
driver_name = 'sql'
- log = logging.getLogger("connection.sql")
+ log = logging.getLogger("zuul.SQLConnection")
def __init__(self, driver, connection_name, connection_config):
diff --git a/zuul/driver/sql/sqlreporter.py b/zuul/driver/sql/sqlreporter.py
index 7c79176..ca35577 100644
--- a/zuul/driver/sql/sqlreporter.py
+++ b/zuul/driver/sql/sqlreporter.py
@@ -23,7 +23,7 @@
"""Sends off reports to a database."""
name = 'sql'
- log = logging.getLogger("zuul.reporter.mysql.SQLReporter")
+ log = logging.getLogger("zuul.SQLReporter")
def report(self, item):
"""Create an entry into a database."""
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index be41186..f97d286 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -175,6 +175,8 @@
zuul_params['tag'] = item.change.tag
if hasattr(item.change, 'number'):
zuul_params['change'] = str(item.change.number)
+ if hasattr(item.change, 'url'):
+ zuul_params['change_url'] = item.change.url
if hasattr(item.change, 'patchset'):
zuul_params['patchset'] = str(item.change.patchset)
if (hasattr(item.change, 'oldrev') and item.change.oldrev
@@ -196,6 +198,8 @@
)
if hasattr(i.change, 'number'):
d['change'] = str(i.change.number)
+ if hasattr(i.change, 'url'):
+ d['change_url'] = i.change.url
if hasattr(i.change, 'patchset'):
d['patchset'] = str(i.change.patchset)
if hasattr(i.change, 'branch'):
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 3daafc7..27fd85f 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -36,7 +36,7 @@
import gear
import zuul.merger.merger
-import zuul.ansible
+import zuul.ansible.logconfig
from zuul.lib import commandsocket
BUFFER_LINES_FOR_SYNTAX = 200
@@ -203,7 +203,7 @@
except OSError:
self.log.exception(
'Problem sending SIGTERM to agent {}'.format(self.env))
- self.log.info('Sent SIGTERM to SSH Agent, {}'.format(self.env))
+ self.log.debug('Sent SIGTERM to SSH Agent, {}'.format(self.env))
self.env = {}
def add(self, key_path):
@@ -567,6 +567,7 @@
self.action_dir = os.path.join(plugin_dir, 'action')
self.callback_dir = os.path.join(plugin_dir, 'callback')
self.lookup_dir = os.path.join(plugin_dir, 'lookup')
+ self.filter_dir = os.path.join(plugin_dir, 'filter')
_copy_ansible_files(zuul.ansible, plugin_dir)
@@ -946,7 +947,7 @@
repos += playbook['roles']
for repo in repos:
- self.log.debug("Updating playbook or role %s" % (repo,))
+ self.log.debug("Updating playbook or role %s" % (repo['project'],))
key = (repo['connection'], repo['project'])
if key not in projects:
tasks.append(self.executor_server.update(*key))
@@ -1162,6 +1163,7 @@
ansible_user=self.executor_server.default_username,
ansible_port=port,
nodepool=dict(
+ label=node.get('label'),
az=node.get('az'),
cloud=node.get('cloud'),
provider=node.get('provider'),
@@ -1445,6 +1447,8 @@
config.write('command_warnings = False\n')
config.write('callback_plugins = %s\n' % callback_path)
config.write('stdout_callback = zuul_stream\n')
+ config.write('filter_plugins = %s\n'
+ % self.executor_server.filter_dir)
# bump the timeout because busy nodes may take more than
# 10s to respond
config.write('timeout = 30\n')
@@ -1617,7 +1621,22 @@
now=datetime.datetime.now()))
for line in syntax_buffer:
job_output.write("{now} | {line}\n".format(
- now=datetime.datetime.now(), line=line))
+ now=datetime.datetime.now(),
+ line=line.decode('utf-8').rstrip()))
+ elif ret == 250:
+ # Unexpected error from ansible
+ with open(self.jobdir.job_output_file, 'a') as job_output:
+ job_output.write("{now} | UNEXPECTED ANSIBLE ERROR\n".format(
+ now=datetime.datetime.now()))
+ found_marker = False
+ for line in syntax_buffer:
+ if line.startswith('ERROR! Unexpected Exception'):
+ found_marker = True
+ if not found_marker:
+ continue
+ job_output.write("{now} | {line}\n".format(
+ now=datetime.datetime.now(),
+ line=line.decode('utf-8').rstrip()))
return (self.RESULT_NORMAL, ret)
@@ -1633,7 +1652,7 @@
cmd.extend(['-e', '@' + playbook.secrets])
if success is not None:
- cmd.extend(['-e', 'success=%s' % str(bool(success))])
+ cmd.extend(['-e', 'zuul_success=%s' % str(bool(success))])
if phase:
cmd.extend(['-e', 'zuul_execution_phase=%s' % phase])
diff --git a/zuul/lib/commandsocket.py b/zuul/lib/commandsocket.py
index 901291a..2836999 100644
--- a/zuul/lib/commandsocket.py
+++ b/zuul/lib/commandsocket.py
@@ -44,9 +44,9 @@
# First, wake up our listener thread with a connection and
# tell it to stop running.
self.running = False
- s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- s.connect(self.path)
- s.sendall(b'_stop\n')
+ with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
+ s.connect(self.path)
+ s.sendall(b'_stop\n')
# The command '_stop' will be ignored by our listener, so
# directly inject it into the queue so that consumers of this
# class which are waiting in .get() are awakened. They can
diff --git a/zuul/lib/log_streamer.py b/zuul/lib/log_streamer.py
index 57afef9..3ecaf4d 100644
--- a/zuul/lib/log_streamer.py
+++ b/zuul/lib/log_streamer.py
@@ -168,6 +168,8 @@
'''
Custom version that allows us to drop privileges after port binding.
'''
+ address_family = socket.AF_INET6
+
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
self.jobdir_root = kwargs.pop('jobdir_root')
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 8282f86..98c7350 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -444,7 +444,9 @@
loader.createDynamicLayout(
item.pipeline.layout.tenant,
build_set.files,
- include_config_projects=True)
+ include_config_projects=True,
+ scheduler=self.sched,
+ connections=self.sched.connections)
# Then create the config a second time but without changes
# to config repos so that we actually use this config.
@@ -527,11 +529,12 @@
if not item.job_graph:
try:
+ self.log.debug("Freezing job graph for %s" % (item,))
item.freezeJobGraph()
except Exception as e:
# TODOv3(jeblair): nicify this exception as it will be reported
self.log.exception("Error freezing job graph for %s" %
- item)
+ (item,))
item.setConfigError("Unable to freeze job graph: %s" %
(str(e)))
return False
@@ -540,6 +543,7 @@
def _processOneItem(self, item, nnfi):
changed = False
ready = False
+ dequeued = False
failing_reasons = [] # Reasons this item is failing
item_ahead = item.item_ahead
@@ -594,8 +598,14 @@
item.reported_start = True
if item.current_build_set.unable_to_merge:
failing_reasons.append("it has a merge conflict")
+ if (not item.live) and (not dequeued):
+ self.dequeueItem(item)
+ changed = dequeued = True
if item.current_build_set.config_error:
failing_reasons.append("it has an invalid configuration")
+ if (not item.live) and (not dequeued):
+ self.dequeueItem(item)
+ changed = dequeued = True
if ready and self.provisionNodes(item):
changed = True
if ready and self.executeJobs(item):
@@ -603,10 +613,10 @@
if item.didAnyJobFail():
failing_reasons.append("at least one job failed")
- if (not item.live) and (not item.items_behind):
+ if (not item.live) and (not item.items_behind) and (not dequeued):
failing_reasons.append("is a non-live item with no items behind")
self.dequeueItem(item)
- changed = True
+ changed = dequeued = True
if ((not item_ahead) and item.areAllJobsComplete() and item.live):
try:
self.reportItem(item)
@@ -618,7 +628,7 @@
(item_behind.change, item))
self.cancelJobs(item_behind)
self.dequeueItem(item)
- changed = True
+ changed = dequeued = True
elif not failing_reasons and item.live:
nnfi = item
item.current_build_set.failing_reasons = failing_reasons
@@ -743,9 +753,12 @@
layout = (item.current_build_set.layout or
self.pipeline.layout)
- if not layout.hasProject(item.change.project):
+ project_in_pipeline = True
+ if not layout.getProjectPipelineConfig(item.change.project,
+ self.pipeline):
self.log.debug("Project %s not in pipeline %s for change %s" % (
item.change.project, self.pipeline, item.change))
+ project_in_pipeline = False
actions = []
elif item.getConfigError():
self.log.debug("Invalid config for change %s" % item.change)
@@ -771,7 +784,7 @@
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
self.pipeline._consecutive_failures += 1
- if layout.hasProject(item.change.project) and self.pipeline._disabled:
+ if project_in_pipeline and self.pipeline._disabled:
actions = self.pipeline.disabled_actions
# Check here if we should disable so that we only use the disabled
# reporters /after/ the last disable_at failure is still reported as
diff --git a/zuul/merger/client.py b/zuul/merger/client.py
index 5191a44..2614e58 100644
--- a/zuul/merger/client.py
+++ b/zuul/merger/client.py
@@ -134,18 +134,18 @@
def onBuildCompleted(self, job):
data = getJobData(job)
merged = data.get('merged', False)
- updated = data.get('updated', False)
+ job.updated = data.get('updated', False)
commit = data.get('commit')
files = data.get('files', {})
repo_state = data.get('repo_state', {})
job.files = files
self.log.info("Merge %s complete, merged: %s, updated: %s, "
"commit: %s" %
- (job, merged, updated, commit))
+ (job, merged, job.updated, commit))
job.setComplete()
if job.build_set:
self.sched.onMergeCompleted(job.build_set,
- merged, updated, commit, files,
+ merged, job.updated, commit, files,
repo_state)
# The test suite expects the job to be removed from the
# internal account after the wake flag is set.
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index ed98696..8b98bfb 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -191,11 +191,14 @@
def checkout(self, ref):
repo = self.createRepoObject()
self.log.debug("Checking out %s" % ref)
- repo.head.reference = ref
+ # Perform a hard reset before checking out so that we clean up
+ # anything that might be left over from a merge.
reset_repo_to_head(repo)
+ repo.git.checkout(ref)
return repo.head.commit
def checkoutLocalBranch(self, branch):
+ # TODO(jeblair): retire in favor of checkout
repo = self.createRepoObject()
# Perform a hard reset before checking out so that we clean up
# anything that might be left over from a merge.
@@ -341,10 +344,6 @@
return self._addProject(hostname, project_name, url, sshkey)
def updateRepo(self, connection_name, project_name):
- # TODOv3(jhesketh): Reimplement
- # da90a50b794f18f74de0e2c7ec3210abf79dda24 after merge..
- # Likely we'll handle connection context per projects differently.
- # self._setGitSsh()
repo = self.getRepo(connection_name, project_name)
try:
self.log.info("Updating local repository %s/%s",
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index fc599c1..881209d 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -111,7 +111,7 @@
def refstate(self, job):
args = json.loads(job.arguments)
- success, repo_state = self.merger.getItemRepoState(args['items'])
+ success, repo_state = self.merger.getRepoState(args['items'])
result = dict(updated=success,
repo_state=repo_state)
job.sendWorkComplete(json.dumps(result))
diff --git a/zuul/model.py b/zuul/model.py
index 850bbe2..0e42368 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -21,6 +21,7 @@
import time
from uuid import uuid4
import urllib.parse
+import textwrap
MERGER_MERGE = 1 # "git merge"
MERGER_MERGE_RESOLVE = 2 # "git merge -s resolve"
@@ -1374,6 +1375,7 @@
self.quiet = False
self.active = False # Whether an item is within an active window
self.live = True # Whether an item is intended to be processed at all
+ # TODO(jeblair): move job_graph to buildset
self.job_graph = None
def __repr__(self):
@@ -1391,6 +1393,7 @@
old.next_build_set = self.current_build_set
self.current_build_set.previous_build_set = old
self.build_sets.append(self.current_build_set)
+ self.job_graph = None
def addBuild(self, build):
self.current_build_set.addBuild(build)
@@ -2093,6 +2096,83 @@
self.private_key_file = None
+class ConfigItemNotListError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration file is not a list. Each zuul.yaml configuration
+ file must be a list of items, for example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ Ensure that every item starts with "- " so that it is parsed as a
+ YAML list.
+ """)
+ super(ConfigItemNotListError, self).__init__(message)
+
+
+class ConfigItemNotDictError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration item is not a dictionary. Each zuul.yaml
+ configuration file must be a list of dictionaries, for
+ example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ Ensure that every item in the list is a dictionary with one
+ key (in this example, 'job' and 'project').
+ """)
+ super(ConfigItemNotDictError, self).__init__(message)
+
+
+class ConfigItemMultipleKeysError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration item has more than one key. Each zuul.yaml
+ configuration file must be a list of dictionaries with a
+ single key, for example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ Ensure that every item in the list is a dictionary with only
+ one key (in this example, 'job' and 'project'). This error
+ may be caused by insufficient indentation of the keys under
+ the configuration item ('name' in this example).
+ """)
+ super(ConfigItemMultipleKeysError, self).__init__(message)
+
+
+class ConfigItemUnknownError(Exception):
+ def __init__(self):
+ message = textwrap.dedent("""\
+ Configuration item not recognized. Each zuul.yaml
+ configuration file must be a list of dictionaries, for
+ example:
+
+ - job:
+ name: foo
+
+ - project:
+ name: bar
+
+ The dictionary keys must match one of the configuration item
+ types recognized by zuul (for example, 'job' or 'project').
+ """)
+ super(ConfigItemUnknownError, self).__init__(message)
+
+
class UnparsedAbideConfig(object):
"""A collection of yaml lists that has not yet been parsed into objects.
@@ -2109,25 +2189,18 @@
return
if not isinstance(conf, list):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotListError()
+
for item in conf:
if not isinstance(item, dict):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotDictError()
if len(item.keys()) > 1:
- raise Exception("Configuration item dictionaries must have "
- "a single key (when parsing %s)" %
- (conf,))
+ raise ConfigItemMultipleKeysError()
key, value = list(item.items())[0]
if key == 'tenant':
self.tenants.append(value)
else:
- raise Exception("Configuration item not recognized "
- "(when parsing %s)" %
- (conf,))
+ raise ConfigItemUnknownError()
class UnparsedTenantConfig(object):
@@ -2166,19 +2239,13 @@
return
if not isinstance(conf, list):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotListError()
for item in conf:
if not isinstance(item, dict):
- raise Exception("Configuration items must be in the form of "
- "a list of dictionaries (when parsing %s)" %
- (conf,))
+ raise ConfigItemNotDictError()
if len(item.keys()) > 1:
- raise Exception("Configuration item dictionaries must have "
- "a single key (when parsing %s)" %
- (conf,))
+ raise ConfigItemMultipleKeysError()
key, value = list(item.items())[0]
if key == 'project':
name = value['name']
@@ -2196,9 +2263,7 @@
elif key == 'semaphore':
self.semaphores.append(value)
else:
- raise Exception("Configuration item `%s` not recognized "
- "(when parsing %s)" %
- (item, conf,))
+ raise ConfigItemUnknownError()
class Layout(object):
@@ -2331,19 +2396,21 @@
job_graph.addJob(frozen_job)
def createJobGraph(self, item):
- project_config = self.project_configs.get(
- item.change.project.canonical_name, None)
- ret = JobGraph()
# NOTE(pabelanger): It is possible for a foreign project not to have a
# configured pipeline, if so return an empty JobGraph.
- if project_config and item.pipeline.name in project_config.pipelines:
- project_job_list = \
- project_config.pipelines[item.pipeline.name].job_list
- self._createJobGraph(item, project_job_list, ret)
+ ret = JobGraph()
+ ppc = self.getProjectPipelineConfig(item.change.project,
+ item.pipeline)
+ if ppc:
+ self._createJobGraph(item, ppc.job_list, ret)
return ret
- def hasProject(self, project):
- return project.canonical_name in self.project_configs
+ def getProjectPipelineConfig(self, project, pipeline):
+ project_config = self.project_configs.get(
+ project.canonical_name, None)
+ if not project_config:
+ return None
+ return project_config.pipelines.get(pipeline.name, None)
class Semaphore(object):
@@ -2439,6 +2506,7 @@
def __init__(self, name):
self.name = name
self.max_nodes_per_job = 5
+ self.max_job_timeout = 10800
self.exclude_unprotected_branches = False
self.default_base_job = None
self.layout = None
@@ -2600,20 +2668,30 @@
class TimeDataBase(object):
def __init__(self, root):
self.root = root
- self.jobs = {}
- def _getTD(self, name):
- td = self.jobs.get(name)
- if not td:
- td = JobTimeData(os.path.join(self.root, name))
- self.jobs[name] = td
- td.load()
+ def _getTD(self, build):
+ if hasattr(build.build_set.item.change, 'branch'):
+ branch = build.build_set.item.change.branch
+ else:
+ branch = ''
+
+ dir_path = os.path.join(
+ self.root,
+ build.build_set.item.pipeline.layout.tenant.name,
+ build.build_set.item.change.project.canonical_name,
+ branch)
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+ path = os.path.join(dir_path, build.job.name)
+
+ td = JobTimeData(path)
+ td.load()
return td
def getEstimatedTime(self, name):
return self._getTD(name).getEstimatedTime()
- def update(self, name, elapsed, result):
- td = self._getTD(name)
+ def update(self, build, elapsed, result):
+ td = self._getTD(build)
td.add(elapsed, result)
td.save()
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 52b34ec..5432661 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -543,9 +543,16 @@
tenant, item)
item.item_ahead = None
item.items_behind = []
- if (item.change.project and
- new_pipeline.manager.reEnqueueItem(item,
- last_head)):
+ reenqueued = False
+ if item.change.project:
+ try:
+ reenqueued = new_pipeline.manager.reEnqueueItem(
+ item, last_head)
+ except Exception:
+ self.log.exception(
+ "Exception while re-enqueing item %s",
+ item)
+ if reenqueued:
for build in item.current_build_set.getBuilds():
new_job = item.getJob(build.job.name)
if new_job:
@@ -826,7 +833,7 @@
return
try:
build.estimated_time = float(self.time_database.getEstimatedTime(
- build.job.name))
+ build))
except Exception:
self.log.exception("Exception estimating build time:")
pipeline.manager.onBuildStarted(event.build)
@@ -865,8 +872,7 @@
if build.end_time and build.start_time and build.result:
duration = build.end_time - build.start_time
try:
- self.time_database.update(
- build.job.name, duration, build.result)
+ self.time_database.update(build, duration, build.result)
except Exception:
self.log.exception("Exception recording build time:")
pipeline.manager.onBuildCompleted(event.build)