Merge "Make sure services are running for test-setup.sh" into feature/zuulv3
diff --git a/.zuul.yaml b/.zuul.yaml
index e2eea68..50223fa 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,10 +1,12 @@
- job:
name: base
- pre-run: base-pre
- post-run: base-post
+ pre-run: base/pre
+ post-run: base/post
success-url: http://zuulv3-dev.openstack.org/logs/{build.uuid}/
failure-url: http://zuulv3-dev.openstack.org/logs/{build.uuid}/
timeout: 1800
+ vars:
+ zuul_workspace_root: /home/zuul
nodes:
- name: ubuntu-xenial
image: ubuntu-xenial
@@ -12,25 +14,35 @@
- job:
name: tox
parent: base
- pre-run: tox-pre
- post-run: tox-post
+ pre-run: tox/pre
+ post-run: tox/post
- job:
name: tox-cover
parent: tox
+ run: tox/cover
voting: false
- job:
name: tox-docs
parent: tox
+ run: tox/docs
- job:
name: tox-linters
parent: tox
+ run: tox/linters
- job:
name: tox-py27
parent: tox
+ run: tox/py27
+
+- job:
+ name: tox-tarball
+ parent: tox
+ run: tox/tarball
+ post-run: tox/tarball-post
- project:
name: openstack-infra/zuul
@@ -40,3 +52,4 @@
- tox-cover
- tox-linters
- tox-py27
+ - tox-tarball
diff --git a/README.rst b/README.rst
index 932edbf..c55f7b3 100644
--- a/README.rst
+++ b/README.rst
@@ -58,7 +58,7 @@
Some of the information in the specs may be effectively superceded
by changes here, which are still undergoing review.
-4) Read documentation on the internal data model and testing: http://docs.openstack.org/infra/zuul/feature/zuulv3/internals.html
+4) Read developer documentation on the internal data model and testing: http://docs.openstack.org/infra/zuul/feature/zuulv3/developer.html
The general philosophy for Zuul tests is to perform functional
testing of either the individual component or the entire end-to-end
diff --git a/bindep.txt b/bindep.txt
index 8d8c45b..6895444 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -4,4 +4,13 @@
mysql-client [test]
mysql-server [test]
libjpeg-dev [test]
+openssl [test]
zookeeperd [platform:dpkg]
+build-essential [platform:dpkg]
+gcc [platform:rpm]
+libssl-dev [platform:dpkg]
+openssl-devel [platform:rpm]
+libffi-dev [platform:dpkg]
+libffi-devel [platform:rpm]
+python-dev [platform:dpkg]
+python-devel [platform:rpm]
diff --git a/doc/source/datamodel.rst b/doc/source/developer/datamodel.rst
similarity index 93%
rename from doc/source/datamodel.rst
rename to doc/source/developer/datamodel.rst
index 9df6505..2996ff4 100644
--- a/doc/source/datamodel.rst
+++ b/doc/source/developer/datamodel.rst
@@ -26,12 +26,12 @@
A :py:class:`~zuul.model.Job` represents the definition of what to do. A
:py:class:`~zuul.model.Build` represents a single run of a
-:py:class:`~zuul.model.Job`. A :py:class:`~zuul.model.JobTree` is used to
+:py:class:`~zuul.model.Job`. A :py:class:`~zuul.model.JobGraph` is used to
encapsulate the dependencies between one or more :py:class:`~zuul.model.Job`
objects.
.. autoclass:: zuul.model.Job
-.. autoclass:: zuul.model.JobTree
+.. autoclass:: zuul.model.JobGraph
.. autoclass:: zuul.model.Build
The :py:class:`~zuul.manager.base.PipelineManager` enqueues each
@@ -48,7 +48,6 @@
Changes
~~~~~~~
-.. autoclass:: zuul.model.Changeish
.. autoclass:: zuul.model.Change
.. autoclass:: zuul.model.Ref
diff --git a/doc/source/drivers.rst b/doc/source/developer/drivers.rst
similarity index 100%
rename from doc/source/drivers.rst
rename to doc/source/developer/drivers.rst
diff --git a/doc/source/developer.rst b/doc/source/developer/index.rst
similarity index 95%
rename from doc/source/developer.rst
rename to doc/source/developer/index.rst
index 527ea6e..986bbe4 100644
--- a/doc/source/developer.rst
+++ b/doc/source/developer/index.rst
@@ -12,4 +12,5 @@
datamodel
drivers
+ triggers
testing
diff --git a/doc/source/testing.rst b/doc/source/developer/testing.rst
similarity index 92%
rename from doc/source/testing.rst
rename to doc/source/developer/testing.rst
index 092754f..4a813d0 100644
--- a/doc/source/testing.rst
+++ b/doc/source/developer/testing.rst
@@ -19,7 +19,7 @@
.. autoclass:: tests.base.FakeGearmanServer
:members:
-.. autoclass:: tests.base.RecordingLaunchServer
+.. autoclass:: tests.base.RecordingExecutorServer
:members:
.. autoclass:: tests.base.FakeBuild
diff --git a/doc/source/developer/triggers.rst b/doc/source/developer/triggers.rst
new file mode 100644
index 0000000..56f4a03
--- /dev/null
+++ b/doc/source/developer/triggers.rst
@@ -0,0 +1,19 @@
+Triggers
+========
+
+Triggers must inherit from :py:class:`~zuul.trigger.BaseTrigger` and, at a minimum,
+implement the :py:meth:`~zuul.trigger.BaseTrigger.getEventFilters` method.
+
+.. autoclass:: zuul.trigger.BaseTrigger
+ :members:
+
+The current list of triggers is:
+
+.. autoclass:: zuul.driver.gerrit.gerrittrigger.GerritTrigger
+ :members:
+
+.. autoclass:: zuul.driver.timer.timertrigger.TimerTrigger
+ :members:
+
+.. autoclass:: zuul.driver.zuul.zuultrigger.ZuulTrigger
+ :members:
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 3f903db..fb30b92 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -24,7 +24,7 @@
executors
statsd
client
- developer
+ developer/index
Indices and tables
==================
diff --git a/doc/source/zuul.rst b/doc/source/zuul.rst
index e4ce737..56cc6a8 100644
--- a/doc/source/zuul.rst
+++ b/doc/source/zuul.rst
@@ -124,13 +124,6 @@
optional value and ``1`` is used by default.
``status_expiry=1``
-**url_pattern**
- If you are storing build logs external to the system that originally
- ran jobs and wish to link to those logs when Zuul makes comments on
- Gerrit changes for completed jobs this setting configures what the
- URLs for those links should be. Used by zuul-server only.
- ``http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}``
-
**job_name_in_report**
Boolean value (``true`` or ``false``) that indicates whether the
job name should be included in the report (normally only the URL
@@ -644,10 +637,12 @@
would largely defeat the parallelization of dependent change testing
that is the main feature of Zuul. Default: ``false``.
-**mutex (optional)**
- This is a string that names a mutex that should be observed by this
- job. Only one build of any job that references the same named mutex
- will be enqueued at a time. This applies across all pipelines.
+**semaphore (optional)**
+ This is a string that names a semaphore that should be observed by this
+ job. The semaphore defines how many jobs which reference that semaphore
+ can be enqueued at a time. This applies across all pipelines in the same
+ tenant. The max value of the semaphore can be specified in the config
+ repositories and defaults to 1.
**branch (optional)**
This job should only be run on matching branches. This field is
@@ -850,6 +845,21 @@
or specified in the project itself, the configuration defined by
either the last template or the project itself will take priority.
+
+Semaphores
+""""""""""
+
+When using semaphores the maximum value of each one can be specified in their
+respective config repositories. Unspecified semaphores default to 1::
+
+ - semaphore:
+ name: semaphore-foo
+ max: 5
+ - semaphore:
+ name: semaphore-bar
+ max: 3
+
+
logging.conf
~~~~~~~~~~~~
This file is optional. If provided, it should be a standard
diff --git a/playbooks/base-post.yaml b/playbooks/base/post.yaml
similarity index 100%
rename from playbooks/base-post.yaml
rename to playbooks/base/post.yaml
diff --git a/playbooks/base-pre.yaml b/playbooks/base/pre.yaml
similarity index 100%
rename from playbooks/base-pre.yaml
rename to playbooks/base/pre.yaml
diff --git a/playbooks/base/roles b/playbooks/base/roles
new file mode 120000
index 0000000..7b9ade8
--- /dev/null
+++ b/playbooks/base/roles
@@ -0,0 +1 @@
+../roles/
\ No newline at end of file
diff --git a/playbooks/roles/extra-test-setup/tasks/main.yaml b/playbooks/roles/extra-test-setup/tasks/main.yaml
new file mode 100644
index 0000000..da4259e
--- /dev/null
+++ b/playbooks/roles/extra-test-setup/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+- name: Check if the project's tools/test-setup.sh exists.
+ stat:
+ path: "{{ zuul_workspace_root }}/src/{{ zuul.project }}/tools/test-setup.sh"
+ register: p
+
+- name: Run tools/test-setup.sh.
+ shell: tools/test-setup.sh
+ args:
+ chdir: "{{ zuul_workspace_root }}/src/{{ zuul.project }}"
+ when:
+ - p.stat.exists
+ - p.stat.executable
diff --git a/playbooks/roles/prepare-workspace/defaults/main.yaml b/playbooks/roles/prepare-workspace/defaults/main.yaml
deleted file mode 100644
index 9127ad8..0000000
--- a/playbooks/roles/prepare-workspace/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# tasks/main.yaml
-prepare_workspace_root: /home/zuul/workspace
diff --git a/playbooks/roles/prepare-workspace/tasks/main.yaml b/playbooks/roles/prepare-workspace/tasks/main.yaml
index d8a5316..4d42b2d 100644
--- a/playbooks/roles/prepare-workspace/tasks/main.yaml
+++ b/playbooks/roles/prepare-workspace/tasks/main.yaml
@@ -10,12 +10,13 @@
- name: Create workspace directory.
file:
- path: "{{ prepare_workspace_root }}"
+ path: "{{ zuul_workspace_root }}"
owner: zuul
group: zuul
state: directory
- name: Synchronize src repos to workspace directory.
synchronize:
- dest: "{{ prepare_workspace_root }}"
+ dest: "{{ zuul_workspace_root }}"
src: "{{ zuul.executor.src_root }}"
+ no_log: true
diff --git a/playbooks/roles/revoke-sudo/tasks/main.yaml b/playbooks/roles/revoke-sudo/tasks/main.yaml
new file mode 100644
index 0000000..1c18187
--- /dev/null
+++ b/playbooks/roles/revoke-sudo/tasks/main.yaml
@@ -0,0 +1,8 @@
+- name: Remove sudo access for zuul user.
+ become: yes
+ file:
+ path: /etc/sudoers.d/zuul-sudo
+ state: absent
+
+- name: Prove that general sudo access is actually revoked.
+    shell: "! sudo -n true"
diff --git a/playbooks/roles/run-bindep/tasks/main.yaml b/playbooks/roles/run-bindep/tasks/main.yaml
index 7717c86..5a9d33e 100644
--- a/playbooks/roles/run-bindep/tasks/main.yaml
+++ b/playbooks/roles/run-bindep/tasks/main.yaml
@@ -2,4 +2,4 @@
- name: Run install-distro-packages.sh
shell: /usr/local/jenkins/slave_scripts/install-distro-packages.sh
args:
- chdir: "/home/zuul/workspace/src/{{ zuul.project }}"
+ chdir: "{{ zuul_workspace_root }}/src/{{ zuul.project }}"
diff --git a/playbooks/roles/run-cover/tasks/main.yaml b/playbooks/roles/run-cover/tasks/main.yaml
index 5fce7f1..caed13c 100644
--- a/playbooks/roles/run-cover/tasks/main.yaml
+++ b/playbooks/roles/run-cover/tasks/main.yaml
@@ -1,4 +1,4 @@
- name: Execute run-cover.sh.
shell: "/usr/local/jenkins/slave_scripts/run-cover.sh {{ run_cover_envlist }}"
args:
- chdir: "/home/zuul/workspace/src/{{ zuul.project }}"
+ chdir: "{{ zuul_workspace_root }}/src/{{ zuul.project }}"
diff --git a/playbooks/roles/run-docs/tasks/main.yaml b/playbooks/roles/run-docs/tasks/main.yaml
index 3266f2b..2250593 100644
--- a/playbooks/roles/run-docs/tasks/main.yaml
+++ b/playbooks/roles/run-docs/tasks/main.yaml
@@ -1,4 +1,4 @@
- name: Execute run-docs.sh.
shell: "/usr/local/jenkins/slave_scripts/run-docs.sh {{ run_docs_envlist }}"
args:
- chdir: "/home/zuul/workspace/src/{{ zuul.project }}"
+ chdir: "{{ zuul_workspace_root }}/src/{{ zuul.project }}"
diff --git a/playbooks/roles/run-tarball/defaults/main.yaml b/playbooks/roles/run-tarball/defaults/main.yaml
new file mode 100644
index 0000000..072828a
--- /dev/null
+++ b/playbooks/roles/run-tarball/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+run_tarball_envlist: venv
diff --git a/playbooks/roles/run-tarball/tasks/main.yaml b/playbooks/roles/run-tarball/tasks/main.yaml
new file mode 100644
index 0000000..e21c4c8
--- /dev/null
+++ b/playbooks/roles/run-tarball/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Execute run-tarball.sh.
+ shell: "/usr/local/jenkins/slave_scripts/run-tarball.sh {{ run_tarball_envlist }}"
+ args:
+ chdir: "{{ zuul_workspace_root }}/src/{{ zuul.project }}"
diff --git a/playbooks/roles/run-tox/tasks/main.yaml b/playbooks/roles/run-tox/tasks/main.yaml
index 1053690..29a4cc4 100644
--- a/playbooks/roles/run-tox/tasks/main.yaml
+++ b/playbooks/roles/run-tox/tasks/main.yaml
@@ -1,4 +1,4 @@
- name: Run tox
shell: "/usr/local/jenkins/slave_scripts/run-tox.sh {{ run_tox_envlist }}"
args:
- chdir: "/home/zuul/workspace/src/{{ zuul.project }}"
+ chdir: "{{ zuul_workspace_root }}/src/{{ zuul.project }}"
diff --git a/playbooks/roles/run-wheel/defaults/main.yaml b/playbooks/roles/run-wheel/defaults/main.yaml
new file mode 100644
index 0000000..8645d33
--- /dev/null
+++ b/playbooks/roles/run-wheel/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+run_wheel_envlist: venv
diff --git a/playbooks/roles/run-wheel/tasks/main.yaml b/playbooks/roles/run-wheel/tasks/main.yaml
new file mode 100644
index 0000000..f5aaf54
--- /dev/null
+++ b/playbooks/roles/run-wheel/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Execute run-wheel.sh.
+ shell: "/usr/local/jenkins/slave_scripts/run-wheel.sh {{ run_wheel_envlist }}"
+ args:
+ chdir: "{{ zuul_workspace_root }}/src/{{ zuul.project }}"
diff --git a/playbooks/tox-cover.yaml b/playbooks/tox-cover.yaml
deleted file mode 100644
index ca391e1..0000000
--- a/playbooks/tox-cover.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-- hosts: all
- roles:
- - run-cover
diff --git a/playbooks/tox/cover.yaml b/playbooks/tox/cover.yaml
new file mode 100644
index 0000000..642eb4e
--- /dev/null
+++ b/playbooks/tox/cover.yaml
@@ -0,0 +1,5 @@
+- hosts: all
+ roles:
+ - extra-test-setup
+ - revoke-sudo
+ - run-cover
diff --git a/playbooks/tox-docs.yaml b/playbooks/tox/docs.yaml
similarity index 67%
rename from playbooks/tox-docs.yaml
rename to playbooks/tox/docs.yaml
index 98b3313..028e1c5 100644
--- a/playbooks/tox-docs.yaml
+++ b/playbooks/tox/docs.yaml
@@ -1,3 +1,4 @@
- hosts: all
roles:
+ - revoke-sudo
- run-docs
diff --git a/playbooks/tox-linters.yaml b/playbooks/tox/linters.yaml
similarity index 79%
rename from playbooks/tox-linters.yaml
rename to playbooks/tox/linters.yaml
index 9da2e8a..d1e7f13 100644
--- a/playbooks/tox-linters.yaml
+++ b/playbooks/tox/linters.yaml
@@ -2,4 +2,5 @@
vars:
run_tox_envlist: pep8
roles:
+ - revoke-sudo
- run-tox
diff --git a/playbooks/tox-post.yaml b/playbooks/tox/post.yaml
similarity index 86%
rename from playbooks/tox-post.yaml
rename to playbooks/tox/post.yaml
index 697b727..3b035f8 100644
--- a/playbooks/tox-post.yaml
+++ b/playbooks/tox/post.yaml
@@ -3,7 +3,7 @@
- name: Find tox directories to synchrionize.
find:
file_type: directory
- paths: "/home/zuul/workspace/src/{{ zuul.project }}/.tox"
+ paths: "{{ zuul_workspace_root }}/src/{{ zuul.project }}/.tox"
# NOTE(pabelanger): The .tox/log folder is empty, ignore it.
patterns: ^(?!log).*$
use_regex: yes
diff --git a/playbooks/tox-pre.yaml b/playbooks/tox/pre.yaml
similarity index 100%
rename from playbooks/tox-pre.yaml
rename to playbooks/tox/pre.yaml
diff --git a/playbooks/tox-py27.yaml b/playbooks/tox/py27.yaml
similarity index 62%
rename from playbooks/tox-py27.yaml
rename to playbooks/tox/py27.yaml
index 13756b5..fd45f27 100644
--- a/playbooks/tox-py27.yaml
+++ b/playbooks/tox/py27.yaml
@@ -2,4 +2,6 @@
vars:
run_tox_envlist: py27
roles:
+ - extra-test-setup
+ - revoke-sudo
- run-tox
diff --git a/playbooks/tox/roles b/playbooks/tox/roles
new file mode 120000
index 0000000..7b9ade8
--- /dev/null
+++ b/playbooks/tox/roles
@@ -0,0 +1 @@
+../roles/
\ No newline at end of file
diff --git a/playbooks/tox/tarball-post.yaml b/playbooks/tox/tarball-post.yaml
new file mode 100644
index 0000000..fb41707
--- /dev/null
+++ b/playbooks/tox/tarball-post.yaml
@@ -0,0 +1,10 @@
+- hosts: all
+ tasks:
+ - name: Collect tarball artifacts.
+ synchronize:
+ dest: "{{ zuul.executor.src_root }}/tarballs"
+ mode: pull
+ src: "{{ zuul_workspace_root }}/src/{{ zuul.project }}/dist/{{ item }}"
+ with_items:
+ - "*.tar.gz"
+ - "*.whl"
diff --git a/playbooks/tox/tarball.yaml b/playbooks/tox/tarball.yaml
new file mode 100644
index 0000000..4d5a8f6
--- /dev/null
+++ b/playbooks/tox/tarball.yaml
@@ -0,0 +1,5 @@
+- hosts: all
+ roles:
+ - revoke-sudo
+ - run-tarball
+ - run-wheel
diff --git a/requirements.txt b/requirements.txt
index 186e7f6..c7e059a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,4 @@
kazoo
sqlalchemy
alembic
+cryptography>=1.6
diff --git a/setup.cfg b/setup.cfg
index 86ebf65..9ee64f3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -31,6 +31,7 @@
source-dir = doc/source
build-dir = doc/build
all_files = 1
+warning-is-error = 1
[extras]
mysql_reporter=
diff --git a/test-requirements.txt b/test-requirements.txt
index b99c803..6262a02 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,7 +1,7 @@
hacking>=0.12.0,!=0.13.0,<0.14 # Apache-2.0
coverage>=3.6
-sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
+sphinx>=1.5.1
sphinxcontrib-blockdiag>=1.1.0
fixtures>=0.3.14
python-keystoneclient>=0.4.2
diff --git a/tests/base.py b/tests/base.py
index 29981ac..9bd44f6 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -50,6 +50,7 @@
import testtools.content
import testtools.content_type
from git.exc import NoSuchPathError
+import yaml
import zuul.driver.gerrit.gerritsource as gerritsource
import zuul.driver.gerrit.gerritconnection as gerritconnection
@@ -663,8 +664,8 @@
"""Return whether this build has certain changes in its git repos.
:arg FakeChange changes: One or more changes (varargs) that
- are expected to be present (in order) in the git repository of
- the active project.
+ are expected to be present (in order) in the git repository of
+ the active project.
:returns: Whether the build has the indicated changes.
:rtype: bool
@@ -801,9 +802,13 @@
def getHostList(self, args):
self.log.debug("hostlist")
hosts = super(RecordingAnsibleJob, self).getHostList(args)
- for name, d in hosts:
- d['ansible_connection'] = 'local'
- hosts.append(('localhost', dict(ansible_connection='local')))
+ for host in hosts:
+ host['host_vars']['ansible_connection'] = 'local'
+
+ hosts.append(dict(
+ name='localhost',
+ host_vars=dict(ansible_connection='local'),
+ host_keys=[]))
return hosts
@@ -990,6 +995,7 @@
created_time=now,
updated_time=now,
image_id=None,
+ host_keys=["fake-key1", "fake-key2"],
executor='fake-nodepool')
data = json.dumps(data)
path = self.client.create(path, data,
@@ -1208,6 +1214,11 @@
different tenant/project layout while using the standard main
configuration.
+ :cvar bool create_project_keys: Indicates whether Zuul should
+ auto-generate keys for each project, or whether the test
+ infrastructure should insert dummy keys to save time during
+ startup. Defaults to False.
+
The following are instance variables that are useful within test
methods:
@@ -1239,6 +1250,7 @@
config_file = 'zuul.conf'
run_ansible = False
+ create_project_keys = False
def _startMerger(self):
self.merge_server = zuul.merger.server.MergeServer(self.config,
@@ -1433,6 +1445,39 @@
project = reponame.replace('_', '/')
self.copyDirToRepo(project,
os.path.join(git_path, reponame))
+ self.setupAllProjectKeys()
+
+ def setupAllProjectKeys(self):
+ if self.create_project_keys:
+ return
+
+ path = self.config.get('zuul', 'tenant_config')
+ with open(os.path.join(FIXTURE_DIR, path)) as f:
+ tenant_config = yaml.safe_load(f.read())
+ for tenant in tenant_config:
+ sources = tenant['tenant']['source']
+ for source, conf in sources.items():
+ for project in conf.get('config-repos', []):
+ self.setupProjectKeys(source, project)
+ for project in conf.get('project-repos', []):
+ self.setupProjectKeys(source, project)
+
+ def setupProjectKeys(self, source, project):
+ # Make sure we set up an RSA key for the project so that we
+ # don't spend time generating one:
+
+ key_root = os.path.join(self.state_root, 'keys')
+ if not os.path.isdir(key_root):
+ os.mkdir(key_root, 0o700)
+ private_key_file = os.path.join(key_root, source, project + '.pem')
+ private_key_dir = os.path.dirname(private_key_file)
+ self.log.debug("Installing test keys for project %s at %s" % (
+ project, private_key_file))
+ if not os.path.isdir(private_key_dir):
+ os.makedirs(private_key_dir)
+ with open(os.path.join(FIXTURE_DIR, 'private.pem')) as i:
+ with open(private_key_file, 'w') as o:
+ o.write(i.read())
def setupZK(self):
self.zk_chroot_fixture = self.useFixture(ChrootedKazooFixture())
@@ -1468,6 +1513,22 @@
self.assertFalse(node['_lock'], "Node %s is locked" %
(node['_oid'],))
+ def assertNoGeneratedKeys(self):
+ # Make sure that Zuul did not generate any project keys
+ # (unless it was supposed to).
+
+ if self.create_project_keys:
+ return
+
+ with open(os.path.join(FIXTURE_DIR, 'private.pem')) as i:
+ test_key = i.read()
+
+ key_root = os.path.join(self.state_root, 'keys')
+ for root, dirname, files in os.walk(key_root):
+ for fn in files:
+ with open(os.path.join(root, fn)) as f:
+ self.assertEqual(test_key, f.read())
+
def assertFinalState(self):
# Make sure that git.Repo objects have been garbage collected.
repos = []
@@ -1479,6 +1540,7 @@
self.assertEqual(len(repos), 0)
self.assertEmptyQueues()
self.assertNodepoolState()
+ self.assertNoGeneratedKeys()
ipm = zuul.manager.independent.IndependentPipelineManager
for tenant in self.sched.abide.tenants.values():
for pipeline in tenant.layout.pipelines.values():
@@ -1845,6 +1907,7 @@
f.close()
self.config.set('zuul', 'tenant_config',
os.path.join(FIXTURE_DIR, f.name))
+ self.setupAllProjectKeys()
def addCommitToRepo(self, project, message, files,
branch='master', tag=None):
@@ -1873,11 +1936,18 @@
def commitLayoutUpdate(self, orig_name, source_name):
source_path = os.path.join(self.test_root, 'upstream',
- source_name, 'zuul.yaml')
- with open(source_path, 'r') as nt:
- before = self.addCommitToRepo(
- orig_name, 'Pulling content from %s' % source_name,
- {'zuul.yaml': nt.read()})
+ source_name)
+ to_copy = ['zuul.yaml']
+ for playbook in os.listdir(os.path.join(source_path, 'playbooks')):
+ to_copy.append('playbooks/{}'.format(playbook))
+ commit_data = {}
+ for source_file in to_copy:
+ source_file_path = os.path.join(source_path, source_file)
+ with open(source_file_path, 'r') as nt:
+ commit_data[source_file] = nt.read()
+ before = self.addCommitToRepo(
+ orig_name, 'Pulling content from %s' % source_name,
+ commit_data)
return before
def addEvent(self, connection, event):
@@ -1900,7 +1970,7 @@
this method.
:arg str connection: The name of the connection corresponding
- to the gerrit server.
+ to the gerrit server.
:arg str event: The JSON-encoded event.
"""
diff --git a/tests/encrypt_secret.py b/tests/encrypt_secret.py
new file mode 100644
index 0000000..b8524a0
--- /dev/null
+++ b/tests/encrypt_secret.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+import os
+
+from zuul.lib import encryption
+
+FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
+ 'fixtures')
+
+
+def main():
+ private_key_file = os.path.join(FIXTURE_DIR, 'private.pem')
+ with open(private_key_file, "rb") as f:
+ private_key, public_key = \
+ encryption.deserialize_rsa_keypair(f.read())
+
+ ciphertext = encryption.encrypt_pkcs1_oaep(sys.argv[1], public_key)
+ print(ciphertext.encode('base64'))
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
new file mode 100644
index 0000000..92c66d1
--- /dev/null
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
@@ -0,0 +1,15 @@
+- hosts: ubuntu-xenial
+ tasks:
+ - name: Assert nodepool variables are valid.
+ assert:
+ that:
+ - nodepool_az == 'test-az'
+ - nodepool_region == 'test-region'
+ - nodepool_provider == 'test-provider'
+
+ - name: Assert zuul-executor variables are valid.
+ assert:
+ that:
+ - zuul.executor.hostname is defined
+ - zuul.executor.src_root is defined
+ - zuul.executor.log_root is defined
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/nodepool.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/nodepool.yaml
deleted file mode 100644
index 9970dd7..0000000
--- a/tests/fixtures/config/ansible/git/common-config/playbooks/nodepool.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-- hosts: ubuntu-xenial
- tasks:
- - name: Assert nodepool variables are valid.
- assert:
- that:
- - nodepool_az == 'test-az'
- - nodepool_region == 'test-region'
- - nodepool_provider == 'test-provider'
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml
index 45acb87..3371a20 100644
--- a/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml
@@ -6,5 +6,8 @@
- copy:
src: "{{zuul._test.test_root}}/{{zuul.uuid}}.flag"
dest: "{{zuul._test.test_root}}/{{zuul.uuid}}.copied"
+ - copy:
+ content: "{{test_secret.username}} {{test_secret.password}}"
+ dest: "{{zuul._test.test_root}}/{{zuul.uuid}}.secrets"
roles:
- bare-role
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index 50f353d..0980bc1 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -2,6 +2,7 @@
name: check
manager: independent
source: gerrit
+ allow-secrets: true
trigger:
gerrit:
- event: patchset-created
@@ -34,6 +35,21 @@
verified: 0
precedence: high
+- secret:
+ name: test_secret
+ data:
+ username: test-username
+ password: !encrypted/pkcs1-oaep |
+ BFhtdnm8uXx7kn79RFL/zJywmzLkT1GY78P3bOtp4WghUFWobkifSu7ZpaV4NeO0s71YUsi1wGZZ
+ L0LveZjUN0t6OU1VZKSG8R5Ly7urjaSo1pPVIq5Rtt/H7W14Lecd+cUeKb4joeusC9drN3AA8a4o
+ ykcVpt1wVqUnTbMGC9ARMCQP6eopcs1l7tzMseprW4RDNhIuz3CRgd0QBMPl6VDoFgBPB8vxtJw+
+ 3m0rqBYZCLZgCXekqlny8s2s92nJMuUABbJOEcDRarzibDsSXsfJt1y+5n7yOURsC7lovMg4GF/v
+ Cl/0YMKjBO5bpv9EM5fToeKYyPGSKQoHOnCYceb3cAVcv5UawcCic8XjhEhp4K7WPdYf2HVAC/qt
+ xhbpjTxG4U5Q/SoppOJ60WqEkQvbXs6n5Dvy7xmph6GWmU/bAv3eUK3pdD3xa2Ue1lHWz3U+rsYr
+ aI+AKYsMYx3RBlfAmCeC1ve2BXPrqnOo7G8tnUvfdYPbK4Aakk0ds/AVqFHEZN+S6hRBmBjLaRFW
+ Z3QSO1NjbBxWnaHKZYT7nkrJm8AMCgZU0ZArFLpaufKCeiK5ECSsDxic4FIsY1OkWT42qEUfL0Wd
+ +150AKGNZpPJnnP3QYY4W/MWcKH/zdO400+zWN52WevbSqZy90tqKDJrBkMl1ydqbuw1E4ZHvIs=
+
- job:
name: python27
pre-run: pre
@@ -42,6 +58,9 @@
flagpath: '{{zuul._test.test_root}}/{{zuul.uuid}}.flag'
roles:
- zuul: bare-role
+ auth:
+ secrets:
+ - test_secret
- job:
parent: python27
@@ -50,7 +69,7 @@
- job:
parent: python27
- name: nodepool
+ name: check-vars
nodes:
- name: ubuntu-xenial
image: ubuntu-xenial
diff --git a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
index 24ba019..a2d9c6f 100644
--- a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
@@ -8,5 +8,5 @@
jobs:
- python27
- faillocal
- - nodepool
+ - check-vars
- timeout
diff --git a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
index d6f083d..60cd434 100644
--- a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
@@ -6,3 +6,7 @@
tenant-one-gate:
jobs:
- project-test1
+
+- semaphore:
+ name: test-semaphore
+ max: 1
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/common-config/zuul.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/common-config/zuul.yaml
new file mode 100644
index 0000000..d18ed46
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/common-config/zuul.yaml
@@ -0,0 +1,13 @@
+- pipeline:
+ name: check
+ manager: independent
+ source: gerrit
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/org_project1/README b/tests/fixtures/config/multi-tenant-semaphore/git/org_project1/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/org_project2/README b/tests/fixtures/config/multi-tenant-semaphore/git/org_project2/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/playbooks/project1-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/playbooks/project1-test1.yaml
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/zuul.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/zuul.yaml
new file mode 100644
index 0000000..5e377e7
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/zuul.yaml
@@ -0,0 +1,13 @@
+- job:
+ name: project1-test1
+ semaphore: test-semaphore
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - project1-test1
+
+- semaphore:
+ name: test-semaphore
+ max: 1
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/playbooks/project2-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/playbooks/project2-test1.yaml
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/zuul.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/zuul.yaml
new file mode 100644
index 0000000..a310532
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/zuul.yaml
@@ -0,0 +1,13 @@
+- job:
+ name: project2-test1
+ semaphore: test-semaphore
+
+- project:
+ name: org/project2
+ check:
+ jobs:
+ - project2-test1
+
+- semaphore:
+ name: test-semaphore
+ max: 2
diff --git a/tests/fixtures/config/multi-tenant-semaphore/main.yaml b/tests/fixtures/config/multi-tenant-semaphore/main.yaml
new file mode 100644
index 0000000..b1c47b1
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/main.yaml
@@ -0,0 +1,15 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-repos:
+ - common-config
+ - tenant-one-config
+
+- tenant:
+ name: tenant-two
+ source:
+ gerrit:
+ config-repos:
+ - common-config
+ - tenant-two-config
diff --git a/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
index 47c173d..dff18de 100644
--- a/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
@@ -151,6 +151,15 @@
- project:
name: org/project2
+ check:
+ jobs:
+ - project-merge
+ - project-test1:
+ dependencies: project-merge
+ - project-test2:
+ dependencies: project-merge
+ - project1-project2-integration:
+ dependencies: project-merge
gate:
queue: integrated
jobs:
diff --git a/tests/fixtures/config/single-tenant/git/layout-ignore-dependencies/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-ignore-dependencies/zuul.yaml
new file mode 100644
index 0000000..4010372
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-ignore-dependencies/zuul.yaml
@@ -0,0 +1,66 @@
+- pipeline:
+ name: check
+ manager: independent
+ ignore-dependencies: true
+ source: gerrit
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- job:
+ name: project1-merge
+
+- job:
+ name: project1-test1
+
+- job:
+ name: project1-test2
+
+- job:
+ name: project2-merge
+
+- job:
+ name: project2-test1
+
+- job:
+ name: project2-test2
+
+- job:
+ name: project1-project2-integration
+ queue-name: integration
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - project1-merge
+ - project1-test1:
+ dependencies:
+ - project1-merge
+ - project1-test2:
+ dependencies:
+ - project1-merge
+ - project1-project2-integration:
+ dependencies:
+ - project1-merge
+
+- project:
+ name: org/project2
+ check:
+ jobs:
+ - project2-merge
+ - project2-test1:
+ dependencies:
+ - project2-merge
+ - project2-test2:
+ dependencies:
+ - project2-merge
+ - project1-project2-integration:
+ dependencies:
+ - project2-merge
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-merge.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-merge.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-test2.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-test2.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-testfile.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/playbooks/project-testfile.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/zuul.yaml
new file mode 100644
index 0000000..a6d6599
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-live-reconfiguration-del-project/zuul.yaml
@@ -0,0 +1,42 @@
+- pipeline:
+ name: check
+ manager: independent
+ source: gerrit
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- job:
+ name: project-merge
+ hold-following-changes: true
+
+- job:
+ name: project-test1
+
+- job:
+ name: project-test2
+
+- job:
+ name: project-testfile
+
+- project:
+ name: org/project
+ merge-mode: cherry-pick
+ check:
+ jobs:
+ - project-merge
+ - project-test1:
+ dependencies:
+ - project-merge
+ - project-test2:
+ dependencies:
+ - project-merge
+ - project-testfile:
+ dependencies:
+ - project-merge
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-two.yaml b/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-two.yaml
deleted file mode 100644
index f679dce..0000000
--- a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-two.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-- hosts: all
- tasks: []
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-mutex/zuul.yaml
deleted file mode 100644
index bb92b7a..0000000
--- a/tests/fixtures/config/single-tenant/git/layout-mutex/zuul.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-- pipeline:
- name: check
- manager: independent
- source: gerrit
- trigger:
- gerrit:
- - event: patchset-created
- success:
- gerrit:
- verified: 1
- failure:
- gerrit:
- verified: -1
-
-- job:
- name: project-test1
-
-- job:
- name: mutex-one
- mutex: test-mutex
-
-- job:
- name: mutex-two
- mutex: test-mutex
-
-- project:
- name: org/project
- check:
- jobs:
- - project-test1
- - mutex-one
- - mutex-two
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-no-jobs/playbooks/gate-noop.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
rename to tests/fixtures/config/single-tenant/git/layout-no-jobs/playbooks/gate-noop.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-no-jobs/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-no-jobs/zuul.yaml
new file mode 100644
index 0000000..5894440
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-no-jobs/zuul.yaml
@@ -0,0 +1,49 @@
+- pipeline:
+ name: check
+ manager: independent
+ source: gerrit
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ source:
+ gerrit
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - approved: 1
+ success:
+ gerrit:
+ verified: 2
+ submit: true
+ failure:
+ gerrit:
+ verified: -2
+ start:
+ gerrit:
+ verified: 0
+ precedence: high
+
+- job:
+ name: gate-noop
+
+- project:
+ name: org/project
+ merge-mode: cherry-pick
+ check:
+ jobs:
+ - gate-noop
+ gate:
+ jobs:
+ - gate-noop
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-merge.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-merge.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test1.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
rename to tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test2.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test2.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-rate-limit/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/zuul.yaml
new file mode 100644
index 0000000..c4e00f6
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-rate-limit/zuul.yaml
@@ -0,0 +1,47 @@
+- pipeline:
+ name: gate
+ manager: dependent
+ failure-message: Build failed. For information on how to proceed, see http://wiki.example.org/Test_Failures
+ source: gerrit
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - approved: 1
+ start:
+ gerrit:
+ verified: 0
+ success:
+ gerrit:
+ verified: 2
+ submit: true
+ failure:
+ gerrit:
+ verified: -2
+ window: 2
+ window-floor: 1
+ window-increase-type: linear
+ window-increase-factor: 1
+ window-decrease-type: exponential
+ window-decrease-factor: 2
+
+- job:
+ name: project-merge
+
+- job:
+ name: project-test1
+
+- job:
+ name: project-test2
+
+- project:
+ name: org/project
+ gate:
+ jobs:
+ - project-merge
+ - project-test1:
+ dependencies:
+ - project-merge
+ - project-test2:
+ dependencies:
+ - project-merge
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/playbooks/project-test1.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml
rename to tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/playbooks/project-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/zuul.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/zuul.yaml
rename to tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/zuul.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/project-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/project-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test2.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test2.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test2.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test2.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-semaphore/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/zuul.yaml
new file mode 100644
index 0000000..f935112
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-semaphore/zuul.yaml
@@ -0,0 +1,52 @@
+- pipeline:
+ name: check
+ manager: independent
+ source: gerrit
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- job:
+ name: project-test1
+
+- job:
+ name: semaphore-one-test1
+ semaphore: test-semaphore
+
+- job:
+ name: semaphore-one-test2
+ semaphore: test-semaphore
+
+- job:
+ name: semaphore-two-test1
+ semaphore: test-semaphore-two
+
+- job:
+ name: semaphore-two-test2
+ semaphore: test-semaphore-two
+
+- project:
+ name: org/project
+ check:
+ jobs:
+ - project-test1
+ - semaphore-one-test1
+ - semaphore-one-test2
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - project-test1
+ - semaphore-two-test1
+ - semaphore-two-test2
+
+- semaphore:
+ name: test-semaphore-two
+ max: 2
diff --git a/tests/fixtures/config/single-tenant/main.yaml b/tests/fixtures/config/single-tenant/main.yaml
index a22ed5c..d9868fa 100644
--- a/tests/fixtures/config/single-tenant/main.yaml
+++ b/tests/fixtures/config/single-tenant/main.yaml
@@ -4,3 +4,5 @@
gerrit:
config-repos:
- common-config
+ project-repos:
+ - org/project
diff --git a/tests/fixtures/config/success-url/git/common-config/zuul.yaml b/tests/fixtures/config/success-url/git/common-config/zuul.yaml
index f2d5251..b3ecf6d 100644
--- a/tests/fixtures/config/success-url/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/success-url/git/common-config/zuul.yaml
@@ -19,7 +19,7 @@
- job:
name: docs-draft-test
- success-url: http://docs-draft.example.org/{build.parameters[LOG_PATH]}/publish-docs/
+ success-url: http://docs-draft.example.org/{change.number:.2}/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.uuid:.7}/publish-docs/
- job:
name: docs-draft-test2
diff --git a/tests/fixtures/layout-ignore-dependencies.yaml b/tests/fixtures/layout-ignore-dependencies.yaml
deleted file mode 100644
index 5c0257c..0000000
--- a/tests/fixtures/layout-ignore-dependencies.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-pipelines:
- - name: check
- manager: IndependentPipelineManager
- ignore-dependencies: true
- trigger:
- gerrit:
- - event: patchset-created
- success:
- gerrit:
- verified: 1
- failure:
- gerrit:
- verified: -1
-
-projects:
- - name: org/project1
- check:
- - project1-merge:
- - project1-test1
- - project1-test2
- - project1-project2-integration
-
- - name: org/project2
- check:
- - project2-merge:
- - project2-test1
- - project2-test2
- - project1-project2-integration
diff --git a/tests/fixtures/layout-live-reconfiguration-del-project.yaml b/tests/fixtures/layout-live-reconfiguration-del-project.yaml
deleted file mode 100644
index 07ffb2e..0000000
--- a/tests/fixtures/layout-live-reconfiguration-del-project.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-pipelines:
- - name: check
- manager: IndependentPipelineManager
- trigger:
- gerrit:
- - event: patchset-created
- success:
- gerrit:
- verified: 1
- failure:
- gerrit:
- verified: -1
-
-projects:
- - name: org/project
- merge-mode: cherry-pick
- check:
- - project-merge:
- - project-test1
- - project-test2
- - project-testfile
diff --git a/tests/fixtures/layout-mutex-reconfiguration.yaml b/tests/fixtures/layout-mutex-reconfiguration.yaml
deleted file mode 100644
index 76cf1e9..0000000
--- a/tests/fixtures/layout-mutex-reconfiguration.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-pipelines:
- - name: check
- manager: IndependentPipelineManager
- trigger:
- gerrit:
- - event: patchset-created
- success:
- gerrit:
- verified: 1
- failure:
- gerrit:
- verified: -1
-
-jobs:
- - name: mutex-one
- mutex: test-mutex
- - name: mutex-two
- mutex: test-mutex
-
-projects:
- - name: org/project
- check:
- - project-test1
diff --git a/tests/fixtures/layout-no-jobs.yaml b/tests/fixtures/layout-no-jobs.yaml
deleted file mode 100644
index e860ad5..0000000
--- a/tests/fixtures/layout-no-jobs.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-includes:
- - python-file: custom_functions.py
-
-pipelines:
- - name: check
- manager: IndependentPipelineManager
- trigger:
- gerrit:
- - event: patchset-created
- success:
- gerrit:
- verified: 1
- failure:
- gerrit:
- verified: -1
-
- - name: gate
- manager: DependentPipelineManager
- failure-message: Build failed. For information on how to proceed, see http://wiki.example.org/Test_Failures
- trigger:
- gerrit:
- - event: comment-added
- approval:
- - approved: 1
- success:
- gerrit:
- verified: 2
- submit: true
- failure:
- gerrit:
- verified: -2
- start:
- gerrit:
- verified: 0
- precedence: high
-
-projects:
- - name: org/project
- merge-mode: cherry-pick
- check:
- - gate-noop
- gate:
- - gate-noop
diff --git a/tests/fixtures/layout-rate-limit.yaml b/tests/fixtures/layout-rate-limit.yaml
deleted file mode 100644
index 9f6748c..0000000
--- a/tests/fixtures/layout-rate-limit.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-pipelines:
- - name: gate
- manager: DependentPipelineManager
- failure-message: Build failed. For information on how to proceed, see http://wiki.example.org/Test_Failures
- trigger:
- gerrit:
- - event: comment-added
- approval:
- - approved: 1
- start:
- gerrit:
- verified: 0
- success:
- gerrit:
- verified: 2
- submit: true
- failure:
- gerrit:
- verified: -2
- window: 2
- window-floor: 1
- window-increase-type: linear
- window-increase-factor: 1
- window-decrease-type: exponential
- window-decrease-factor: 2
-
-projects:
- - name: org/project
- gate:
- - project-merge:
- - project-test1
- - project-test2
diff --git a/tests/fixtures/private.pem b/tests/fixtures/private.pem
new file mode 100644
index 0000000..fa709b6
--- /dev/null
+++ b/tests/fixtures/private.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKgIBAAKCAgEAsGqZLUUwV/EZJKddMS206mH7qYmqYhWLo/TUlpDt2JuEaBqC
+YV8mF9LsjpoqM/Pp0U/r5aQLDUXbRLDn+K+NqbvTJajYxHJicP1CAWg1eKUNZjUa
+ya5HP4Ow1hS7AeiF4TSRdiwtHT/gJO2NSsavyc30/meKt0WBgbYlrBB81HEQjYWn
+ajf/4so5E8DdrC9tAqmmzde1qcTz7ULouIz53hjp/U3yVMFbpawv194jzHvddmAX
+3aEUByx2t6lP7dhOAEIEmzmh15hRbacxQI5aYWv+ZR0z9PqdwwD+DBbb1AwiX5MJ
+jtIoVCmkEZvcUFiDicyteNMCa5ulpj2SF0oH4MlialOP6MiJnmxklDYO07AM/qom
+cU55pCD8ctu1yD/UydecLk0Uj/9XxqmPQJFEcstdXJZQfr5ZNnChOEg6oQ9UImWj
+av8HQsA6mFW1oAKbDMrgEewooWriqGW5pYtR7JBfph6Mt5HGaeH4uqYpb1fveHG1
+ODa7HBnlNo3qMendBb2wzHGCgtUgWnGfp24TsUOUlndCXYhsYbOZbCTW5GwElK0G
+ri06KPpybY43AIaxcxqilVh5Eapmq7axBm4ZzbTOfv15L0FIemEGgpnklevQbZNL
+IrcE0cS/13qJUvFaYX4yjrtEnzZ3ntjXrpFdgLPBKn7Aqf6lWz6BPi07axECAwEA
+AQKCAgEAkoPltYhZ7x+ojx2Es1xPfb1kwlg4Ln/QWpnymR3Cu3vlioRBtlbMj0q4
+9nIpDL7NeO4Ub8M+/oX+5ly6O3qpf8cjRIqnhPeutEJRuFNw3ULPDwyZs9hPCfv4
+OMQ80AfqcLA1At0Lltg+8sxr5SeARW0MxOD/fth2B2FchjunQNSqN69B7GCX3yWu
+I66xK9izg1uc0iYNlPKi13ETUHqc5ozwgFRlJ2jzEXQgw/qU5rYUpsSF7aZiuNZ/
+vmcan+FeXq51nulNdX3mWthZelD/1RtYy2dmiFZAAf1oAGhXqBNv1MqMTJZTshpn
+TcyRPBVXIXHgvJEa2H4LJDbMhxUP1opJ+Vxa8Cy6I60O8TwPBHwL83K5oH4yugun
+AP2hWZxFMK9YcVliJwt3Mjozuh5vCRF9+7oqi0fASuhOY+eYNQAtcPK9WBti6qmN
+hUO4bdx+r+UEb8TliUDH+x5lNmKc2pgptYS+O8+oB2vh2V7e0mwvc3jg4S7E5Ukm
+y4Y9JS0c4q352W0lrfPCDYwzXEpK8mmCjvBC/w320Yi2HJwqkfYQThgEbzOP37dW
+Ei+0+cu6RuA4H+1DozkrWybFw6Ju12IE4vfbliyht1yuj0+/Rpevp1KpFKuy5xSB
+1Jq3lGxTFDGle7nRBc2JwfIu63texnmvTwKlx1+w0tqpY/gVZhUCggEBAOAzVHum
+luqKVewWT8yR4mZx4jiWdxLch3Q+scMq2mthQ5773Of0P2r45iJz7jDS7fT0yuRF
+gBpqygX42xe+wqJleKAzKyMQ9aWtYRszfCz6Ob9kLTtoi0/Xuo5dMyg41BRHAatr
+acj9NXBEvRS4oNKw3nxEVayBjvYN5LwLAzGNorXCkt9E+72eWJU6eg0CQQxwI2rG
+f/S+niMtLDWfayHPu7KBKRVlUu1kI07JF1eSJmsHBcTN1+CaXuN82Ty+ucdtjRWR
+5FyLZxaceLGrY5so87pH7kcBB2+H7ovuash7g+CT3XyDcQACWTjTszIpt6fGO6ux
+7Tea5/OOLaJiaI8CggEBAMlwPPW3HQzC6dqwBVNgVYQh9ZEoygKOWNMPNE1TuqUU
+boJLazQI5Qd/qm17otAnDrIX7cEB/+6xiQPZkw6lsqdzGHNBSXb8OPYvLDBHq2oR
+oNjdW4/c5znBL3ExXqEJIHAl9FWc5YLRvboHwtkKCpK5mdlZyoMVsBX62IFodAhK
+a8oQiLvYjOwFOay3sOMdhc+ndupw7b9MaAsbe1w7DW3Y7I/bHstxiriDfuTI/nt7
+MPZBzj9afqWHEJ3TWwuJ1IuUhHupf9ylA06GfBgerWSlp90yVfbZNQDljtdNwIZW
+oBLF6EhZxh6ka8iodeS4cduxEV3BoofMXjIjVReCgl8CggEBALSwabwl7Kclyk21
+RabnRAGwctOMYHbxCLHk/Tr/xHyaLPdqoQTH0nySEFdf+22Z8XFkAEiswquHuT3K
+7Dhc41wiT289Ddz7BB78drCHc+KD4Bqhz9p7TRuSD6ZA8sPN2Q5mk6/lp6H2gCT1
+ITYb/nEPXp/kKvAWknM3i0sJzQ8YyTOXluseG40cmuPZ9xeY43f0wHaDeAh1v9k1
+xNWKn7rmQq2Abu3xdT4hYFtUsd0/ynqjdEDCbON1Rlgs/J96Txus7PGfXN5A81pD
+zPnT2TjpblSJOD49VBLNCLH5+lGNSiGqyexZuq55NhMYeulIud0bZGfhw/72d03R
+HnIqwX0CggEBAKiKglbMuT+eLfBN6obSSXretwqXaD4vP96IECjK75WDvNrDo5TM
+BGT7ymsEUTt8Em2sW79rnunmHU/dUY+l0A8O29xDOeaWLkq9OWnD7YY37a7FtwBt
+wgGuw7Ufq59tdXigKQkg119XgjkOmVbjcelF5ZXX7Ps0wDoDwfa0oLD3I6zTnLQf
+AfnQfWsn3paIcxdFdNe/WQ0ALuVsPxDyT9Ai+ft7SQ7Ll1e+ngNqsJI8hsDkWl7j
+pqd0lNCYsMq8rduDjj2xmkvQvS2MlHPR5x4ZBJSsswRwxEpVx+gZJAbCn/hVIn62
+rm+g/pXLbajLMmiwhGk/xG9+7SliKqYbCl0CggEATQtwqAVPdwzT5XaRS1CeLId5
+sZD8mP5WLBKas69nfISilcUKqJjqTTqxfXs60wOK3/r43B+7QLitfPLRqf0hRQT9
+6HQG1YGx1FfZwgsP5SJKpAGGjenhsSTwpMJJI5s2I2e1O01frF2qEodqmRUwHXbh
+rGXqzAHLieaBzHjSvS2Z4kGVu6ZbpRXSNTSiiF+z8O9PCahzNFrC/ty+lbtxcqhf
+wHttEccW1TmiuB9GD23NI96zLsjZALvdqpvHMf5OHiDdLmI+Ap7qlR04V3bDDzF4
+B6HR6bRxVZQQWaEwE1RfuDgj5Msrbcgq0yFayPvXGiIIrAUWkUUQVsUU/TOfBQ==
+-----END RSA PRIVATE KEY-----
diff --git a/tests/fixtures/public.pem b/tests/fixtures/public.pem
new file mode 100644
index 0000000..33a78c4
--- /dev/null
+++ b/tests/fixtures/public.pem
@@ -0,0 +1,14 @@
+-----BEGIN PUBLIC KEY-----
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsGqZLUUwV/EZJKddMS20
+6mH7qYmqYhWLo/TUlpDt2JuEaBqCYV8mF9LsjpoqM/Pp0U/r5aQLDUXbRLDn+K+N
+qbvTJajYxHJicP1CAWg1eKUNZjUaya5HP4Ow1hS7AeiF4TSRdiwtHT/gJO2NSsav
+yc30/meKt0WBgbYlrBB81HEQjYWnajf/4so5E8DdrC9tAqmmzde1qcTz7ULouIz5
+3hjp/U3yVMFbpawv194jzHvddmAX3aEUByx2t6lP7dhOAEIEmzmh15hRbacxQI5a
+YWv+ZR0z9PqdwwD+DBbb1AwiX5MJjtIoVCmkEZvcUFiDicyteNMCa5ulpj2SF0oH
+4MlialOP6MiJnmxklDYO07AM/qomcU55pCD8ctu1yD/UydecLk0Uj/9XxqmPQJFE
+cstdXJZQfr5ZNnChOEg6oQ9UImWjav8HQsA6mFW1oAKbDMrgEewooWriqGW5pYtR
+7JBfph6Mt5HGaeH4uqYpb1fveHG1ODa7HBnlNo3qMendBb2wzHGCgtUgWnGfp24T
+sUOUlndCXYhsYbOZbCTW5GwElK0Gri06KPpybY43AIaxcxqilVh5Eapmq7axBm4Z
+zbTOfv15L0FIemEGgpnklevQbZNLIrcE0cS/13qJUvFaYX4yjrtEnzZ3ntjXrpFd
+gLPBKn7Aqf6lWz6BPi07axECAwEAAQ==
+-----END PUBLIC KEY-----
diff --git a/tests/fixtures/zuul-connections-multiple-gerrits.conf b/tests/fixtures/zuul-connections-multiple-gerrits.conf
index b3182d7..d1522ec 100644
--- a/tests/fixtures/zuul-connections-multiple-gerrits.conf
+++ b/tests/fixtures/zuul-connections-multiple-gerrits.conf
@@ -3,7 +3,6 @@
[zuul]
tenant_config=main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
job_name_in_report=true
[merger]
diff --git a/tests/fixtures/zuul-connections-same-gerrit.conf b/tests/fixtures/zuul-connections-same-gerrit.conf
index 6156df4..8ddd0f1 100644
--- a/tests/fixtures/zuul-connections-same-gerrit.conf
+++ b/tests/fixtures/zuul-connections-same-gerrit.conf
@@ -3,7 +3,6 @@
[zuul]
tenant_config=config/zuul-connections-same-gerrit/main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
job_name_in_report=true
[merger]
diff --git a/tests/fixtures/zuul-git-driver.conf b/tests/fixtures/zuul-git-driver.conf
index 0a4e230..499b564 100644
--- a/tests/fixtures/zuul-git-driver.conf
+++ b/tests/fixtures/zuul-git-driver.conf
@@ -3,7 +3,6 @@
[zuul]
tenant_config=config/zuul-connections-same-gerrit/main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
job_name_in_report=true
[merger]
diff --git a/tests/fixtures/zuul-sql-driver-bad.conf b/tests/fixtures/zuul-sql-driver-bad.conf
index d91e2f6..a4df735 100644
--- a/tests/fixtures/zuul-sql-driver-bad.conf
+++ b/tests/fixtures/zuul-sql-driver-bad.conf
@@ -2,8 +2,7 @@
server=127.0.0.1
[zuul]
-tenant_config=main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
+layout_config=layout-connections-multiple-voters.yaml
job_name_in_report=true
[merger]
diff --git a/tests/fixtures/zuul.conf b/tests/fixtures/zuul.conf
index ce29310..cd80a45 100644
--- a/tests/fixtures/zuul.conf
+++ b/tests/fixtures/zuul.conf
@@ -3,7 +3,6 @@
[zuul]
tenant_config=main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
job_name_in_report=true
[merger]
diff --git a/tests/make_playbooks.py b/tests/make_playbooks.py
index 17acba8..93c37bc 100755
--- a/tests/make_playbooks.py
+++ b/tests/make_playbooks.py
@@ -14,7 +14,7 @@
import os
-import yaml
+from zuul.lib import yamlutil as yaml
FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
'fixtures')
diff --git a/tests/nodepool/test_nodepool_integration.py b/tests/nodepool/test_nodepool_integration.py
index 67968a3..2c9a9b3 100644
--- a/tests/nodepool/test_nodepool_integration.py
+++ b/tests/nodepool/test_nodepool_integration.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-
+import socket
import time
from unittest import skip
@@ -30,9 +30,9 @@
def setUp(self):
super(BaseTestCase, self).setUp()
- self.zk_config = zuul.zk.ZooKeeperConnectionConfig('localhost')
self.zk = zuul.zk.ZooKeeper()
- self.zk.connect([self.zk_config])
+ self.zk.connect('localhost:2181')
+ self.hostname = socket.gethostname()
self.provisioned_requests = []
# This class implements the scheduler methods zuul.nodepool
diff --git a/tests/unit/test_encryption.py b/tests/unit/test_encryption.py
new file mode 100644
index 0000000..4dda78b
--- /dev/null
+++ b/tests/unit/test_encryption.py
@@ -0,0 +1,69 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import subprocess
+import tempfile
+
+from zuul.lib import encryption
+
+from tests.base import BaseTestCase
+
+
+class TestEncryption(BaseTestCase):
+
+ def setUp(self):
+ super(TestEncryption, self).setUp()
+ self.private, self.public = encryption.generate_rsa_keypair()
+
+ def test_serialization(self):
+ "Verify key serialization"
+ pem_private = encryption.serialize_rsa_private_key(self.private)
+ private2, public2 = encryption.deserialize_rsa_keypair(pem_private)
+
+ # cryptography public / private key objects don't implement
+ # equality testing, so we make sure they have the same numbers.
+ self.assertEqual(self.private.private_numbers(),
+ private2.private_numbers())
+ self.assertEqual(self.public.public_numbers(),
+ public2.public_numbers())
+
+ def test_pkcs1_oaep(self):
+ "Verify encryption and decryption"
+ orig_plaintext = "some text to encrypt"
+ ciphertext = encryption.encrypt_pkcs1_oaep(orig_plaintext, self.public)
+ plaintext = encryption.decrypt_pkcs1_oaep(ciphertext, self.private)
+ self.assertEqual(orig_plaintext, plaintext)
+
+ def test_openssl_pkcs1_oaep(self):
+ "Verify that we can decrypt something encrypted with OpenSSL"
+ orig_plaintext = "some text to encrypt"
+ pem_public = encryption.serialize_rsa_public_key(self.public)
+ public_file = tempfile.NamedTemporaryFile(delete=False)
+ try:
+ public_file.write(pem_public)
+ public_file.close()
+
+ p = subprocess.Popen(['openssl', 'rsautl', '-encrypt',
+ '-oaep', '-pubin', '-inkey',
+ public_file.name],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate(orig_plaintext)
+ ciphertext = stdout
+ finally:
+ os.unlink(public_file.name)
+
+ plaintext = encryption.decrypt_pkcs1_oaep(ciphertext, self.private)
+ self.assertEqual(orig_plaintext, plaintext)
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index ee7c6ab..2167a3b 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -18,19 +18,37 @@
import fixtures
import testtools
-import yaml
from zuul import model
from zuul import configloader
+from zuul.lib import encryption
+from zuul.lib import yamlutil as yaml
-from tests.base import BaseTestCase
+from tests.base import BaseTestCase, FIXTURE_DIR
+
+
+class FakeSource(object):
+ def __init__(self, name):
+ self.name = name
class TestJob(BaseTestCase):
def setUp(self):
super(TestJob, self).setUp()
- self.project = model.Project('project', None)
+ self.tenant = model.Tenant('tenant')
+ self.layout = model.Layout()
+ self.project = model.Project('project', 'connection')
+ self.source = FakeSource('connection')
+ self.tenant.addProjectRepo(self.source, self.project)
+ self.pipeline = model.Pipeline('gate', self.layout)
+ self.layout.addPipeline(self.pipeline)
+ self.queue = model.ChangeQueue(self.pipeline)
+
+ private_key_file = os.path.join(FIXTURE_DIR, 'private.pem')
+ with open(private_key_file, "rb") as f:
+ self.project.private_key, self.project.public_key = \
+ encryption.deserialize_rsa_keypair(f.read())
self.context = model.SourceContext(self.project, 'master',
'test', True)
self.start_mark = yaml.Mark('name', 0, 0, 0, '', 0)
@@ -73,7 +91,7 @@
base.pre_run = [base_pre]
base.run = [base_run]
base.post_run = [base_post]
- base.auth = dict(foo='bar', inherit=False)
+ base.auth = model.AuthContext()
py27 = model.Job('py27')
self.assertEqual(None, py27.timeout)
@@ -85,7 +103,7 @@
[x.path for x in py27.run])
self.assertEqual(['base-post'],
[x.path for x in py27.post_run])
- self.assertEqual({}, py27.auth)
+ self.assertEqual(None, py27.auth)
def test_job_variants(self):
# This simulates freezing a job.
@@ -99,7 +117,8 @@
py27.pre_run = [py27_pre]
py27.run = [py27_run]
py27.post_run = [py27_post]
- auth = dict(foo='bar', inherit=False)
+ auth = model.AuthContext()
+ auth.secrets.append('foo')
py27.auth = auth
job = py27.copy()
@@ -302,6 +321,29 @@
tenant = model.Tenant('tenant')
layout = model.Layout()
+ conf = yaml.safe_load('''
+- secret:
+ name: pypi-credentials
+ data:
+ username: test-username
+ password: !encrypted/pkcs1-oaep |
+ BFhtdnm8uXx7kn79RFL/zJywmzLkT1GY78P3bOtp4WghUFWobkifSu7ZpaV4NeO0s71YUsi1wGZZ
+ L0LveZjUN0t6OU1VZKSG8R5Ly7urjaSo1pPVIq5Rtt/H7W14Lecd+cUeKb4joeusC9drN3AA8a4o
+ ykcVpt1wVqUnTbMGC9ARMCQP6eopcs1l7tzMseprW4RDNhIuz3CRgd0QBMPl6VDoFgBPB8vxtJw+
+ 3m0rqBYZCLZgCXekqlny8s2s92nJMuUABbJOEcDRarzibDsSXsfJt1y+5n7yOURsC7lovMg4GF/v
+ Cl/0YMKjBO5bpv9EM5fToeKYyPGSKQoHOnCYceb3cAVcv5UawcCic8XjhEhp4K7WPdYf2HVAC/qt
+ xhbpjTxG4U5Q/SoppOJ60WqEkQvbXs6n5Dvy7xmph6GWmU/bAv3eUK3pdD3xa2Ue1lHWz3U+rsYr
+ aI+AKYsMYx3RBlfAmCeC1ve2BXPrqnOo7G8tnUvfdYPbK4Aakk0ds/AVqFHEZN+S6hRBmBjLaRFW
+ Z3QSO1NjbBxWnaHKZYT7nkrJm8AMCgZU0ZArFLpaufKCeiK5ECSsDxic4FIsY1OkWT42qEUfL0Wd
+ +150AKGNZpPJnnP3QYY4W/MWcKH/zdO400+zWN52WevbSqZy90tqKDJrBkMl1ydqbuw1E4ZHvIs=
+''')[0]['secret']
+
+ conf['_source_context'] = self.context
+ conf['_start_mark'] = self.start_mark
+
+ secret = configloader.SecretParser.fromYaml(layout, conf)
+ layout.addSecret(secret)
+
base = configloader.JobParser.fromYaml(tenant, layout, {
'_source_context': self.context,
'_start_mark': self.start_mark,
@@ -378,11 +420,11 @@
})
layout.addJob(in_repo_job_with_inherit_false)
- self.assertNotIn('auth', in_repo_job_without_inherit.auth)
- self.assertIn('secrets', in_repo_job_with_inherit.auth)
- self.assertEquals(in_repo_job_with_inherit.auth['secrets'],
- ['pypi-credentials'])
- self.assertNotIn('auth', in_repo_job_with_inherit_false.auth)
+ self.assertEqual(None, in_repo_job_without_inherit.auth)
+ self.assertEqual(1, len(in_repo_job_with_inherit.auth.secrets))
+ self.assertEqual(in_repo_job_with_inherit.auth.secrets[0].name,
+ 'pypi-credentials')
+ self.assertEqual(None, in_repo_job_with_inherit_false.auth)
def test_job_inheritance_job_tree(self):
tenant = model.Tenant('tenant')
@@ -537,6 +579,80 @@
"to shadow job base in base_project"):
layout.addJob(base2)
+ def test_job_allowed_projects(self):
+ job = configloader.JobParser.fromYaml(self.tenant, self.layout, {
+ '_source_context': self.context,
+ '_start_mark': self.start_mark,
+ 'name': 'job',
+ 'allowed-projects': ['project'],
+ })
+ self.layout.addJob(job)
+
+ project2 = model.Project('project2', None)
+ context2 = model.SourceContext(project2, 'master',
+ 'test', True)
+
+ project2_config = configloader.ProjectParser.fromYaml(
+ self.tenant, self.layout, [{
+ '_source_context': context2,
+ '_start_mark': self.start_mark,
+ 'name': 'project2',
+ 'gate': {
+ 'jobs': [
+ 'job'
+ ]
+ }
+ }]
+ )
+ self.layout.addProjectConfig(project2_config)
+
+ change = model.Change(project2)
+ # Test master
+ change.branch = 'master'
+ item = self.queue.enqueueChange(change)
+ item.current_build_set.layout = self.layout
+ with testtools.ExpectedException(
+ Exception,
+ "Project project2 is not allowed to run job job"):
+ item.freezeJobGraph()
+
+ def test_job_pipeline_allow_secrets(self):
+ self.pipeline.allow_secrets = False
+ job = configloader.JobParser.fromYaml(self.tenant, self.layout, {
+ '_source_context': self.context,
+ '_start_mark': self.start_mark,
+ 'name': 'job',
+ })
+ auth = model.AuthContext()
+ auth.secrets.append('foo')
+ job.auth = auth
+
+ self.layout.addJob(job)
+
+ project_config = configloader.ProjectParser.fromYaml(
+ self.tenant, self.layout, [{
+ '_source_context': self.context,
+ '_start_mark': self.start_mark,
+ 'name': 'project',
+ 'gate': {
+ 'jobs': [
+ 'job'
+ ]
+ }
+ }]
+ )
+ self.layout.addProjectConfig(project_config)
+
+ change = model.Change(self.project)
+ # Test master
+ change.branch = 'master'
+ item = self.queue.enqueueChange(change)
+ item.current_build_set.layout = self.layout
+ with testtools.ExpectedException(
+ Exception,
+ "Pipeline gate does not allow jobs with secrets"):
+ item.freezeJobGraph()
+
class TestJobTimeData(BaseTestCase):
def setUp(self):
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index a923ff1..e8954df 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -15,6 +15,8 @@
# under the License.
import json
+import textwrap
+
import os
import re
import shutil
@@ -1950,28 +1952,25 @@
self.assertReportedStat('test-timing', '3|ms')
self.assertReportedStat('test-gauge', '12|g')
- @skip("Disabled for early v3 development")
def test_stuck_job_cleanup(self):
"Test that pending jobs are cleaned up if removed from layout"
- # This job won't be registered at startup because it is not in
- # the standard layout, but we need it to already be registerd
- # for when we reconfigure, as that is when Zuul will attempt
- # to run the new job.
- self.worker.registerFunction('build:gate-noop')
+
+ # We want to hold the project-merge job that the fake change enqueues
self.gearman_server.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('code-review', 2)
self.fake_gerrit.addEvent(A.addApproval('approved', 1))
self.waitUntilSettled()
+ # The assertion is that we have one job in the queue, project-merge
self.assertEqual(len(self.gearman_server.getQueue()), 1)
- self.updateConfigLayout(
- 'tests/fixtures/layout-no-jobs.yaml')
+ self.commitLayoutUpdate('common-config', 'layout-no-jobs')
self.sched.reconfigure(self.config)
self.waitUntilSettled()
self.gearman_server.release('gate-noop')
self.waitUntilSettled()
+ # assert that project-merge has been removed from the queue
self.assertEqual(len(self.gearman_server.getQueue()), 0)
self.assertTrue(self.sched._areAllBuildsComplete())
@@ -2180,66 +2179,68 @@
self.assertEqual('https://server/job/project-test2/0/',
status_jobs[2]['report_url'])
- @skip("Disabled for early v3 development")
- def test_merging_queues(self):
- "Test that transitively-connected change queues are merged"
- self.updateConfigLayout(
- 'tests/fixtures/layout-merge-queues.yaml')
+ def test_semaphore_one(self):
+ "Test semaphores with max=1 (mutex)"
+ self.updateConfigLayout('layout-semaphore')
self.sched.reconfigure(self.config)
- self.assertEqual(len(self.sched.layout.pipelines['gate'].queues), 1)
- def test_mutex(self):
- "Test job mutexes"
- self.updateConfigLayout('layout-mutex')
- self.sched.reconfigure(self.config)
+ self.waitUntilSettled()
+ tenant = self.sched.abide.tenants.get('openstack')
self.executor_server.hold_jobs_in_build = True
+
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
- self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
+
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
- self.assertEqual(self.builds[1].name, 'mutex-one')
+ self.assertEqual(self.builds[1].name, 'semaphore-one-test1')
self.assertEqual(self.builds[2].name, 'project-test1')
- self.executor_server.release('mutex-one')
+ self.executor_server.release('semaphore-one-test1')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
- self.assertEqual(self.builds[2].name, 'mutex-two')
- self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+ self.assertEqual(self.builds[2].name, 'semaphore-one-test2')
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
- self.executor_server.release('mutex-two')
+ self.executor_server.release('semaphore-one-test2')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
- self.assertEqual(self.builds[2].name, 'mutex-one')
- self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+ self.assertEqual(self.builds[2].name, 'semaphore-one-test1')
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
- self.executor_server.release('mutex-one')
+ self.executor_server.release('semaphore-one-test1')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
- self.assertEqual(self.builds[2].name, 'mutex-two')
- self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+ self.assertEqual(self.builds[2].name, 'semaphore-one-test2')
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
- self.executor_server.release('mutex-two')
+ self.executor_server.release('semaphore-one-test2')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
- self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
@@ -2249,25 +2250,115 @@
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
- self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
- def test_mutex_abandon(self):
- "Test abandon with job mutexes"
- self.updateConfigLayout('layout-mutex')
+ def test_semaphore_two(self):
+ "Test semaphores with max>1"
+ self.updateConfigLayout('layout-semaphore')
self.sched.reconfigure(self.config)
+ self.waitUntilSettled()
+ tenant = self.sched.abide.tenants.get('openstack')
+
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+ self.assertFalse('test-semaphore-two' in
+ tenant.semaphore_handler.semaphores)
+
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 4)
+ self.assertEqual(self.builds[0].name, 'project-test1')
+ self.assertEqual(self.builds[1].name, 'semaphore-two-test1')
+ self.assertEqual(self.builds[2].name, 'semaphore-two-test2')
+ self.assertEqual(self.builds[3].name, 'project-test1')
+ self.assertTrue('test-semaphore-two' in
+ tenant.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+ 'test-semaphore-two', [])), 2)
+
+ self.executor_server.release('semaphore-two-test1')
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 4)
+ self.assertEqual(self.builds[0].name, 'project-test1')
+ self.assertEqual(self.builds[1].name, 'semaphore-two-test2')
+ self.assertEqual(self.builds[2].name, 'project-test1')
+ self.assertEqual(self.builds[3].name, 'semaphore-two-test1')
+ self.assertTrue('test-semaphore-two' in
+ tenant.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+ 'test-semaphore-two', [])), 2)
+
+ self.executor_server.release('semaphore-two-test2')
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 4)
+ self.assertEqual(self.builds[0].name, 'project-test1')
+ self.assertEqual(self.builds[1].name, 'project-test1')
+ self.assertEqual(self.builds[2].name, 'semaphore-two-test1')
+ self.assertEqual(self.builds[3].name, 'semaphore-two-test2')
+ self.assertTrue('test-semaphore-two' in
+ tenant.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+ 'test-semaphore-two', [])), 2)
+
+ self.executor_server.release('semaphore-two-test1')
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 3)
+ self.assertEqual(self.builds[0].name, 'project-test1')
+ self.assertEqual(self.builds[1].name, 'project-test1')
+ self.assertEqual(self.builds[2].name, 'semaphore-two-test2')
+ self.assertTrue('test-semaphore-two' in
+ tenant.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+ 'test-semaphore-two', [])), 1)
+
+ self.executor_server.release('semaphore-two-test2')
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 2)
+ self.assertEqual(self.builds[0].name, 'project-test1')
+ self.assertEqual(self.builds[1].name, 'project-test1')
+ self.assertFalse('test-semaphore-two' in
+ tenant.semaphore_handler.semaphores)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+
+ self.waitUntilSettled()
+ self.assertEqual(len(self.builds), 0)
+
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(B.reported, 1)
+
+ def test_semaphore_abandon(self):
+ "Test abandon with job semaphores"
+ self.updateConfigLayout('layout-semaphore')
+ self.sched.reconfigure(self.config)
+
+ self.waitUntilSettled()
+ tenant = self.sched.abide.tenants.get('openstack')
+
self.executor_server.hold_jobs_in_build = True
tenant = self.sched.abide.tenants.get('openstack')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
- self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
@@ -2276,31 +2367,47 @@
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
- # The mutex should be released
- self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+ # The semaphore should be released
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
- def test_mutex_reconfigure(self):
- "Test reconfigure with job mutexes"
- self.updateConfigLayout('layout-mutex')
+ def test_semaphore_reconfigure(self):
+ "Test reconfigure with job semaphores"
+ self.updateConfigLayout('layout-semaphore')
self.sched.reconfigure(self.config)
+ self.waitUntilSettled()
+ tenant = self.sched.abide.tenants.get('openstack')
+
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
- self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
- self.updateConfigLayout('layout-mutex-reconfiguration')
+ # reconfigure without layout change
self.sched.reconfigure(self.config)
self.waitUntilSettled()
+ tenant = self.sched.abide.tenants.get('openstack')
+
+ # semaphore still must be held
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+
+ self.updateConfigLayout('layout-semaphore-reconfiguration')
+ self.sched.reconfigure(self.config)
+ self.waitUntilSettled()
+ tenant = self.sched.abide.tenants.get('openstack')
self.executor_server.release('project-test1')
self.waitUntilSettled()
@@ -2308,8 +2415,9 @@
# There should be no builds anymore
self.assertEqual(len(self.builds), 0)
- # The mutex should be released
- self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+ # The semaphore should be released
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
def test_live_reconfiguration(self):
"Test that live reconfiguration works"
@@ -2611,7 +2719,6 @@
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
- @skip("Disabled for early v3 development")
def test_live_reconfiguration_del_project(self):
# Test project deletion from layout
# while changes are enqueued
@@ -2634,14 +2741,14 @@
self.assertEqual(len(self.builds), 5)
# This layout defines only org/project, not org/project1
- self.updateConfigLayout(
- 'tests/fixtures/layout-live-reconfiguration-del-project.yaml')
+ self.commitLayoutUpdate('common-config',
+ 'layout-live-reconfiguration-del-project')
self.sched.reconfigure(self.config)
self.waitUntilSettled()
# Builds for C aborted, builds for A succeed,
# and have change B applied ahead
- job_c = self.getJobFromHistory('project1-test1')
+ job_c = self.getJobFromHistory('project-test1')
self.assertEqual(job_c.changes, '3,1')
self.assertEqual(job_c.result, 'ABORTED')
@@ -2649,8 +2756,9 @@
self.executor_server.release()
self.waitUntilSettled()
- self.assertEqual(self.getJobFromHistory('project-test1').changes,
- '2,1 1,1')
+ self.assertEqual(
+ self.getJobFromHistory('project-test1', 'org/project').changes,
+ '2,1 1,1')
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
@@ -2659,40 +2767,11 @@
self.assertEqual(B.reported, 0)
self.assertEqual(C.reported, 0)
- self.assertEqual(len(self.sched.layout.pipelines['check'].queues), 0)
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
@skip("Disabled for early v3 development")
- def test_live_reconfiguration_functions(self):
- "Test live reconfiguration with a custom function"
- self.worker.registerFunction('build:node-project-test1:debian')
- self.worker.registerFunction('build:node-project-test1:wheezy')
- A = self.fake_gerrit.addFakeChange('org/node-project', 'master', 'A')
- A.addApproval('code-review', 2)
- self.fake_gerrit.addEvent(A.addApproval('approved', 1))
- self.waitUntilSettled()
-
- self.assertIsNone(self.getJobFromHistory('node-project-merge').node)
- self.assertEqual(self.getJobFromHistory('node-project-test1').node,
- 'debian')
- self.assertIsNone(self.getJobFromHistory('node-project-test2').node)
-
- self.updateConfigLayout(
- 'tests/fixtures/layout-live-reconfiguration-functions.yaml')
- self.sched.reconfigure(self.config)
- self.worker.build_history = []
-
- B = self.fake_gerrit.addFakeChange('org/node-project', 'master', 'B')
- B.addApproval('code-review', 2)
- self.fake_gerrit.addEvent(B.addApproval('approved', 1))
- self.waitUntilSettled()
-
- self.assertIsNone(self.getJobFromHistory('node-project-merge').node)
- self.assertEqual(self.getJobFromHistory('node-project-test1').node,
- 'wheezy')
- self.assertIsNone(self.getJobFromHistory('node-project-test2').node)
-
- @skip("Disabled for early v3 development")
def test_delayed_repo_init(self):
self.updateConfigLayout(
'tests/fixtures/layout-delayed-repo-init.yaml')
@@ -2814,7 +2893,6 @@
for q in p['change_queues']:
for head in q['heads']:
for change in head:
- self.assertEqual(change['id'], None)
for job in change['jobs']:
status_jobs.add(job['name'])
self.assertIn('project-bitrot-stable-old', status_jobs)
@@ -3242,11 +3320,9 @@
self.executor_server.release()
self.waitUntilSettled()
- @skip("Disabled for early v3 development")
def test_queue_rate_limiting(self):
"Test that DependentPipelines are rate limited with dep across window"
- self.updateConfigLayout(
- 'tests/fixtures/layout-rate-limit.yaml')
+ self.updateConfigLayout('layout-rate-limit')
self.sched.reconfigure(self.config)
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -3287,7 +3363,8 @@
self.executor_server.release('project-.*')
self.waitUntilSettled()
- queue = self.sched.layout.pipelines['gate'].queues[0]
+ tenant = self.sched.abide.tenants.get('openstack')
+ queue = tenant.layout.pipelines['gate'].queues[0]
# A failed so window is reduced by 1 to 1.
self.assertEqual(queue.window, 1)
self.assertEqual(queue.window_floor, 1)
@@ -3334,11 +3411,9 @@
self.assertEqual(queue.window_floor, 1)
self.assertEqual(C.data['status'], 'MERGED')
- @skip("Disabled for early v3 development")
def test_queue_rate_limiting_dependent(self):
"Test that DependentPipelines are rate limited with dep in window"
- self.updateConfigLayout(
- 'tests/fixtures/layout-rate-limit.yaml')
+ self.updateConfigLayout('layout-rate-limit')
self.sched.reconfigure(self.config)
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -3380,7 +3455,8 @@
self.executor_server.release('project-.*')
self.waitUntilSettled()
- queue = self.sched.layout.pipelines['gate'].queues[0]
+ tenant = self.sched.abide.tenants.get('openstack')
+ queue = tenant.layout.pipelines['gate'].queues[0]
# A failed so window is reduced by 1 to 1.
self.assertEqual(queue.window, 1)
self.assertEqual(queue.window_floor, 1)
@@ -4126,13 +4202,10 @@
self.init_repo("org/unknown")
self._test_crd_check_reconfiguration('org/project1', 'org/unknown')
- @skip("Disabled for early v3 development")
def test_crd_check_ignore_dependencies(self):
"Test cross-repo dependencies can be ignored"
- self.updateConfigLayout(
- 'tests/fixtures/layout-ignore-dependencies.yaml')
+ self.updateConfigLayout('layout-ignore-dependencies')
self.sched.reconfigure(self.config)
- self.registerJobs()
self.gearman_server.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
@@ -4151,7 +4224,8 @@
# Make sure none of the items share a change queue, and all
# are live.
- check_pipeline = self.sched.layout.pipelines['check']
+ tenant = self.sched.abide.tenants.get('openstack')
+ check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(len(check_pipeline.queues), 3)
self.assertEqual(len(check_pipeline.getAllItems()), 3)
for item in check_pipeline.getAllItems():
@@ -4172,7 +4246,6 @@
for job in self.history:
self.assertEqual(len(job.changes.split()), 1)
- @skip("Disabled for early v3 development")
def test_crd_check_transitive(self):
"Test transitive cross-repo dependencies"
# Specifically, if A -> B -> C, and C gets a new patchset and
@@ -4915,3 +4988,239 @@
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
+
+
+class TestSemaphoreMultiTenant(ZuulTestCase):
+ tenant_config_file = 'config/multi-tenant-semaphore/main.yaml'
+
+ def test_semaphore_tenant_isolation(self):
+ "Test semaphores in multiple tenants"
+
+ self.waitUntilSettled()
+ tenant_one = self.sched.abide.tenants.get('tenant-one')
+ tenant_two = self.sched.abide.tenants.get('tenant-two')
+
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+ C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
+ D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
+ E = self.fake_gerrit.addFakeChange('org/project2', 'master', 'E')
+ self.assertFalse('test-semaphore' in
+ tenant_one.semaphore_handler.semaphores)
+ self.assertFalse('test-semaphore' in
+ tenant_two.semaphore_handler.semaphores)
+
+ # add patches to project1 of tenant-one
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ # one build of project1-test1 must run
+ # semaphore of tenant-one must be acquired once
+ # semaphore of tenant-two must not be acquired
+ self.assertEqual(len(self.builds), 1)
+ self.assertEqual(self.builds[0].name, 'project1-test1')
+ self.assertTrue('test-semaphore' in
+ tenant_one.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 1)
+ self.assertFalse('test-semaphore' in
+ tenant_two.semaphore_handler.semaphores)
+
+ # add patches to project2 of tenant-two
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(E.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ # one build of project1-test1 must run
+ # two builds of project2-test1 must run
+ # semaphore of tenant-one must be acquired once
+ # semaphore of tenant-two must be acquired twice
+ self.assertEqual(len(self.builds), 3)
+ self.assertEqual(self.builds[0].name, 'project1-test1')
+ self.assertEqual(self.builds[1].name, 'project2-test1')
+ self.assertEqual(self.builds[2].name, 'project2-test1')
+ self.assertTrue('test-semaphore' in
+ tenant_one.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 1)
+ self.assertTrue('test-semaphore' in
+ tenant_two.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant_two.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 2)
+
+ self.executor_server.release('project1-test1')
+ self.waitUntilSettled()
+
+ # one build of project1-test1 must run
+ # two builds of project2-test1 must run
+ # semaphore of tenant-one must be acquired once
+ # semaphore of tenant-two must be acquired twice
+ self.assertEqual(len(self.builds), 3)
+ self.assertEqual(self.builds[0].name, 'project2-test1')
+ self.assertEqual(self.builds[1].name, 'project2-test1')
+ self.assertEqual(self.builds[2].name, 'project1-test1')
+ self.assertTrue('test-semaphore' in
+ tenant_one.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 1)
+ self.assertTrue('test-semaphore' in
+ tenant_two.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant_two.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 2)
+
+ self.executor_server.release('project2-test1')
+ self.waitUntilSettled()
+
+ # one build of project1-test1 must run
+ # one build of project2-test1 must run
+ # semaphore of tenant-one must be acquired once
+ # semaphore of tenant-two must be acquired once
+ self.assertEqual(len(self.builds), 2)
+ self.assertTrue('test-semaphore' in
+ tenant_one.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 1)
+ self.assertTrue('test-semaphore' in
+ tenant_two.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant_two.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 1)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+
+ self.waitUntilSettled()
+
+ # no build must run
+ # semaphore of tenant-one must not be acquired
+ # semaphore of tenant-two must not be acquired
+ self.assertEqual(len(self.builds), 0)
+ self.assertFalse('test-semaphore' in
+ tenant_one.semaphore_handler.semaphores)
+ self.assertFalse('test-semaphore' in
+ tenant_two.semaphore_handler.semaphores)
+
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(B.reported, 1)
+
+
+class TestSemaphoreInRepo(ZuulTestCase):
+ tenant_config_file = 'config/in-repo/main.yaml'
+
+ def test_semaphore_in_repo(self):
+ "Test semaphores in repo config"
+
+ # This tests dynamic semaphore handling in project repos. The semaphore
+ # max value should not be evaluated dynamically but must be updated
+ # after the change lands.
+
+ self.waitUntilSettled()
+ tenant = self.sched.abide.tenants.get('tenant-one')
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test2
+ semaphore: test-semaphore
+
+ - project:
+ name: org/project
+ tenant-one-gate:
+ jobs:
+ - project-test2
+
+ # the max value in dynamic layout must be ignored
+ - semaphore:
+ name: test-semaphore
+ max: 2
+ """)
+
+ in_repo_playbook = textwrap.dedent(
+ """
+ - hosts: all
+ tasks: []
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf,
+ 'playbooks/project-test2.yaml': in_repo_playbook}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
+ B.setDependsOn(A, 1)
+ C.setDependsOn(A, 1)
+
+ self.executor_server.hold_jobs_in_build = True
+
+ A.addApproval('code-review', 2)
+ B.addApproval('code-review', 2)
+ C.addApproval('code-review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+ self.waitUntilSettled()
+
+ # check that the layout in a queue item still has max value of 1
+ # for test-semaphore
+ pipeline = tenant.layout.pipelines.get('tenant-one-gate')
+ queue = None
+ for queue_candidate in pipeline.queues:
+ if queue_candidate.name == 'org/project':
+ queue = queue_candidate
+ break
+ queue_item = queue.queue[0]
+ item_dynamic_layout = queue_item.current_build_set.layout
+ dynamic_test_semaphore = \
+ item_dynamic_layout.semaphores.get('test-semaphore')
+ self.assertEqual(dynamic_test_semaphore.max, 1)
+
+ # one build must be in queue, one semaphore acquired
+ self.assertEqual(len(self.builds), 1)
+ self.assertEqual(self.builds[0].name, 'project-test2')
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 1)
+
+ self.executor_server.release('project-test2')
+ self.waitUntilSettled()
+
+ # change A must be merged
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(A.reported, 2)
+
+ # send change-merged event as the gerrit mock doesn't send it
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ # now that change A was merged, the new semaphore max must be effective
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ self.assertEqual(tenant.layout.semaphores.get('test-semaphore').max, 2)
+
+ # two builds must be in queue, two semaphores acquired
+ self.assertEqual(len(self.builds), 2)
+ self.assertEqual(self.builds[0].name, 'project-test2')
+ self.assertEqual(self.builds[1].name, 'project-test2')
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+ self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+ 'test-semaphore', [])), 2)
+
+ self.executor_server.release('project-test2')
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 0)
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+
+ self.waitUntilSettled()
+ self.assertEqual(len(self.builds), 0)
+
+ self.assertEqual(A.reported, 2)
+ self.assertEqual(B.reported, 2)
+ self.assertEqual(C.reported, 2)
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 5c0679d..678b957 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -20,7 +20,8 @@
import testtools
import zuul.configloader
-from tests.base import AnsibleZuulTestCase, ZuulTestCase
+from zuul.lib import encryption
+from tests.base import AnsibleZuulTestCase, ZuulTestCase, FIXTURE_DIR
class TestMultipleTenants(AnsibleZuulTestCase):
@@ -266,7 +267,7 @@
self.assertEqual(build.result, 'ABORTED')
build = self.getJobFromHistory('faillocal')
self.assertEqual(build.result, 'FAILURE')
- build = self.getJobFromHistory('nodepool')
+ build = self.getJobFromHistory('check-vars')
self.assertEqual(build.result, 'SUCCESS')
build = self.getJobFromHistory('python27')
self.assertEqual(build.result, 'SUCCESS')
@@ -288,6 +289,11 @@
build.uuid + '.bare-role.flag')
self.assertTrue(os.path.exists(bare_role_flag_path))
+ secrets_path = os.path.join(self.test_root,
+ build.uuid + '.secrets')
+ with open(secrets_path) as f:
+ self.assertEqual(f.read(), "test-username test-password")
+
class TestBrokenConfig(ZuulTestCase):
# Test that we get an appropriate syntax error if we start with a
@@ -303,3 +309,33 @@
def test_broken_config_on_startup(self):
pass
+
+
+class TestProjectKeys(ZuulTestCase):
+ # Test that we can generate project keys
+
+ # Normally the test infrastructure copies a static key in place
+ # for each project before starting tests. This saves time because
+ # Zuul's automatic key-generation on startup can be slow. To make
+ # sure we exercise that code, in this test we allow Zuul to create
+ # keys for the project on startup.
+ create_project_keys = True
+ tenant_config_file = 'config/in-repo/main.yaml'
+
+ def test_key_generation(self):
+ key_root = os.path.join(self.state_root, 'keys')
+ private_key_file = os.path.join(key_root, 'gerrit/org/project.pem')
+ # Make sure that a proper key was created on startup
+ with open(private_key_file, "rb") as f:
+ private_key, public_key = \
+ encryption.deserialize_rsa_keypair(f.read())
+
+ with open(os.path.join(FIXTURE_DIR, 'private.pem')) as i:
+ fixture_private_key = i.read()
+
+ # Make sure that we didn't just end up with the static fixture
+ # key
+ self.assertNotEqual(fixture_private_key, private_key)
+
+ # Make sure it's the right length
+ self.assertEqual(4096, private_key.key_size)
diff --git a/tests/unit/test_webapp.py b/tests/unit/test_webapp.py
index acff09a..8791a25 100644
--- a/tests/unit/test_webapp.py
+++ b/tests/unit/test_webapp.py
@@ -15,11 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import json
from six.moves import urllib
-from tests.base import ZuulTestCase
+from tests.base import ZuulTestCase, FIXTURE_DIR
class TestWebapp(ZuulTestCase):
@@ -85,3 +86,13 @@
self.assertEqual(1, len(data), data)
self.assertEqual("org/project1", data[0]['project'], data)
+
+ def test_webapp_keys(self):
+ with open(os.path.join(FIXTURE_DIR, 'public.pem')) as f:
+ public_pem = f.read()
+
+ req = urllib.request.Request(
+ "http://localhost:%s/tenant-one/keys/gerrit/org/project.pub" %
+ self.port)
+ f = urllib.request.urlopen(req)
+ self.assertEqual(f.read(), public_pem)
diff --git a/tools/encrypt_secret.py b/tools/encrypt_secret.py
new file mode 100644
index 0000000..4865edd
--- /dev/null
+++ b/tools/encrypt_secret.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import base64
+import os
+import subprocess
+import sys
+import tempfile
+from six.moves import urllib
+
+DESCRIPTION = """Encrypt a secret for Zuul.
+
+This program fetches a project-specific public key from a Zuul server and
+uses that to encrypt a secret. The only pre-requisite is an installed
+OpenSSL binary.
+"""
+
+
+def main():
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
+ parser.add_argument('url',
+ help="The base URL of the zuul server and tenant. "
+ "E.g., https://zuul.example.com/tenant-name")
+ # TODO(jeblair,mordred): When projects have canonical names, use that here.
+ # TODO(jeblair): Throw a fit if SSL is not used.
+ parser.add_argument('source',
+ help="The Zuul source of the project.")
+ parser.add_argument('project',
+ help="The name of the project.")
+ parser.add_argument('--infile',
+ default=None,
+ help="A filename whose contents will be encrypted. "
+ "If not supplied, the value will be read from "
+ "standard input.")
+ parser.add_argument('--outfile',
+ default=None,
+ help="A filename to which the encrypted value will be "
+ "written. If not supplied, the value will be written "
+ "to standard output.")
+ args = parser.parse_args()
+
+ req = urllib.request.Request("%s/keys/%s/%s.pub" % (
+ args.url, args.source, args.project))
+ pubkey = urllib.request.urlopen(req)
+
+ if args.infile:
+ with open(args.infile) as f:
+ plaintext = f.read()
+ else:
+ plaintext = sys.stdin.read()
+
+ # The plaintext is fed to a subprocess pipe, which requires bytes
+ # on Python 3.
+ if not isinstance(plaintext, bytes):
+ plaintext = plaintext.encode("utf-8")
+
+ pubkey_file = tempfile.NamedTemporaryFile(delete=False)
+ try:
+ pubkey_file.write(pubkey.read())
+ pubkey_file.close()
+
+ p = subprocess.Popen(['openssl', 'rsautl', '-encrypt',
+ '-oaep', '-pubin', '-inkey',
+ pubkey_file.name],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate(plaintext)
+ if p.returncode != 0:
+ raise Exception("Return code %s from openssl" % p.returncode)
+ ciphertext = base64.b64encode(stdout).decode("utf-8")
+ finally:
+ os.unlink(pubkey_file.name)
+
+ if args.outfile:
+ with open(args.outfile, "w") as f:
+ f.write(ciphertext)
+ else:
+ print(ciphertext)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/zuul/ansible/lookup/__init__.py b/zuul/ansible/lookup/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/zuul/ansible/lookup/__init__.py
diff --git a/zuul/ansible/lookup/_banned.py b/zuul/ansible/lookup/_banned.py
new file mode 100644
index 0000000..65708f8
--- /dev/null
+++ b/zuul/ansible/lookup/_banned.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, *args, **kwargs):
+ raise AnsibleError(
+ "Use of lookup modules that perform local actions on the executor"
+ " is forbidden.")
diff --git a/zuul/ansible/lookup/consul_kv.py b/zuul/ansible/lookup/consul_kv.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/consul_kv.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/credstash.py b/zuul/ansible/lookup/credstash.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/credstash.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/csvfile.py b/zuul/ansible/lookup/csvfile.py
new file mode 100644
index 0000000..6506aa2
--- /dev/null
+++ b/zuul/ansible/lookup/csvfile.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+from zuul.ansible import paths
+csvfile = paths._import_ansible_lookup_plugin("csvfile")
+
+
+class LookupModule(csvfile.LookupModule):
+
+ def read_csv(self, filename, *args, **kwargs):
+ paths._fail_if_unsafe(filename)
+ return super(LookupModule, self).read_csv(filename, *args, **kwargs)
diff --git a/zuul/ansible/lookup/dig.py b/zuul/ansible/lookup/dig.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/dig.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/dnstxt.py b/zuul/ansible/lookup/dnstxt.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/dnstxt.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/env.py b/zuul/ansible/lookup/env.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/env.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/etcd.py b/zuul/ansible/lookup/etcd.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/etcd.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/file.py b/zuul/ansible/lookup/file.py
new file mode 100644
index 0000000..7403535
--- /dev/null
+++ b/zuul/ansible/lookup/file.py
@@ -0,0 +1,28 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+from zuul.ansible import paths
+file_mod = paths._import_ansible_lookup_plugin("file")
+
+
+class LookupModule(file_mod.LookupModule):
+
+ def run(self, terms, variables=None, **kwargs):
+ for term in terms:
+ lookupfile = self.find_file_in_search_path(
+ variables, 'files', term)
+ paths._fail_if_unsafe(lookupfile)
+ return super(LookupModule, self).run(terms, variables, **kwargs)
diff --git a/zuul/ansible/lookup/fileglob.py b/zuul/ansible/lookup/fileglob.py
new file mode 100644
index 0000000..4b9b449
--- /dev/null
+++ b/zuul/ansible/lookup/fileglob.py
@@ -0,0 +1,45 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+# Forked from lib/ansible/plugins/lookup/fileglob.py in ansible
+
+import os
+import glob
+
+from zuul.ansible import paths
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+ for term in terms:
+ term_file = os.path.basename(term)
+ dwimmed_path = self.find_file_in_search_path(
+ variables, 'files', os.path.dirname(term))
+ if dwimmed_path:
+ paths._fail_if_unsafe(dwimmed_path)
+ globbed = glob.glob(to_bytes(
+ os.path.join(dwimmed_path, term_file),
+ errors='surrogate_or_strict'))
+ ret.extend(
+ to_text(g, errors='surrogate_or_strict')
+ for g in globbed if os.path.isfile(g))
+ return ret
diff --git a/zuul/ansible/lookup/filetree.py b/zuul/ansible/lookup/filetree.py
new file mode 100644
index 0000000..0c054a3
--- /dev/null
+++ b/zuul/ansible/lookup/filetree.py
@@ -0,0 +1,32 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from zuul.ansible import paths
+filetree = paths._import_ansible_lookup_plugin("filetree")
+
+
+class LookupModule(filetree.LookupModule):
+
+ def run(self, terms, variables=None, **kwargs):
+ basedir = self.get_basedir(variables)
+ for term in terms:
+ term_file = os.path.basename(term)
+ dwimmed_path = self._loader.path_dwim_relative(
+ basedir, 'files', os.path.dirname(term))
+ path = os.path.join(dwimmed_path, term_file)
+ paths._fail_if_unsafe(path)
+ return super(LookupModule, self).run(terms, variables, **kwargs)
diff --git a/zuul/ansible/lookup/first_found.py b/zuul/ansible/lookup/first_found.py
new file mode 100644
index 0000000..d741df0
--- /dev/null
+++ b/zuul/ansible/lookup/first_found.py
@@ -0,0 +1,201 @@
+# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+# take a list of files and (optionally) a list of paths
+# return the first existing file found in the paths
+# [file1, file2, file3], [path1, path2, path3]
+# search order is:
+# path1/file1
+# path1/file2
+# path1/file3
+# path2/file1
+# path2/file2
+# path2/file3
+# path3/file1
+# path3/file2
+# path3/file3
+
+# first file found with os.path.exists() is returned
+# no file matches raises ansibleerror
+# EXAMPLES
+# - name: copy first existing file found to /some/file
+# action: copy src=$item dest=/some/file
+# with_first_found:
+# - files: foo ${inventory_hostname} bar
+# paths: /tmp/production /tmp/staging
+
+# that will look for files in this order:
+# /tmp/production/foo
+# ${inventory_hostname}
+# bar
+# /tmp/staging/foo
+# ${inventory_hostname}
+# bar
+
+# - name: copy first existing file found to /some/file
+# action: copy src=$item dest=/some/file
+# with_first_found:
+# - files: /some/place/foo ${inventory_hostname} /some/place/else
+
+# that will look for files in this order:
+# /some/place/foo
+# $relative_path/${inventory_hostname}
+# /some/place/else
+
+# example - including tasks:
+# tasks:
+# - include: $item
+# with_first_found:
+# - files: generic
+# paths: tasks/staging tasks/production
+# this will include the tasks in the file generic where it is found first
+# (staging or production)
+
+# example simple file lists
+# tasks:
+# - name: first found file
+# action: copy src=$item dest=/etc/file.cfg
+# with_first_found:
+# - files: foo.${inventory_hostname} foo
+
+
+# example skipping if no matched files
+# First_found also offers the ability to control whether or not failing
+# to find a file returns an error or not
+#
+# - name: first found file - or skip
+# action: copy src=$item dest=/etc/file.cfg
+# with_first_found:
+# - files: foo.${inventory_hostname}
+# skip: true
+
+# example a role with default configuration and configuration per host
+# you can set multiple terms with their own files and paths to look through.
+# consider a role that sets some configuration per host falling back on a
+# default config.
+#
+# - name: some configuration template
+# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
+# with_first_found:
+# - files:
+# - ${inventory_hostname}/etc/file.cfg
+# paths:
+# - ../../../templates.overwrites
+# - ../../../templates
+# - files:
+# - etc/file.cfg
+# paths:
+# - templates
+
+# the above will return an empty list if the files cannot be found at all
+# if skip is unspecified or if it is set to false then it will return a list
+# error which can be caught by ignore_errors: true for that action.
+
+# finally - if you want you can use it, in place to replace
+# first_available_file:
+# you simply cannot use the - files, path or skip options. simply replace
+# first_available_file with with_first_found and leave the file listing in
+# place
+#
+#
+# - name: with_first_found like first_available_file
+# action: copy src=$item dest=/tmp/faftest
+# with_first_found:
+# - ../files/foo
+# - ../files/bar
+# - ../files/baz
+# ignore_errors: true
+
+import os
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.constants import mk_boolean as boolean
+from ansible.errors import AnsibleLookupError
+from ansible.errors import AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+from zuul.ansible import paths as zuul_paths
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ anydict = False
+ skip = False
+
+ for term in terms:
+ if isinstance(term, dict):
+ anydict = True
+
+ total_search = []
+ if anydict:
+ for term in terms:
+ if isinstance(term, dict):
+ files = term.get('files', [])
+ paths = term.get('paths', [])
+ skip = boolean(term.get('skip', False))
+
+ filelist = files
+ if isinstance(files, string_types):
+ files = files.replace(',', ' ')
+ files = files.replace(';', ' ')
+ filelist = files.split(' ')
+
+ pathlist = paths
+ if paths:
+ if isinstance(paths, string_types):
+ paths = paths.replace(',', ' ')
+ paths = paths.replace(':', ' ')
+ paths = paths.replace(';', ' ')
+ pathlist = paths.split(' ')
+
+ if not pathlist:
+ total_search = filelist
+ else:
+ for path in pathlist:
+ for fn in filelist:
+ f = os.path.join(path, fn)
+ total_search.append(f)
+ else:
+ total_search.append(term)
+ else:
+ total_search = self._flatten(terms)
+
+ for fn in total_search:
+ zuul_paths._fail_if_unsafe(fn)
+ try:
+ fn = self._templar.template(fn)
+ except (AnsibleUndefinedVariable, UndefinedError):
+ continue
+
+ # get subdir if set by task executor, default to files otherwise
+ subdir = getattr(self, '_subdir', 'files')
+ path = None
+ path = self.find_file_in_search_path(
+ variables, subdir, fn, ignore_missing=True)
+ if path is not None:
+ return [path]
+ else:
+ if skip:
+ return []
+ else:
+ raise AnsibleLookupError(
+ "No file was found when using with_first_found. Use the"
+ " 'skip: true' option to allow this task to be skipped if"
+ " no files are found")
diff --git a/zuul/ansible/lookup/hashi_vault.py b/zuul/ansible/lookup/hashi_vault.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/hashi_vault.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/ini.py b/zuul/ansible/lookup/ini.py
new file mode 100644
index 0000000..51127ff
--- /dev/null
+++ b/zuul/ansible/lookup/ini.py
@@ -0,0 +1,31 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+from zuul.ansible import paths
+ini = paths._import_ansible_lookup_plugin("ini")
+
+
+class LookupModule(ini.LookupModule):
+
+ def read_properties(self, filename, *args, **kwargs):
+ paths._fail_if_unsafe(filename)
+ return super(LookupModule, self).read_properties(
+ filename, *args, **kwargs)
+
+ def read_ini(self, filename, *args, **kwargs):
+ paths._fail_if_unsafe(filename)
+ return super(LookupModule, self).read_ini(
+ filename, *args, **kwargs)
diff --git a/zuul/ansible/lookup/keyring.py b/zuul/ansible/lookup/keyring.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/keyring.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/lastpass.py b/zuul/ansible/lookup/lastpass.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/lastpass.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/lines.py b/zuul/ansible/lookup/lines.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/lines.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/mongodb.py b/zuul/ansible/lookup/mongodb.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/mongodb.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/password.py b/zuul/ansible/lookup/password.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/password.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/passwordstore.py b/zuul/ansible/lookup/passwordstore.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/passwordstore.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/pipe.py b/zuul/ansible/lookup/pipe.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/pipe.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/redis_kv.py b/zuul/ansible/lookup/redis_kv.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/redis_kv.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/shelvefile.py b/zuul/ansible/lookup/shelvefile.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/shelvefile.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/template.py b/zuul/ansible/lookup/template.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/template.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/url.py b/zuul/ansible/lookup/url.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/url.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/paths.py b/zuul/ansible/paths.py
index e387732..bc61975 100644
--- a/zuul/ansible/paths.py
+++ b/zuul/ansible/paths.py
@@ -16,7 +16,9 @@
import imp
import os
+from ansible.errors import AnsibleError
import ansible.plugins.action
+import ansible.plugins.lookup
def _is_safe_path(path):
@@ -35,6 +37,12 @@
curdir=os.path.abspath(os.path.curdir)))
+def _fail_if_unsafe(path):
+ if not _is_safe_path(path):
+ msg_dict = _fail_dict(path)
+ raise AnsibleError(msg_dict['msg'])
+
+
def _import_ansible_action_plugin(name):
# Ansible forces the import of our action plugins
# (zuul.ansible.action.foo) as ansible.plugins.action.foo, which
@@ -51,3 +59,11 @@
return imp.load_module(
'zuul.ansible.protected.action.' + name,
*imp.find_module(name, ansible.plugins.action.__path__))
+
+
+def _import_ansible_lookup_plugin(name):
+ # See _import_ansible_action_plugin
+
+ return imp.load_module(
+ 'zuul.ansible.protected.lookup.' + name,
+ *imp.find_module(name, ansible.plugins.lookup.__path__))
diff --git a/zuul/change_matcher.py b/zuul/change_matcher.py
index 845ba1c..1da1d2c 100644
--- a/zuul/change_matcher.py
+++ b/zuul/change_matcher.py
@@ -62,7 +62,8 @@
def matches(self, change):
return (
(hasattr(change, 'branch') and self.regex.match(change.branch)) or
- (hasattr(change, 'ref') and self.regex.match(change.ref))
+ (hasattr(change, 'ref') and
+ change.ref is not None and self.regex.match(change.ref))
)
diff --git a/zuul/cmd/__init__.py b/zuul/cmd/__init__.py
index 9fa4c03..f2a2612 100644
--- a/zuul/cmd/__init__.py
+++ b/zuul/cmd/__init__.py
@@ -24,10 +24,10 @@
import sys
import traceback
-import yaml
yappi = extras.try_import('yappi')
import zuul.lib.connections
+from zuul.lib import yamlutil as yaml
# Do not import modules that will pull in paramiko which must not be
# imported until after the daemonization.
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 8bae3c5..5e88ee7 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -15,16 +15,17 @@
import os
import logging
import six
-import yaml
import pprint
import textwrap
import voluptuous as vs
from zuul import model
+from zuul.lib import yamlutil as yaml
import zuul.manager.dependent
import zuul.manager.independent
from zuul import change_matcher
+from zuul.lib import encryption
# Several forms accept either a single item or a list, this makes
@@ -84,8 +85,9 @@
class ZuulSafeLoader(yaml.SafeLoader):
- zuul_node_types = frozenset(('job', 'nodeset', 'pipeline',
- 'project', 'project-template'))
+ zuul_node_types = frozenset(('job', 'nodeset', 'secret', 'pipeline',
+ 'project', 'project-template',
+ 'semaphore'))
def __init__(self, stream, context):
super(ZuulSafeLoader, self).__init__(stream)
@@ -122,6 +124,29 @@
loader.dispose()
+class EncryptedPKCS1_OAEP(yaml.YAMLObject):
+ yaml_tag = u'!encrypted/pkcs1-oaep'
+ yaml_loader = yaml.SafeLoader
+
+ def __init__(self, ciphertext):
+ self.ciphertext = ciphertext.decode('base64')
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ if not isinstance(other, EncryptedPKCS1_OAEP):
+ return False
+ return (self.ciphertext == other.ciphertext)
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ return cls(node.value)
+
+ def decrypt(self, private_key):
+ return encryption.decrypt_pkcs1_oaep(self.ciphertext, private_key)
+
+
class NodeSetParser(object):
@staticmethod
def getSchema():
@@ -148,6 +173,28 @@
return ns
+class SecretParser(object):
+ @staticmethod
+ def getSchema():
+ data = {str: vs.Any(str, EncryptedPKCS1_OAEP)}
+
+ secret = {vs.Required('name'): str,
+ vs.Required('data'): data,
+ '_source_context': model.SourceContext,
+ '_start_mark': yaml.Mark,
+ }
+
+ return vs.Schema(secret)
+
+ @staticmethod
+ def fromYaml(layout, conf):
+ with configuration_exceptions('secret', conf):
+ SecretParser.getSchema()(conf)
+ s = model.Secret(conf['name'], conf['_source_context'])
+ s.secret_data = conf['data']
+ return s
+
+
class JobParser(object):
@staticmethod
def getSchema():
@@ -176,7 +223,7 @@
'success-url': str,
'hold-following-changes': bool,
'voting': bool,
- 'mutex': str,
+ 'semaphore': str,
'tags': to_list(str),
'branches': to_list(str),
'files': to_list(str),
@@ -194,6 +241,7 @@
'repos': to_list(str),
'vars': dict,
'dependencies': to_list(str),
+ 'allowed-projects': to_list(str),
}
return vs.Schema(job)
@@ -203,7 +251,7 @@
'workspace',
'voting',
'hold-following-changes',
- 'mutex',
+ 'semaphore',
'attempts',
'failure-message',
'success-message',
@@ -224,7 +272,19 @@
job = model.Job(conf['name'])
job.source_context = conf.get('_source_context')
if 'auth' in conf:
- job.auth = conf.get('auth')
+ job.auth = model.AuthContext()
+ if 'inherit' in conf['auth']:
+ job.auth.inherit = conf['auth']['inherit']
+
+ for secret_name in conf['auth'].get('secrets', []):
+ secret = layout.secrets[secret_name]
+ if secret.source_context != job.source_context:
+ raise Exception(
+ "Unable to use secret %s. Secrets must be "
+ "defined in the same project in which they "
+ "are used" % secret_name)
+ job.auth.secrets.append(secret.decrypt(
+ job.source_context.project.private_key))
if 'parent' in conf:
parent = layout.getJob(conf['parent'])
@@ -291,6 +351,19 @@
if variables:
job.updateVariables(variables)
+ allowed_projects = conf.get('allowed-projects', None)
+ if allowed_projects:
+ allowed = []
+ for p in as_list(allowed_projects):
+ # TODOv3(jeblair): this limits allowed_projects to the same
+ # source; we should remove that limitation.
+ source = job.source_context.project.connection_name
+ (trusted, project) = tenant.getRepo(source, p)
+ if project is None:
+ raise Exception("Unknown project %s" % (p,))
+ allowed.append(project.name)
+ job.allowed_projects = frozenset(allowed)
+
# If the definition for this job came from a project repo,
# implicitly apply a branch matcher for the branch it was on.
if (not job.source_context.trusted):
@@ -465,6 +538,7 @@
project_pipeline.queue_name = queue_name
if pipeline_defined:
project.pipelines[pipeline.name] = project_pipeline
+
return project
@@ -535,6 +609,7 @@
'footer-message': str,
'dequeue-on-new-patchset': bool,
'ignore-dependencies': bool,
+ 'allow-secrets': bool,
'disable-after-consecutive-failures':
vs.All(int, vs.Range(min=1)),
'window': window,
@@ -582,6 +657,7 @@
'dequeue-on-new-patchset', True)
pipeline.ignore_dependencies = conf.get(
'ignore-dependencies', False)
+ pipeline.allow_secrets = conf.get('allow-secrets', False)
for conf_key, action in PipelineParser.reporter_actions.items():
reporter_set = []
@@ -645,6 +721,25 @@
return pipeline
+class SemaphoreParser(object):
+ @staticmethod
+ def getSchema():
+ semaphore = {vs.Required('name'): str,
+ 'max': int,
+ '_source_context': model.SourceContext,
+ '_start_mark': yaml.Mark,
+ }
+
+ return vs.Schema(semaphore)
+
+ @staticmethod
+ def fromYaml(conf):
+ SemaphoreParser.getSchema()(conf)
+ semaphore = model.Semaphore(conf['name'], conf.get('max', 1))
+ semaphore.source_context = conf.get('_source_context')
+ return semaphore
+
+
class TenantParser(object):
log = logging.getLogger("zuul.TenantParser")
@@ -673,13 +768,15 @@
return vs.Schema(tenant)
@staticmethod
- def fromYaml(base, connections, scheduler, merger, conf, cached):
+ def fromYaml(base, project_key_dir, connections, scheduler, merger, conf,
+ cached):
TenantParser.getSchema(connections)(conf)
tenant = model.Tenant(conf['name'])
tenant.unparsed_config = conf
unparsed_config = model.UnparsedTenantConfig()
tenant.config_repos, tenant.project_repos = \
- TenantParser._loadTenantConfigRepos(connections, conf)
+ TenantParser._loadTenantConfigRepos(
+ project_key_dir, connections, conf)
for source, repo in tenant.config_repos:
tenant.addConfigRepo(source, repo)
for source, repo in tenant.project_repos:
@@ -699,7 +796,53 @@
return tenant
@staticmethod
- def _loadTenantConfigRepos(connections, conf_tenant):
+ def _loadProjectKeys(project_key_dir, connection_name, project):
+ project.private_key_file = (
+ os.path.join(project_key_dir, connection_name,
+ project.name + '.pem'))
+
+ TenantParser._generateKeys(project)
+ TenantParser._loadKeys(project)
+
+ @staticmethod
+ def _generateKeys(project):
+ if os.path.isfile(project.private_key_file):
+ return
+
+ key_dir = os.path.dirname(project.private_key_file)
+ if not os.path.isdir(key_dir):
+ os.makedirs(key_dir)
+
+ TenantParser.log.info(
+ "Generating RSA keypair for project %s" % (project.name,)
+ )
+ private_key, public_key = encryption.generate_rsa_keypair()
+ pem_private_key = encryption.serialize_rsa_private_key(private_key)
+
+ # Dump keys to filesystem. We only save the private key
+ # because the public key can be constructed from it.
+ TenantParser.log.info(
+ "Saving RSA keypair for project %s to %s" % (
+ project.name, project.private_key_file)
+ )
+ with open(project.private_key_file, 'wb') as f:
+ f.write(pem_private_key)
+
+ @staticmethod
+ def _loadKeys(project):
+ # Check the key files specified are there
+ if not os.path.isfile(project.private_key_file):
+ raise Exception(
+ 'Private key file {0} not found'.format(
+ project.private_key_file))
+
+ # Load keypair
+ with open(project.private_key_file, "rb") as f:
+ (project.private_key, project.public_key) = \
+ encryption.deserialize_rsa_keypair(f.read())
+
+ @staticmethod
+ def _loadTenantConfigRepos(project_key_dir, connections, conf_tenant):
config_repos = []
project_repos = []
@@ -708,10 +851,14 @@
for conf_repo in conf_source.get('config-repos', []):
project = source.getProject(conf_repo)
+ TenantParser._loadProjectKeys(
+ project_key_dir, source_name, project)
config_repos.append((source, project))
for conf_repo in conf_source.get('project-repos', []):
project = source.getProject(conf_repo)
+ TenantParser._loadProjectKeys(
+ project_key_dir, source_name, project)
project_repos.append((source, project))
return config_repos, project_repos
@@ -833,9 +980,15 @@
for config_nodeset in data.nodesets:
layout.addNodeSet(NodeSetParser.fromYaml(layout, config_nodeset))
+ for config_secret in data.secrets:
+ layout.addSecret(SecretParser.fromYaml(layout, config_secret))
+
for config_job in data.jobs:
layout.addJob(JobParser.fromYaml(tenant, layout, config_job))
+ for config_semaphore in data.semaphores:
+ layout.addSemaphore(SemaphoreParser.fromYaml(config_semaphore))
+
for config_template in data.project_templates:
layout.addProjectTemplate(ProjectTemplateParser.fromYaml(
tenant, layout, config_template))
@@ -861,7 +1014,8 @@
config_path)
return config_path
- def loadConfig(self, config_path, scheduler, merger, connections):
+ def loadConfig(self, config_path, project_key_dir, scheduler, merger,
+ connections):
abide = model.Abide()
config_path = self.expandConfigPath(config_path)
@@ -874,13 +1028,14 @@
for conf_tenant in config.tenants:
# When performing a full reload, do not use cached data.
- tenant = TenantParser.fromYaml(base, connections, scheduler,
- merger, conf_tenant, cached=False)
+ tenant = TenantParser.fromYaml(
+ base, project_key_dir, connections, scheduler, merger,
+ conf_tenant, cached=False)
abide.tenants[tenant.name] = tenant
return abide
- def reloadTenant(self, config_path, scheduler, merger, connections,
- abide, tenant):
+ def reloadTenant(self, config_path, project_key_dir, scheduler,
+ merger, connections, abide, tenant):
new_abide = model.Abide()
new_abide.tenants = abide.tenants.copy()
@@ -888,9 +1043,9 @@
base = os.path.dirname(os.path.realpath(config_path))
# When reloading a tenant only, use cached data if available.
- new_tenant = TenantParser.fromYaml(base, connections, scheduler,
- merger, tenant.unparsed_config,
- cached=True)
+ new_tenant = TenantParser.fromYaml(
+ base, project_key_dir, connections, scheduler, merger,
+ tenant.unparsed_config, cached=True)
new_abide.tenants[tenant.name] = new_tenant
return new_abide
@@ -940,6 +1095,12 @@
# or deleting pipelines in dynamic layout changes.
layout.pipelines = tenant.layout.pipelines
+ # NOTE: the semaphore definitions are copied from the static layout
+ # here. For semaphores there should be no per patch max value but
+ # exactly one value at any time. So we do not support dynamic semaphore
+ # configuration changes.
+ layout.semaphores = tenant.layout.semaphores
+
for config_job in config.jobs:
layout.addJob(JobParser.fromYaml(tenant, layout, config_job))
diff --git a/zuul/driver/__init__.py b/zuul/driver/__init__.py
index 36e83bd..1cc5235 100644
--- a/zuul/driver/__init__.py
+++ b/zuul/driver/__init__.py
@@ -117,25 +117,28 @@
class TriggerInterface(object):
"""The trigger interface.
- A driver which is able to supply a Trigger should implement this
+ A driver which is able to supply a trigger should implement this
interface.
"""
@abc.abstractmethod
def getTrigger(self, connection, config=None):
- """Create and return a new Trigger object.
+ """Create and return a new trigger object.
This method is required by the interface.
+ The trigger object returned should inherit from the
+ :py:class:`~zuul.trigger.BaseTrigger` class.
+
:arg Connection connection: The Connection object associated
with the trigger (as previously returned by getConnection)
or None.
:arg dict config: The configuration information supplied along
with the trigger in the layout.
- :returns: A new Trigger object.
- :rtype: Trigger
+ :returns: A new trigger object.
+ :rtype: :py:class:`~zuul.trigger.BaseTrigger`
"""
pass
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 286006f..e3c726f 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -26,7 +26,7 @@
import voluptuous as v
from zuul.connection import BaseConnection
-from zuul.model import TriggerEvent, Project, Change, Ref, NullChange
+from zuul.model import TriggerEvent, Project, Change, Ref
from zuul import exceptions
@@ -93,6 +93,9 @@
event.ref = refupdate.get('refName')
event.oldrev = refupdate.get('oldRev')
event.newrev = refupdate.get('newRev')
+ if event.project_name is None:
+ # ref-replica* events
+ event.project_name = data.get('project')
# Map the event types to a field name holding a Gerrit
# account attribute. See Gerrit stream-event documentation
# in cmd-stream-events.html
@@ -108,6 +111,7 @@
'reviewer-added': 'reviewer', # Gerrit 2.5/2.6
'ref-replicated': None,
'ref-replication-done': None,
+ 'ref-replication-scheduled': None,
'topic-changed': 'changer',
}
event.account = None
@@ -292,7 +296,13 @@
change.url = self._getGitwebUrl(project, sha=event.newrev)
else:
project = self.getProject(event.project_name)
- change = NullChange(project)
+ change = Ref(project)
+ branch = event.branch or 'master'
+ change.ref = 'refs/heads/%s' % branch
+ refs = self.getInfoRefs(project)
+ change.oldrev = refs[change.ref]
+ change.newrev = refs[change.ref]
+ change.url = self._getGitwebUrl(project, sha=change.newrev)
return change
def _getChange(self, number, patchset, refresh=False, history=None):
diff --git a/zuul/driver/sql/alembic_reporter.ini b/zuul/driver/sql/alembic.ini
similarity index 100%
rename from zuul/driver/sql/alembic_reporter.ini
rename to zuul/driver/sql/alembic.ini
diff --git a/zuul/driver/sql/alembic_reporter/versions/1dd914d4a482_allow_score_to_be_null.py b/zuul/driver/sql/alembic_reporter/versions/1dd914d4a482_allow_score_to_be_null.py
new file mode 100644
index 0000000..b153cab
--- /dev/null
+++ b/zuul/driver/sql/alembic_reporter/versions/1dd914d4a482_allow_score_to_be_null.py
@@ -0,0 +1,25 @@
+"""Allow score to be null
+
+Revision ID: 1dd914d4a482
+Revises: 4d3ebd7f06b9
+Create Date: 2017-03-28 08:09:32.908643
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1dd914d4a482'
+down_revision = '4d3ebd7f06b9'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.alter_column('zuul_buildset', 'score', nullable=True,
+ existing_type=sa.Integer)
+
+
+def downgrade():
+ raise Exception("Downgrades not supported")
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index 69e53df..31bc13a 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -80,7 +80,7 @@
sa.Column('change', sa.Integer, nullable=True),
sa.Column('patchset', sa.Integer, nullable=True),
sa.Column('ref', sa.String(255)),
- sa.Column('score', sa.Integer),
+ sa.Column('score', sa.Integer, nullable=True),
sa.Column('message', sa.TEXT()),
)
diff --git a/zuul/driver/sql/sqlreporter.py b/zuul/driver/sql/sqlreporter.py
index 2129f53..d6e547d 100644
--- a/zuul/driver/sql/sqlreporter.py
+++ b/zuul/driver/sql/sqlreporter.py
@@ -28,6 +28,7 @@
def __init__(self, driver, connection, config={}):
super(SQLReporter, self).__init__(
driver, connection, config)
+ # TODO(jeblair): document this is stored as NULL if unspecified
self.result_score = config.get('score', None)
def report(self, source, pipeline, item):
@@ -37,13 +38,6 @@
self.log.warn("SQL reporter (%s) is disabled " % self)
return
- if self.driver.sched.config.has_option('zuul', 'url_pattern'):
- url_pattern = self.driver.sched.config.get('zuul', 'url_pattern')
- else:
- url_pattern = None
-
- score = self.config.get('score', 0)
-
with self.connection.engine.begin() as conn:
buildset_ins = self.connection.zuul_buildset_table.insert().values(
zuul_ref=item.current_build_set.ref,
@@ -52,7 +46,7 @@
change=item.change.number,
patchset=item.change.patchset,
ref=item.change.refspec,
- score=score,
+ score=self.result_score,
message=self._formatItemReport(
pipeline, item, with_jobs=False),
)
@@ -67,7 +61,7 @@
# information about the change.
continue
- (result, url) = item.formatJobResult(job, url_pattern)
+ (result, url) = item.formatJobResult(job)
build_inserts.append({
'buildset_id': buildset_ins_result.inserted_primary_key,
diff --git a/zuul/driver/zuul/__init__.py b/zuul/driver/zuul/__init__.py
index 1bc0ee9..47ccec0 100644
--- a/zuul/driver/zuul/__init__.py
+++ b/zuul/driver/zuul/__init__.py
@@ -87,7 +87,7 @@
def _createParentChangeEnqueuedEvents(self, change, pipeline):
self.log.debug("Checking for changes needing %s:" % change)
if not hasattr(change, 'needed_by_changes'):
- self.log.debug(" Changeish does not support dependencies")
+ self.log.debug(" %s does not support dependencies" % type(change))
return
for needs in change.needed_by_changes:
self._createParentChangeEnqueuedEvent(needs, pipeline)
diff --git a/zuul/executor/ansiblelaunchserver.py b/zuul/executor/ansiblelaunchserver.py
index 875cf2b..0202bdd 100644
--- a/zuul/executor/ansiblelaunchserver.py
+++ b/zuul/executor/ansiblelaunchserver.py
@@ -35,13 +35,13 @@
import Queue
import gear
-import yaml
import jenkins_jobs.builder
import jenkins_jobs.formatter
import zmq
import zuul.ansible.library
from zuul.lib import commandsocket
+from zuul.lib import yamlutil as yaml
ANSIBLE_WATCHDOG_GRACE = 5 * 60
ANSIBLE_DEFAULT_TIMEOUT = 2 * 60 * 60
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index 31646f8..90cfa9b 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -16,7 +16,6 @@
import gear
import json
import logging
-import os
import time
import threading
from uuid import uuid4
@@ -258,7 +257,7 @@
params['ZUUL_CHANGE_IDS'] = zuul_changes
params['ZUUL_CHANGE'] = str(item.change.number)
params['ZUUL_PATCHSET'] = str(item.change.patchset)
- if hasattr(item.change, 'ref'):
+ if hasattr(item.change, 'ref') and item.change.ref is not None:
params['ZUUL_REFNAME'] = item.change.ref
params['ZUUL_OLDREV'] = item.change.oldrev
params['ZUUL_NEWREV'] = item.change.newrev
@@ -266,13 +265,6 @@
params['ZUUL_REF'] = item.change.ref
params['ZUUL_COMMIT'] = item.change.newrev
- # The destination_path is a unique path for this build request
- # and generally where the logs are expected to be placed
- destination_path = os.path.join(item.change.getBasePath(),
- pipeline.name, job.name, uuid[:7])
- params['BASE_LOG_PATH'] = item.change.getBasePath()
- params['LOG_PATH'] = destination_path
-
# This is what we should be heading toward for parameters:
# required:
@@ -312,12 +304,16 @@
for node in item.current_build_set.getJobNodeSet(job.name).getNodes():
nodes.append(dict(name=node.name, image=node.image,
az=node.az,
+ host_keys=node.host_keys,
provider=node.provider,
region=node.region,
public_ipv6=node.public_ipv6,
public_ipv4=node.public_ipv4))
params['nodes'] = nodes
params['vars'] = copy.deepcopy(job.variables)
+ if job.auth:
+ for secret in job.auth.secrets:
+ params['vars'][secret.name] = copy.deepcopy(secret.secret_data)
params['vars']['zuul'] = zuul_params
projects = set()
if job.repos:
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index d0741bb..582d099 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -24,15 +24,17 @@
import threading
import time
import traceback
-import yaml
+from zuul.lib.yamlutil import yaml
import gear
import git
+from six.moves import shlex_quote
import zuul.merger.merger
import zuul.ansible.action
import zuul.ansible.callback
import zuul.ansible.library
+import zuul.ansible.lookup
from zuul.lib import commandsocket
COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
@@ -274,6 +276,10 @@
if not os.path.exists(self.callback_dir):
os.makedirs(self.callback_dir)
+ self.lookup_dir = os.path.join(ansible_dir, 'lookup')
+ if not os.path.exists(self.lookup_dir):
+ os.makedirs(self.lookup_dir)
+
library_path = os.path.dirname(os.path.abspath(
zuul.ansible.library.__file__))
for fn in os.listdir(library_path):
@@ -289,6 +295,11 @@
for fn in os.listdir(callback_path):
shutil.copy(os.path.join(callback_path, fn), self.callback_dir)
+ lookup_path = os.path.dirname(os.path.abspath(
+ zuul.ansible.lookup.__file__))
+ for fn in os.listdir(lookup_path):
+ shutil.copy(os.path.join(lookup_path, fn), self.lookup_dir)
+
self.job_workers = {}
def _getMerger(self, root):
@@ -634,18 +645,31 @@
return result
def getHostList(self, args):
- # TODO(clarkb): This prefers v4 because we're not sure if we
- # expect v6 to work. If we can determine how to prefer v6
hosts = []
for node in args['nodes']:
- ip = node.get('public_ipv4')
- if not ip:
- ip = node.get('public_ipv6')
- hosts.append((node['name'], dict(
+ # NOTE(mordred): This assumes that the nodepool launcher
+ # and the zuul executor both have similar network
+ # characteristics, as the launcher will do a test for ipv6
+ # viability and if so, and if the node has an ipv6
+ # address, it will be the interface_ip. force-ipv4 can be
+ # set to True in the clouds.yaml for a cloud if this
+ # results in the wrong thing being in interface_ip
+ # TODO(jeblair): Move this notice to the docs.
+ ip = node.get('interface_ip')
+ host_vars = dict(
ansible_host=ip,
nodepool_az=node.get('az'),
nodepool_provider=node.get('provider'),
- nodepool_region=node.get('region'))))
+ nodepool_region=node.get('region'))
+
+ host_keys = []
+ for key in node.get('host_keys'):
+ host_keys.append("%s %s" % (ip, key))
+
+ hosts.append(dict(
+ name=node['name'],
+ host_vars=host_vars,
+ host_keys=host_keys))
return hosts
def _blockPluginDirs(self, path):
@@ -806,21 +830,26 @@
self.jobdir.roles_path.append(role_path)
def prepareAnsibleFiles(self, args):
+ keys = []
with open(self.jobdir.inventory, 'w') as inventory:
- for host_name, host_vars in self.getHostList(args):
- inventory.write(host_name)
- for k, v in host_vars.items():
+ for item in self.getHostList(args):
+ inventory.write(item['name'])
+ for k, v in item['host_vars'].items():
inventory.write(' %s=%s' % (k, v))
inventory.write('\n')
- if 'ansible_host' in host_vars:
- os.system("ssh-keyscan %s >> %s" % (
- host_vars['ansible_host'],
- self.jobdir.known_hosts))
+ for key in item['host_keys']:
+ keys.append(key)
+
+ with open(self.jobdir.known_hosts, 'w') as known_hosts:
+ for key in keys:
+ known_hosts.write('%s\n' % key)
with open(self.jobdir.vars, 'w') as vars_yaml:
zuul_vars = dict(args['vars'])
- zuul_vars['zuul']['executor'] = dict(src_root=self.jobdir.src_root,
- log_root=self.jobdir.log_root)
+ zuul_vars['zuul']['executor'] = dict(
+ hostname=self.executor_server.hostname,
+ src_root=self.jobdir.src_root,
+ log_root=self.jobdir.log_root)
vars_yaml.write(
yaml.safe_dump(zuul_vars, default_flow_style=False))
self.writeAnsibleConfig(self.jobdir.untrusted_config)
@@ -843,6 +872,7 @@
if self.jobdir.roles_path:
config.write('roles_path = %s\n' %
':'.join(self.jobdir.roles_path))
+ config.write('command_warnings = False\n')
config.write('callback_plugins = %s\n'
% self.executor_server.callback_dir)
config.write('stdout_callback = zuul_stream\n')
@@ -852,6 +882,8 @@
if not trusted:
config.write('action_plugins = %s\n'
% self.executor_server.action_dir)
+ config.write('lookup_plugins = %s\n'
+ % self.executor_server.lookup_dir)
# On trusted jobs, we want to prevent the printing of args,
# since trusted jobs might have access to secrets that they may
@@ -897,14 +929,17 @@
env_copy['LOGNAME'] = 'zuul'
if trusted:
- env_copy['ANSIBLE_CONFIG'] = self.jobdir.trusted_config
+ config_file = self.jobdir.trusted_config
else:
- env_copy['ANSIBLE_CONFIG'] = self.jobdir.untrusted_config
+ config_file = self.jobdir.untrusted_config
+
+ env_copy['ANSIBLE_CONFIG'] = config_file
with self.proc_lock:
if self.aborted:
return (self.RESULT_ABORTED, None)
- self.log.debug("Ansible command: %s" % (cmd,))
+ self.log.debug("Ansible command: ANSIBLE_CONFIG=%s %s",
+ config_file, " ".join(shlex_quote(c) for c in cmd))
self.proc = subprocess.Popen(
cmd,
cwd=self.jobdir.work_root,
diff --git a/zuul/lib/cloner.py b/zuul/lib/cloner.py
index 18dea91..bec8ebe 100644
--- a/zuul/lib/cloner.py
+++ b/zuul/lib/cloner.py
@@ -17,13 +17,13 @@
import logging
import os
import re
-import yaml
import six
from git import GitCommandError
from zuul import exceptions
from zuul.lib.clonemapper import CloneMapper
+from zuul.lib import yamlutil as yaml
from zuul.merger.merger import Repo
diff --git a/zuul/lib/encryption.py b/zuul/lib/encryption.py
new file mode 100644
index 0000000..24224d8
--- /dev/null
+++ b/zuul/lib/encryption.py
@@ -0,0 +1,138 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.primitives import hashes
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#generation
+def generate_rsa_keypair():
+ """Generate an RSA keypair.
+
+ :returns: A tuple (private_key, public_key)
+
+ """
+ private_key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=4096,
+ backend=default_backend()
+ )
+ public_key = private_key.public_key()
+ return (private_key, public_key)
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#key-serialization
+def serialize_rsa_private_key(private_key):
+ """Serialize an RSA private key
+
+ This returns a PEM-encoded serialized form of an RSA private key
+ suitable for storing on disk. It is not password-protected.
+
+ :arg private_key: A private key object as returned by
+ :func:generate_rsa_keypair()
+
+ :returns: A PEM-encoded string representation of the private key.
+
+ """
+ return private_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption()
+ )
+
+
+def serialize_rsa_public_key(public_key):
+ """Serialize an RSA public key
+
+ This returns a PEM-encoded serialized form of an RSA public key
+ suitable for distribution.
+
+ :arg public_key: A public key object as returned by
+ :func:generate_rsa_keypair()
+
+ :returns: A PEM-encoded string representation of the public key.
+
+ """
+ return public_key.public_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#key-loading
+def deserialize_rsa_keypair(data):
+ """Deserialize an RSA private key
+
+ This deserializes an RSA private key and returns the keypair
+ (private and public) for use in decryption.
+
+ :arg data: A PEM-encoded serialized private key
+
+ :returns: A tuple (private_key, public_key)
+
+ """
+ private_key = serialization.load_pem_private_key(
+ data,
+ password=None,
+ backend=default_backend()
+ )
+ public_key = private_key.public_key()
+ return (private_key, public_key)
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#decryption
+def decrypt_pkcs1_oaep(ciphertext, private_key):
+ """Decrypt PKCS#1 (RSAES-OAEP) encoded ciphertext
+
+ :arg ciphertext: A string previously encrypted with PKCS#1
+ (RSAES-OAEP).
+ :arg private_key: A private key object as returned by
+ :func:generate_rsa_keypair()
+
+ :returns: The decrypted form of the ciphertext as a string.
+
+ """
+ return private_key.decrypt(
+ ciphertext,
+ padding.OAEP(
+ mgf=padding.MGF1(algorithm=hashes.SHA1()),
+ algorithm=hashes.SHA1(),
+ label=None
+ )
+ )
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#encryption
+def encrypt_pkcs1_oaep(plaintext, public_key):
+ """Encrypt data with PKCS#1 (RSAES-OAEP)
+
+ :arg plaintext: A string to encrypt with PKCS#1 (RSAES-OAEP).
+
+ :arg public_key: A public key object as returned by
+ :func:generate_rsa_keypair()
+
+ :returns: The encrypted form of the plaintext.
+
+ """
+ return public_key.encrypt(
+ plaintext,
+ padding.OAEP(
+ mgf=padding.MGF1(algorithm=hashes.SHA1()),
+ algorithm=hashes.SHA1(),
+ label=None
+ )
+ )
diff --git a/zuul/lib/yamlutil.py b/zuul/lib/yamlutil.py
new file mode 100644
index 0000000..2419906
--- /dev/null
+++ b/zuul/lib/yamlutil.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import yaml
+from yaml import YAMLObject, YAMLError # noqa: F401
+
+try:
+ from yaml import cyaml
+ import _yaml
+ SafeLoader = cyaml.CSafeLoader
+ SafeDumper = cyaml.CSafeDumper
+ Mark = _yaml.Mark
+except ImportError:
+ SafeLoader = yaml.SafeLoader
+ SafeDumper = yaml.SafeDumper
+ Mark = yaml.Mark
+
+
+def safe_load(stream, *args, **kwargs):
+ return yaml.load(stream, *args, Loader=SafeLoader, **kwargs)
+
+
+def safe_dump(stream, *args, **kwargs):
+ return yaml.dump(stream, *args, Dumper=SafeDumper, **kwargs)
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 58ad607..75e8edb 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -13,7 +13,6 @@
import logging
from zuul import exceptions
-from zuul.model import NullChange
class DynamicChangeQueueContextManager(object):
@@ -82,8 +81,8 @@
tags.append('[hold]')
if not variant.voting:
tags.append('[nonvoting]')
- if variant.mutex:
- tags.append('[mutex: %s]' % variant.mutex)
+ if variant.semaphore:
+ tags.append('[semaphore: %s]' % variant.semaphore)
tags = ' '.join(tags)
self.log.info(" %s%s %s" % (repr(variant),
efilters, tags))
@@ -387,7 +386,8 @@
if not item.current_build_set.layout:
return False
- jobs = item.findJobsToRun(self.sched.mutex)
+ jobs = item.findJobsToRun(
+ item.pipeline.layout.tenant.semaphore_handler)
if jobs:
self._executeJobs(item, jobs)
@@ -412,7 +412,8 @@
self.log.exception("Exception while canceling build %s "
"for change %s" % (build, item.change))
finally:
- self.sched.mutex.release(build.build_set.item, build.job)
+ old_build_set.layout.tenant.semaphore_handler.release(
+ old_build_set.item, build.job)
if not was_running:
try:
@@ -664,7 +665,7 @@
item = build.build_set.item
item.setResult(build)
- self.sched.mutex.release(item, build.job)
+ item.pipeline.layout.tenant.semaphore_handler.release(item, build.job)
self.log.debug("Item %s status is now:\n %s" %
(item, item.formatStatus()))
@@ -682,9 +683,8 @@
build_set.commit = event.commit
build_set.files.setFiles(event.files)
elif event.updated:
- if not isinstance(item.change, NullChange):
- build_set.commit = item.change.newrev
- if not build_set.commit and not isinstance(item.change, NullChange):
+ build_set.commit = item.change.newrev
+ if not build_set.commit:
self.log.info("Unable to merge change %s" % item.change)
item.setUnableToMerge()
diff --git a/zuul/manager/dependent.py b/zuul/manager/dependent.py
index f5fa579..4c48568 100644
--- a/zuul/manager/dependent.py
+++ b/zuul/manager/dependent.py
@@ -89,7 +89,7 @@
to_enqueue = []
self.log.debug("Checking for changes needing %s:" % change)
if not hasattr(change, 'needed_by_changes'):
- self.log.debug(" Changeish does not support dependencies")
+ self.log.debug(" %s does not support dependencies" % type(change))
return
for other_change in change.needed_by_changes:
with self.getChangeQueue(other_change) as other_change_queue:
@@ -133,7 +133,7 @@
# Return true if okay to proceed enqueing this change,
# false if the change should not be enqueued.
if not hasattr(change, 'needs_changes'):
- self.log.debug(" Changeish does not support dependencies")
+ self.log.debug(" %s does not support dependencies" % type(change))
return True
if not change.needs_changes:
self.log.debug(" No changes needed")
diff --git a/zuul/manager/independent.py b/zuul/manager/independent.py
index 3d28327..9e2a7d6 100644
--- a/zuul/manager/independent.py
+++ b/zuul/manager/independent.py
@@ -62,7 +62,7 @@
# Return true if okay to proceed enqueing this change,
# false if the change should not be enqueued.
if not hasattr(change, 'needs_changes'):
- self.log.debug(" Changeish does not support dependencies")
+ self.log.debug(" %s does not support dependencies" % type(change))
return True
if not change.needs_changes:
self.log.debug(" No changes needed")
diff --git a/zuul/model.py b/zuul/model.py
index 3676b68..744c0f3 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -14,6 +14,8 @@
import abc
import copy
+
+import logging
import os
import re
import struct
@@ -96,6 +98,13 @@
return re.sub(' ', '-', name)
+class Attributes(object):
+ """A class to hold attributes for string formatting."""
+
+ def __init__(self, **kw):
+ setattr(self, '__dict__', kw)
+
+
class Pipeline(object):
"""A configuration that ties triggers, reporters, managers and sources.
@@ -121,6 +130,7 @@
self.success_message = None
self.footer_message = None
self.start_message = None
+ self.allow_secrets = False
self.dequeue_on_new_patchset = True
self.ignore_dependencies = False
self.manager = None
@@ -156,6 +166,9 @@
def __repr__(self):
return '<Pipeline %s>' % self.name
+ def getSafeAttributes(self):
+ return Attributes(name=self.name)
+
def setManager(self, manager):
self.manager = manager
@@ -183,7 +196,7 @@
items.extend(shared_queue.queue)
return items
- def formatStatusJSON(self, url_pattern=None):
+ def formatStatusJSON(self):
j_pipeline = dict(name=self.name,
description=self.description)
j_queues = []
@@ -200,7 +213,7 @@
if j_changes:
j_queue['heads'].append(j_changes)
j_changes = []
- j_changes.append(e.formatJSON(url_pattern))
+ j_changes.append(e.formatJSON())
if (len(j_changes) > 1 and
(j_changes[-2]['remaining_time'] is not None) and
(j_changes[-1]['remaining_time'] is not None)):
@@ -515,6 +528,51 @@
self.state_time = data['state_time']
+class Secret(object):
+ """A collection of private data.
+
+ In configuration, Secrets are collections of private data in
+ key-value pair format. They are defined as top-level
+ configuration objects and then referenced by Jobs.
+
+ """
+
+ def __init__(self, name, source_context):
+ self.name = name
+ self.source_context = source_context
+ # The secret data may or may not be encrypted. This attribute
+ # is named 'secret_data' to make it easy to search for and
+ # spot where it is directly used.
+ self.secret_data = {}
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ if not isinstance(other, Secret):
+ return False
+ return (self.name == other.name and
+ self.source_context == other.source_context and
+ self.secret_data == other.secret_data)
+
+ def __repr__(self):
+ return '<Secret %s>' % (self.name,)
+
+ def decrypt(self, private_key):
+ """Return a copy of this secret with any encrypted data decrypted.
+ Note that the original remains encrypted."""
+
+ r = copy.deepcopy(self)
+ decrypted_secret_data = {}
+ for k, v in r.secret_data.items():
+ if hasattr(v, 'decrypt'):
+ decrypted_secret_data[k] = v.decrypt(private_key)
+ else:
+ decrypted_secret_data[k] = v
+ r.secret_data = decrypted_secret_data
+ return r
+
+
class SourceContext(object):
"""A reference to the branch of a project in configuration.
@@ -644,6 +702,28 @@
return d
+class AuthContext(object):
+ """The authentication information for a job.
+
+ Authentication information (both the actual data and metadata such
+ as whether it should be inherited) for a job is grouped together
+ in this object.
+ """
+
+ def __init__(self, inherit=False):
+ self.inherit = inherit
+ self.secrets = []
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ if not isinstance(other, AuthContext):
+ return False
+ return (self.inherit == other.inherit and
+ self.secrets == other.secrets)
+
+
class Job(object):
"""A Job represents the defintion of actions to perform.
@@ -686,17 +766,18 @@
timeout=None,
variables={},
nodeset=NodeSet(),
- auth={},
+ auth=None,
workspace=None,
pre_run=(),
post_run=(),
run=(),
implied_run=(),
- mutex=None,
+ semaphore=None,
attempts=3,
final=False,
roles=frozenset(),
repos=frozenset(),
+ allowed_projects=None,
)
# These are generally internal attributes which are not
@@ -749,6 +830,9 @@
def _get(self, name):
return self.__dict__.get(name)
+ def getSafeAttributes(self):
+ return Attributes(name=self.name)
+
def setRun(self):
if not self.run:
self.run = self.implied_run
@@ -776,7 +860,7 @@
raise Exception("Job unable to inherit from %s" % (other,))
do_not_inherit = set()
- if other.auth and not other.auth.get('inherit'):
+ if other.auth and not other.auth.inherit:
do_not_inherit.add('auth')
# copy all attributes
@@ -978,6 +1062,9 @@
return ('<Build %s of %s on %s>' %
(self.uuid, self.job.name, self.worker))
+ def getSafeAttributes(self):
+ return Attributes(uuid=self.uuid)
+
class Worker(object):
"""Information about the specific worker executing a Build."""
@@ -1300,7 +1387,7 @@
return False
return self.item_ahead.isHoldingFollowingChanges()
- def findJobsToRun(self, mutex):
+ def findJobsToRun(self, semaphore_handler):
torun = []
if not self.live:
return []
@@ -1339,9 +1426,9 @@
# The nodes for this job are not ready, skip
# it for now.
continue
- if mutex.acquire(self, job):
- # If this job needs a mutex, either acquire it or make
- # sure that we have it before running the job.
+ if semaphore_handler.acquire(self, job):
+ # If this job needs a semaphore, either acquire it or
+ # make sure that we have it before running the job.
torun.append(job)
return torun
@@ -1418,10 +1505,10 @@
fakebuild.result = 'SKIPPED'
self.addBuild(fakebuild)
- def formatJobResult(self, job, url_pattern=None):
+ def formatJobResult(self, job):
build = self.current_build_set.getBuild(job.name)
result = build.result
- pattern = url_pattern
+ pattern = None
if result == 'SUCCESS':
if job.success_message:
result = job.success_message
@@ -1433,19 +1520,27 @@
if job.failure_url:
pattern = job.failure_url
url = None
+ # Produce safe versions of objects which may be useful in
+ # result formatting, but don't allow users to crawl through
+ # the entire data structure where they might be able to access
+ # secrets, etc.
+ safe_change = self.change.getSafeAttributes()
+ safe_pipeline = self.pipeline.getSafeAttributes()
+ safe_job = job.getSafeAttributes()
+ safe_build = build.getSafeAttributes()
if pattern:
try:
- url = pattern.format(change=self.change,
- pipeline=self.pipeline,
- job=job,
- build=build)
+ url = pattern.format(change=safe_change,
+ pipeline=safe_pipeline,
+ job=safe_job,
+ build=safe_build)
except Exception:
pass # FIXME: log this or something?
if not url:
url = build.url or job.name
return (result, url)
- def formatJSON(self, url_pattern=None):
+ def formatJSON(self):
changeish = self.change
ret = {}
ret['active'] = self.active
@@ -1488,7 +1583,7 @@
if build:
result = build.result
build_url = build.url
- (unused, report_url) = self.formatJobResult(job, url_pattern)
+ (unused, report_url) = self.formatJobResult(job)
if build.start_time:
if build.end_time:
elapsed = int((build.end_time -
@@ -1578,103 +1673,22 @@
return ret
-class Changeish(object):
- """Base class for Change and Ref."""
+class Ref(object):
+ """An existing state of a Project."""
def __init__(self, project):
self.project = project
-
- def getBasePath(self):
- base_path = ''
- if hasattr(self, 'refspec'):
- base_path = "%s/%s/%s" % (
- self.number[-2:], self.number, self.patchset)
- elif hasattr(self, 'ref'):
- base_path = "%s/%s" % (self.newrev[:2], self.newrev)
-
- return base_path
-
- def equals(self, other):
- raise NotImplementedError()
-
- def isUpdateOf(self, other):
- raise NotImplementedError()
-
- def filterJobs(self, jobs):
- return filter(lambda job: job.changeMatches(self), jobs)
-
- def getRelatedChanges(self):
- return set()
-
- def updatesConfig(self):
- return False
-
-
-class Change(Changeish):
- """A proposed new state for a Project."""
- def __init__(self, project):
- super(Change, self).__init__(project)
- self.branch = None
- self.number = None
- self.url = None
- self.patchset = None
- self.refspec = None
-
- self.files = []
- self.needs_changes = []
- self.needed_by_changes = []
- self.is_current_patchset = True
- self.can_merge = False
- self.is_merged = False
- self.failed_to_merge = False
- self.approvals = []
- self.open = None
- self.status = None
- self.owner = None
-
- def _id(self):
- return '%s,%s' % (self.number, self.patchset)
-
- def __repr__(self):
- return '<Change 0x%x %s>' % (id(self), self._id())
-
- def equals(self, other):
- if self.number == other.number and self.patchset == other.patchset:
- return True
- return False
-
- def isUpdateOf(self, other):
- if ((hasattr(other, 'number') and self.number == other.number) and
- (hasattr(other, 'patchset') and
- self.patchset is not None and
- other.patchset is not None and
- int(self.patchset) > int(other.patchset))):
- return True
- return False
-
- def getRelatedChanges(self):
- related = set()
- for c in self.needs_changes:
- related.add(c)
- for c in self.needed_by_changes:
- related.add(c)
- related.update(c.getRelatedChanges())
- return related
-
- def updatesConfig(self):
- if 'zuul.yaml' in self.files or '.zuul.yaml' in self.files:
- return True
- return False
-
-
-class Ref(Changeish):
- """An existing state of a Project."""
- def __init__(self, project):
- super(Ref, self).__init__(project)
self.ref = None
self.oldrev = None
self.newrev = None
+ def getBasePath(self):
+ base_path = ''
+ if hasattr(self, 'ref'):
+ base_path = "%s/%s" % (self.newrev[:2], self.newrev)
+
+ return base_path
+
def _id(self):
return self.newrev
@@ -1703,25 +1717,89 @@
def isUpdateOf(self, other):
return False
+ def filterJobs(self, jobs):
+ return filter(lambda job: job.changeMatches(self), jobs)
-class NullChange(Changeish):
- # TODOv3(jeblair): remove this in favor of enqueueing Refs (eg
- # current master) instead.
- def __repr__(self):
- return '<NullChange for %s>' % (self.project)
+ def getRelatedChanges(self):
+ return set()
+
+ def updatesConfig(self):
+ return False
+
+ def getSafeAttributes(self):
+ return Attributes(project=self.project,
+ ref=self.ref,
+ oldrev=self.oldrev,
+ newrev=self.newrev)
+
+
+class Change(Ref):
+ """A proposed new state for a Project."""
+ def __init__(self, project):
+ super(Change, self).__init__(project)
+ self.branch = None
+ self.number = None
+ self.url = None
+ self.patchset = None
+ self.refspec = None
+
+ self.files = []
+ self.needs_changes = []
+ self.needed_by_changes = []
+ self.is_current_patchset = True
+ self.can_merge = False
+ self.is_merged = False
+ self.failed_to_merge = False
+ self.approvals = []
+ self.open = None
+ self.status = None
+ self.owner = None
def _id(self):
- return None
+ return '%s,%s' % (self.number, self.patchset)
+
+ def __repr__(self):
+ return '<Change 0x%x %s>' % (id(self), self._id())
+
+ def getBasePath(self):
+ if hasattr(self, 'refspec'):
+ return "%s/%s/%s" % (
+ self.number[-2:], self.number, self.patchset)
+ return super(Change, self).getBasePath()
def equals(self, other):
- if (self.project == other.project
- and other._id() is None):
+ if self.number == other.number and self.patchset == other.patchset:
return True
return False
def isUpdateOf(self, other):
+ if ((hasattr(other, 'number') and self.number == other.number) and
+ (hasattr(other, 'patchset') and
+ self.patchset is not None and
+ other.patchset is not None and
+ int(self.patchset) > int(other.patchset))):
+ return True
return False
+ def getRelatedChanges(self):
+ related = set()
+ for c in self.needs_changes:
+ related.add(c)
+ for c in self.needed_by_changes:
+ related.add(c)
+ related.update(c.getRelatedChanges())
+ return related
+
+ def updatesConfig(self):
+ if 'zuul.yaml' in self.files or '.zuul.yaml' in self.files:
+ return True
+ return False
+
+ def getSafeAttributes(self):
+ return Attributes(project=self.project,
+ number=self.number,
+ patchset=self.patchset)
+
class TriggerEvent(object):
"""Incoming event from an external system."""
@@ -2084,6 +2162,7 @@
self.name = name
self.merge_mode = None
self.pipelines = {}
+ self.private_key_file = None
class UnparsedAbideConfig(object):
@@ -2131,6 +2210,8 @@
self.project_templates = []
self.projects = {}
self.nodesets = []
+ self.secrets = []
+ self.semaphores = []
def copy(self):
r = UnparsedTenantConfig()
@@ -2139,6 +2220,8 @@
r.project_templates = copy.deepcopy(self.project_templates)
r.projects = copy.deepcopy(self.projects)
r.nodesets = copy.deepcopy(self.nodesets)
+ r.secrets = copy.deepcopy(self.secrets)
+ r.semaphores = copy.deepcopy(self.semaphores)
return r
def extend(self, conf):
@@ -2149,6 +2232,8 @@
for k, v in conf.projects.items():
self.projects.setdefault(k, []).extend(v)
self.nodesets.extend(conf.nodesets)
+ self.secrets.extend(conf.secrets)
+ self.semaphores.extend(conf.semaphores)
return
if not isinstance(conf, list):
@@ -2177,6 +2262,10 @@
self.pipelines.append(value)
elif key == 'nodeset':
self.nodesets.append(value)
+ elif key == 'secret':
+ self.secrets.append(value)
+ elif key == 'semaphore':
+ self.semaphores.append(value)
else:
raise Exception("Configuration item `%s` not recognized "
"(when parsing %s)" %
@@ -2199,6 +2288,8 @@
# inherit from the reference definition.
self.jobs = {'noop': [Job('noop')]}
self.nodesets = {}
+ self.secrets = {}
+ self.semaphores = {}
def getJob(self, name):
if name in self.jobs:
@@ -2232,6 +2323,16 @@
raise Exception("NodeSet %s already defined" % (nodeset.name,))
self.nodesets[nodeset.name] = nodeset
+ def addSecret(self, secret):
+ if secret.name in self.secrets:
+ raise Exception("Secret %s already defined" % (secret.name,))
+ self.secrets[secret.name] = secret
+
+ def addSemaphore(self, semaphore):
+ if semaphore.name in self.semaphores:
+ raise Exception("Semaphore %s already defined" % (semaphore.name,))
+ self.semaphores[semaphore.name] = semaphore
+
def addPipeline(self, pipeline):
self.pipelines[pipeline.name] = pipeline
@@ -2241,7 +2342,9 @@
def addProjectConfig(self, project_config):
self.project_configs[project_config.name] = project_config
- def _createJobGraph(self, change, job_list, job_graph):
+ def _createJobGraph(self, item, job_list, job_graph):
+ change = item.change
+ pipeline = item.pipeline
for jobname in job_list.jobs:
# This is the final job we are constructing
frozen_job = None
@@ -2263,7 +2366,7 @@
# If the job does not allow auth inheritance, do not allow
# the project-pipeline variants to update its execution
# attributes.
- if frozen_job.auth and not frozen_job.auth.get('inherit'):
+ if frozen_job.auth and not frozen_job.auth.inherit:
frozen_job.final = True
# Whether the change matches any of the project pipeline
# variants
@@ -2276,6 +2379,15 @@
# A change must match at least one project pipeline
# job variant.
continue
+ if (frozen_job.allowed_projects and
+ change.project.name not in frozen_job.allowed_projects):
+ raise Exception("Project %s is not allowed to run job %s" %
+ (change.project.name, frozen_job.name))
+ if ((not pipeline.allow_secrets) and frozen_job.auth and
+ frozen_job.auth.secrets):
+ raise Exception("Pipeline %s does not allow jobs with "
+ "secrets (job %s)" % (
+ pipeline.name, frozen_job.name))
job_graph.addJob(frozen_job)
def createJobGraph(self, item):
@@ -2287,10 +2399,99 @@
if project_config and item.pipeline.name in project_config.pipelines:
project_job_list = \
project_config.pipelines[item.pipeline.name].job_list
- self._createJobGraph(item.change, project_job_list, ret)
+ self._createJobGraph(item, project_job_list, ret)
return ret
+class Semaphore(object):
+ def __init__(self, name, max=1):
+ self.name = name
+ self.max = int(max)
+
+
+class SemaphoreHandler(object):
+ log = logging.getLogger("zuul.SemaphoreHandler")
+
+ def __init__(self):
+ self.semaphores = {}
+
+ def acquire(self, item, job):
+ if not job.semaphore:
+ return True
+
+ semaphore_key = job.semaphore
+
+ m = self.semaphores.get(semaphore_key)
+ if not m:
+ # The semaphore is not held, acquire it
+ self._acquire(semaphore_key, item, job.name)
+ return True
+ if (item, job.name) in m:
+ # This item already holds the semaphore
+ return True
+
+ # semaphore is there, check max
+ if len(m) < self._max_count(item, job.semaphore):
+ self._acquire(semaphore_key, item, job.name)
+ return True
+
+ return False
+
+ def release(self, item, job):
+ if not job.semaphore:
+ return
+
+ semaphore_key = job.semaphore
+
+ m = self.semaphores.get(semaphore_key)
+ if not m:
+ # The semaphore is not held, nothing to do
+ self.log.error("Semaphore can not be released for %s "
+ "because the semaphore is not held" %
+ item)
+ return
+ if (item, job.name) in m:
+ # This item is a holder of the semaphore
+ self._release(semaphore_key, item, job.name)
+ return
+ self.log.error("Semaphore can not be released for %s "
+ "which does not hold it" % item)
+
+ def _acquire(self, semaphore_key, item, job_name):
+ self.log.debug("Semaphore acquire {semaphore}: job {job}, item {item}"
+ .format(semaphore=semaphore_key,
+ job=job_name,
+ item=item))
+ if semaphore_key not in self.semaphores:
+ self.semaphores[semaphore_key] = []
+ self.semaphores[semaphore_key].append((item, job_name))
+
+ def _release(self, semaphore_key, item, job_name):
+ self.log.debug("Semaphore release {semaphore}: job {job}, item {item}"
+ .format(semaphore=semaphore_key,
+ job=job_name,
+ item=item))
+ sem_item = (item, job_name)
+ if sem_item in self.semaphores[semaphore_key]:
+ self.semaphores[semaphore_key].remove(sem_item)
+
+ # cleanup if there is no user of the semaphore anymore
+ if len(self.semaphores[semaphore_key]) == 0:
+ del self.semaphores[semaphore_key]
+
+ @staticmethod
+ def _max_count(item, semaphore_name):
+ if not item.current_build_set.layout:
+ # This should not occur as the layout of the item must already be
+ # built when acquiring or releasing a semaphore for a job.
+ raise Exception("Item {} has no layout".format(item))
+
+ # find the right semaphore
+ default_semaphore = Semaphore(semaphore_name, 1)
+ semaphores = item.current_build_set.layout.semaphores
+ return semaphores.get(semaphore_name, default_semaphore).max
+
+
class Tenant(object):
def __init__(self, name):
self.name = name
@@ -2311,6 +2512,8 @@
# A mapping of source -> {config_repos: {}, project_repos: {}}
self.sources = {}
+ self.semaphore_handler = SemaphoreHandler()
+
def addConfigRepo(self, source, project):
sd = self.sources.setdefault(source.name,
{'config_repos': {},
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index 6df3f1b..5e25e7c 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -111,14 +111,10 @@
ret = ''
config = self.connection.sched.config
- if config.has_option('zuul', 'url_pattern'):
- url_pattern = config.get('zuul', 'url_pattern')
- else:
- url_pattern = None
for job in item.getJobs():
build = item.current_build_set.getBuild(job.name)
- (result, url) = item.formatJobResult(job, url_pattern)
+ (result, url) = item.formatJobResult(job)
if not job.voting:
voting = ' (non-voting)'
else:
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 7fb1568..0fa1763 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -33,68 +33,6 @@
from zuul import version as zuul_version
-class MutexHandler(object):
- log = logging.getLogger("zuul.MutexHandler")
-
- def __init__(self):
- self.mutexes = {}
-
- def acquire(self, item, job):
- if not job.mutex:
- return True
- mutex_name = job.mutex
- m = self.mutexes.get(mutex_name)
- if not m:
- # The mutex is not held, acquire it
- self._acquire(mutex_name, item, job.name)
- return True
- held_item, held_job_name = m
- if held_item is item and held_job_name == job.name:
- # This item already holds the mutex
- return True
- held_build = held_item.current_build_set.getBuild(held_job_name)
- if held_build and held_build.result:
- # The build that held the mutex is complete, release it
- # and let the new item have it.
- self.log.error("Held mutex %s being released because "
- "the build that holds it is complete" %
- (mutex_name,))
- self._release(mutex_name, item, job.name)
- self._acquire(mutex_name, item, job.name)
- return True
- return False
-
- def release(self, item, job):
- if not job.mutex:
- return
- mutex_name = job.mutex
- m = self.mutexes.get(mutex_name)
- if not m:
- # The mutex is not held, nothing to do
- self.log.error("Mutex can not be released for %s "
- "because the mutex is not held" %
- (item,))
- return
- held_item, held_job_name = m
- if held_item is item and held_job_name == job.name:
- # This item holds the mutex
- self._release(mutex_name, item, job.name)
- return
- self.log.error("Mutex can not be released for %s "
- "which does not hold it" %
- (item,))
-
- def _acquire(self, mutex_name, item, job_name):
- self.log.debug("Job %s of item %s acquiring mutex %s" %
- (job_name, item, mutex_name))
- self.mutexes[mutex_name] = (item, job_name)
-
- def _release(self, mutex_name, item, job_name):
- self.log.debug("Job %s of item %s releasing mutex %s" %
- (job_name, item, mutex_name))
- del self.mutexes[mutex_name]
-
-
class ManagementEvent(object):
"""An event that should be processed within the main queue run loop"""
def __init__(self):
@@ -269,7 +207,6 @@
self.connections = None
self.statsd = extras.try_import('statsd.statsd')
# TODO(jeblair): fix this
- self.mutex = MutexHandler()
# Despite triggers being part of the pipeline, there is one trigger set
# per scheduler. The pipeline handles the trigger filters but since
# the events are handled by the scheduler itself it needs to handle
@@ -452,6 +389,22 @@
os.mkdir(d)
return d
+ def _get_project_key_dir(self):
+ if self.config.has_option('zuul', 'state_dir'):
+ state_dir = os.path.expanduser(self.config.get('zuul',
+ 'state_dir'))
+ else:
+ state_dir = '/var/lib/zuul'
+ key_dir = os.path.join(state_dir, 'keys')
+ if not os.path.exists(key_dir):
+ os.mkdir(key_dir, 0o700)
+ st = os.stat(key_dir)
+ mode = st.st_mode & 0o777
+ if mode != 0o700:
+ raise Exception("Project key directory %s must be mode 0700; "
+ "current mode is %o" % (key_dir, mode))
+ return key_dir
+
def _save_queue(self):
pickle_file = self._get_queue_pickle_file()
events = []
@@ -507,6 +460,7 @@
loader = configloader.ConfigLoader()
abide = loader.loadConfig(
self.config.get('zuul', 'tenant_config'),
+ self._get_project_key_dir(),
self, self.merger, self.connections)
for tenant in abide.tenants.values():
self._reconfigureTenant(tenant)
@@ -523,6 +477,7 @@
loader = configloader.ConfigLoader()
abide = loader.reloadTenant(
self.config.get('zuul', 'tenant_config'),
+ self._get_project_key_dir(),
self, self.merger, self.connections,
self.abide, event.tenant)
tenant = abide.tenants[event.tenant.name]
@@ -575,19 +530,27 @@
except Exception:
self.log.exception(
"Exception while canceling build %s "
- "for change %s" % (build, item.change))
+ "for change %s" % (build, build.build_set.item.change))
finally:
- self.mutex.release(build.build_set.item, build.job)
+ tenant.semaphore_handler.release(
+ build.build_set.item, build.job)
def _reconfigureTenant(self, tenant):
# This is called from _doReconfigureEvent while holding the
# layout lock
old_tenant = self.abide.tenants.get(tenant.name)
+
if old_tenant:
+            # Copy over semaphore handler so we don't lose the currently
+            # held semaphores.
+ tenant.semaphore_handler = old_tenant.semaphore_handler
+
self._reenqueueTenant(old_tenant, tenant)
+
# TODOv3(jeblair): update for tenants
# self.maintainConnectionCache()
self.connections.reconfigureDrivers(tenant)
+
# TODOv3(jeblair): remove postconfig calls?
for pipeline in tenant.layout.pipelines.values():
pipeline.source.postConfig()
@@ -893,11 +856,6 @@
def formatStatusJSON(self, tenant_name):
# TODOv3(jeblair): use tenants
- if self.config.has_option('zuul', 'url_pattern'):
- url_pattern = self.config.get('zuul', 'url_pattern')
- else:
- url_pattern = None
-
data = {}
data['zuul_version'] = self.zuul_version
@@ -924,5 +882,5 @@
data['pipelines'] = pipelines
tenant = self.abide.tenants.get(tenant_name)
for pipeline in tenant.layout.pipelines.values():
- pipelines.append(pipeline.formatStatusJSON(url_pattern))
+ pipelines.append(pipeline.formatStatusJSON())
return json.dumps(data)
diff --git a/zuul/webapp.py b/zuul/webapp.py
index e16f0b4..4f040fa 100644
--- a/zuul/webapp.py
+++ b/zuul/webapp.py
@@ -23,6 +23,8 @@
import webob
from webob import dec
+from zuul.lib import encryption
+
"""Zuul main web app.
Zuul supports HTTP requests directly against it for determining the
@@ -34,6 +36,7 @@
queue / pipeline structure of the system
- /status.json (backwards compatibility): same as /status
- /status/change/X,Y: return status just for gerrit change X,Y
+ - /keys/SOURCE/PROJECT.pub: return the public key for PROJECT
When returning status for a single gerrit change you will get an
array of changes, they will not include the queue structure.
@@ -96,9 +99,31 @@
return m.group(1)
return None
+ def _handle_keys(self, request, path):
+ m = re.match('/keys/(.*?)/(.*?).pub', path)
+ if not m:
+ raise webob.exc.HTTPNotFound()
+ source_name = m.group(1)
+ project_name = m.group(2)
+ source = self.scheduler.connections.getSource(source_name)
+ if not source:
+ raise webob.exc.HTTPNotFound()
+ project = source.getProject(project_name)
+ if not project:
+ raise webob.exc.HTTPNotFound()
+
+ pem_public_key = encryption.serialize_rsa_public_key(
+ project.public_key)
+
+ response = webob.Response(body=pem_public_key,
+ content_type='text/plain')
+ return response.conditional_response_app
+
def app(self, request):
tenant_name = request.path.split('/')[1]
path = request.path.replace('/' + tenant_name, '')
+ if path.startswith('/keys'):
+ return self._handle_keys(request, path)
path = self._normalize_path(path)
if path is None:
raise webob.exc.HTTPNotFound()
diff --git a/zuul/zk.py b/zuul/zk.py
index 2009945..5cd7bee 100644
--- a/zuul/zk.py
+++ b/zuul/zk.py
@@ -34,36 +34,6 @@
pass
-class ZooKeeperConnectionConfig(object):
- '''
- Represents the connection parameters for a ZooKeeper server.
- '''
-
- def __eq__(self, other):
- if isinstance(other, ZooKeeperConnectionConfig):
- if other.__dict__ == self.__dict__:
- return True
- return False
-
- def __init__(self, host, port=2181, chroot=None):
- '''Initialize the ZooKeeperConnectionConfig object.
-
- :param str host: The hostname of the ZooKeeper server.
- :param int port: The port on which ZooKeeper is listening.
- Optional, default: 2181.
- :param str chroot: A chroot for this connection. All
- ZooKeeper nodes will be underneath this root path.
- Optional, default: None.
-
- (one per server) defining the ZooKeeper cluster servers. Only
- the 'host' attribute is required.'.
-
- '''
- self.host = host
- self.port = port
- self.chroot = chroot or ''
-
-
class ZooKeeper(object):
'''
Class implementing the ZooKeeper interface.
@@ -127,21 +97,20 @@
def resetLostFlag(self):
self._became_lost = False
- def connect(self, host_list, read_only=False):
+ def connect(self, hosts, read_only=False):
'''
Establish a connection with ZooKeeper cluster.
Convenience method if a pre-existing ZooKeeper connection is not
supplied to the ZooKeeper object at instantiation time.
- :param list host_list: A list of
- :py:class:`~nodepool.zk.ZooKeeperConnectionConfig` objects
- (one per server) defining the ZooKeeper cluster servers.
+ :param str hosts: Comma-separated list of hosts to connect to (e.g.
+ 127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
:param bool read_only: If True, establishes a read-only connection.
'''
if self.client is None:
- self.client = KazooClient(hosts=host_list, read_only=read_only)
+ self.client = KazooClient(hosts=hosts, read_only=read_only)
self.client.add_listener(self._connection_listener)
self.client.start()
@@ -157,16 +126,15 @@
self.client.close()
self.client = None
- def resetHosts(self, host_list):
+ def resetHosts(self, hosts):
'''
Reset the ZooKeeper cluster connection host list.
- :param list host_list: A list of
- :py:class:`~nodepool.zk.ZooKeeperConnectionConfig` objects
- (one per server) defining the ZooKeeper cluster servers.
+ :param str hosts: Comma-separated list of hosts to connect to (e.g.
+ 127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
'''
if self.client is not None:
- self.client.set_hosts(hosts=host_list)
+ self.client.set_hosts(hosts=hosts)
def submitNodeRequest(self, node_request, watcher):
'''