Merge "Re-enable test_live_reconfiguration_del_project" into feature/zuulv3
diff --git a/.zuul.yaml b/.zuul.yaml
index 98b880d..50223fa 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -31,7 +31,7 @@
 - job:
     name: tox-linters
     parent: tox
-    run: tox/docs
+    run: tox/linters
 
 - job:
     name: tox-py27
diff --git a/bindep.txt b/bindep.txt
index b34b158..6895444 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -4,6 +4,7 @@
 mysql-client [test]
 mysql-server [test]
 libjpeg-dev [test]
+openssl [test]
 zookeeperd [platform:dpkg]
 build-essential [platform:dpkg]
 gcc [platform:rpm]
diff --git a/doc/source/datamodel.rst b/doc/source/developer/datamodel.rst
similarity index 93%
rename from doc/source/datamodel.rst
rename to doc/source/developer/datamodel.rst
index 9df6505..2996ff4 100644
--- a/doc/source/datamodel.rst
+++ b/doc/source/developer/datamodel.rst
@@ -26,12 +26,12 @@
 
 A :py:class:`~zuul.model.Job` represents the definition of what to do. A
 :py:class:`~zuul.model.Build` represents a single run of a
-:py:class:`~zuul.model.Job`. A :py:class:`~zuul.model.JobTree` is used to
+:py:class:`~zuul.model.Job`. A :py:class:`~zuul.model.JobGraph` is used to
 encapsulate the dependencies between one or more :py:class:`~zuul.model.Job`
 objects.
 
 .. autoclass:: zuul.model.Job
-.. autoclass:: zuul.model.JobTree
+.. autoclass:: zuul.model.JobGraph
 .. autoclass:: zuul.model.Build
 
 The :py:class:`~zuul.manager.base.PipelineManager` enqueues each
@@ -48,7 +48,6 @@
 Changes
 ~~~~~~~
 
-.. autoclass:: zuul.model.Changeish
 .. autoclass:: zuul.model.Change
 .. autoclass:: zuul.model.Ref
 
diff --git a/doc/source/drivers.rst b/doc/source/developer/drivers.rst
similarity index 100%
rename from doc/source/drivers.rst
rename to doc/source/developer/drivers.rst
diff --git a/doc/source/developer.rst b/doc/source/developer/index.rst
similarity index 95%
rename from doc/source/developer.rst
rename to doc/source/developer/index.rst
index 527ea6e..986bbe4 100644
--- a/doc/source/developer.rst
+++ b/doc/source/developer/index.rst
@@ -12,4 +12,5 @@
 
    datamodel
    drivers
+   triggers
    testing
diff --git a/doc/source/testing.rst b/doc/source/developer/testing.rst
similarity index 92%
rename from doc/source/testing.rst
rename to doc/source/developer/testing.rst
index 092754f..4a813d0 100644
--- a/doc/source/testing.rst
+++ b/doc/source/developer/testing.rst
@@ -19,7 +19,7 @@
 .. autoclass:: tests.base.FakeGearmanServer
    :members:
 
-.. autoclass:: tests.base.RecordingLaunchServer
+.. autoclass:: tests.base.RecordingExecutorServer
    :members:
 
 .. autoclass:: tests.base.FakeBuild
diff --git a/doc/source/developer/triggers.rst b/doc/source/developer/triggers.rst
new file mode 100644
index 0000000..56f4a03
--- /dev/null
+++ b/doc/source/developer/triggers.rst
@@ -0,0 +1,19 @@
+Triggers
+========
+
+Triggers must inherit from :py:class:`~zuul.trigger.BaseTrigger` and, at a minimum,
+implement the :py:meth:`~zuul.trigger.BaseTrigger.getEventFilters` method.
+
+.. autoclass:: zuul.trigger.BaseTrigger
+   :members:
+
+The current list of triggers is:
+
+.. autoclass:: zuul.driver.gerrit.gerrittrigger.GerritTrigger
+   :members:
+
+.. autoclass:: zuul.driver.timer.timertrigger.TimerTrigger
+   :members:
+
+.. autoclass:: zuul.driver.zuul.zuultrigger.ZuulTrigger
+   :members:
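For orientation, here is a minimal sketch of what a custom trigger might look like under the constraints described in the new triggers.rst above. The ``getEventFilters`` argument shape, the ``name`` attribute, and the ``MyEventFilter`` class are assumptions for illustration only, not code taken from the Zuul tree::

    from zuul.trigger import BaseTrigger


    class MyEventFilter(object):
        """Placeholder; a real driver supplies its own EventFilter class."""

        def __init__(self, event_types):
            self.event_types = event_types


    class MyTrigger(BaseTrigger):
        """Hypothetical trigger showing the minimum required surface."""

        name = 'mytrigger'  # assumed; real triggers are registered by a driver

        def getEventFilters(self, trigger_conf):
            # Assumed shape: trigger_conf is the list of dictionaries from a
            # pipeline's trigger section; return one filter object per entry.
            if not isinstance(trigger_conf, list):
                trigger_conf = [trigger_conf]
            return [MyEventFilter(event_types=entry.get('event'))
                    for entry in trigger_conf]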
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 3f903db..fb30b92 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -24,7 +24,7 @@
    executors
    statsd
    client
-   developer
+   developer/index
 
 Indices and tables
 ==================
diff --git a/doc/source/zuul.rst b/doc/source/zuul.rst
index e4ce737..56cc6a8 100644
--- a/doc/source/zuul.rst
+++ b/doc/source/zuul.rst
@@ -124,13 +124,6 @@
   optional value and ``1`` is used by default.
   ``status_expiry=1``
 
-**url_pattern**
-  If you are storing build logs external to the system that originally
-  ran jobs and wish to link to those logs when Zuul makes comments on
-  Gerrit changes for completed jobs this setting configures what the
-  URLs for those links should be.  Used by zuul-server only.
-  ``http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}``
-
 **job_name_in_report**
   Boolean value (``true`` or ``false``) that indicates whether the
   job name should be included in the report (normally only the URL
@@ -644,10 +637,12 @@
   would largely defeat the parallelization of dependent change testing
   that is the main feature of Zuul.  Default: ``false``.
 
-**mutex (optional)**
-  This is a string that names a mutex that should be observed by this
-  job.  Only one build of any job that references the same named mutex
-  will be enqueued at a time.  This applies across all pipelines.
+**semaphore (optional)**
+  This is a string that names a semaphore that should be observed by this
+  job.  The semaphore defines how many jobs that reference it may be
+  enqueued at a time.  This applies across all pipelines in the same
+  tenant.  The maximum value of the semaphore can be specified in the
+  config repositories and defaults to 1.
 
 **branch (optional)**
   This job should only be run on matching branches.  This field is
@@ -850,6 +845,21 @@
 or specified in the project itself, the configuration defined by
 either the last template or the project itself will take priority.
 
+
+Semaphores
+""""""""""
+
+When using semaphores, the maximum value of each semaphore can be specified
+in its respective config repository.  Unspecified semaphores default to 1::
+
+  - semaphore:
+      name: semaphore-foo
+      max: 5
+  - semaphore:
+      name: semaphore-bar
+      max: 3
+
+
 logging.conf
 ~~~~~~~~~~~~
 This file is optional.  If provided, it should be a standard
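To connect the two pieces of documentation changed above: a job opts into one of these semaphores by naming it in its own definition, as the test fixtures in this change do. A minimal sketch using the semaphore-foo example (the job name is a placeholder)::

  - job:
      name: example-job
      semaphore: semaphore-foo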
diff --git a/setup.cfg b/setup.cfg
index 86ebf65..9ee64f3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -31,6 +31,7 @@
 source-dir = doc/source
 build-dir = doc/build
 all_files = 1
+warning-is-error = 1
 
 [extras]
 mysql_reporter=
diff --git a/test-requirements.txt b/test-requirements.txt
index b99c803..6262a02 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,7 +1,7 @@
 hacking>=0.12.0,!=0.13.0,<0.14  # Apache-2.0
 
 coverage>=3.6
-sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
+sphinx>=1.5.1
 sphinxcontrib-blockdiag>=1.1.0
 fixtures>=0.3.14
 python-keystoneclient>=0.4.2
diff --git a/tests/base.py b/tests/base.py
index 2816b9f..9bd44f6 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -664,8 +664,8 @@
         """Return whether this build has certain changes in its git repos.
 
         :arg FakeChange changes: One or more changes (varargs) that
-        are expected to be present (in order) in the git repository of
-        the active project.
+            are expected to be present (in order) in the git repository of
+            the active project.
 
         :returns: Whether the build has the indicated changes.
         :rtype: bool
@@ -1970,7 +1970,7 @@
             this method.
 
         :arg str connection: The name of the connection corresponding
-        to the gerrit server.
+            to the gerrit server.
         :arg str event: The JSON-encoded event.
 
         """
diff --git a/tests/encrypt_secret.py b/tests/encrypt_secret.py
index ab45018..b8524a0 100644
--- a/tests/encrypt_secret.py
+++ b/tests/encrypt_secret.py
@@ -15,10 +15,7 @@
 import sys
 import os
 
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.asymmetric import padding
-from cryptography.hazmat.primitives import serialization
-from cryptography.hazmat.primitives import hashes
+from zuul.lib import encryption
 
 FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
                            'fixtures')
@@ -27,24 +24,10 @@
 def main():
     private_key_file = os.path.join(FIXTURE_DIR, 'private.pem')
     with open(private_key_file, "rb") as f:
-        private_key = serialization.load_pem_private_key(
-            f.read(),
-            password=None,
-            backend=default_backend()
-        )
+        private_key, public_key = \
+            encryption.deserialize_rsa_keypair(f.read())
 
-    # Extract public key from private
-    public_key = private_key.public_key()
-
-    # https://cryptography.io/en/stable/hazmat/primitives/asymmetric/rsa/#encryption
-    ciphertext = public_key.encrypt(
-        sys.argv[1],
-        padding.OAEP(
-            mgf=padding.MGF1(algorithm=hashes.SHA1()),
-            algorithm=hashes.SHA1(),
-            label=None
-        )
-    )
+    ciphertext = encryption.encrypt_pkcs1_oaep(sys.argv[1], public_key)
     print(ciphertext.encode('base64'))
 
 if __name__ == '__main__':
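For readers following the encryption refactor above: the zuul.lib.encryption helpers used here and in the new tests/unit/test_encryption.py compose into a simple round trip. A minimal sketch based only on the calls visible in this change ('secret value' is an arbitrary example string)::

    from zuul.lib import encryption

    # Generate a keypair, encrypt with the public half, decrypt with the
    # private half; this mirrors what encrypt_secret.py and the unit
    # test exercise.
    private_key, public_key = encryption.generate_rsa_keypair()
    ciphertext = encryption.encrypt_pkcs1_oaep('secret value', public_key)
    plaintext = encryption.decrypt_pkcs1_oaep(ciphertext, private_key)
    assert plaintext == 'secret value'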
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml
index 45acb87..3371a20 100644
--- a/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/python27.yaml
@@ -6,5 +6,8 @@
     - copy:
         src: "{{zuul._test.test_root}}/{{zuul.uuid}}.flag"
         dest: "{{zuul._test.test_root}}/{{zuul.uuid}}.copied"
+    - copy:
+        content: "{{test_secret.username}} {{test_secret.password}}"
+        dest: "{{zuul._test.test_root}}/{{zuul.uuid}}.secrets"
   roles:
     - bare-role
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index c9fba3e..0980bc1 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -2,6 +2,7 @@
     name: check
     manager: independent
     source: gerrit
+    allow-secrets: true
     trigger:
       gerrit:
         - event: patchset-created
@@ -38,7 +39,7 @@
     name: test_secret
     data:
       username: test-username
-      password: !encrypted/pkcs1 |
+      password: !encrypted/pkcs1-oaep |
         BFhtdnm8uXx7kn79RFL/zJywmzLkT1GY78P3bOtp4WghUFWobkifSu7ZpaV4NeO0s71YUsi1wGZZ
         L0LveZjUN0t6OU1VZKSG8R5Ly7urjaSo1pPVIq5Rtt/H7W14Lecd+cUeKb4joeusC9drN3AA8a4o
         ykcVpt1wVqUnTbMGC9ARMCQP6eopcs1l7tzMseprW4RDNhIuz3CRgd0QBMPl6VDoFgBPB8vxtJw+
@@ -57,6 +58,9 @@
       flagpath: '{{zuul._test.test_root}}/{{zuul.uuid}}.flag'
     roles:
       - zuul: bare-role
+    auth:
+      secrets:
+        - test_secret
 
 - job:
     parent: python27
diff --git a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
index d6f083d..60cd434 100644
--- a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
@@ -6,3 +6,7 @@
     tenant-one-gate:
       jobs:
         - project-test1
+
+- semaphore:
+    name: test-semaphore
+    max: 1
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/common-config/zuul.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/common-config/zuul.yaml
new file mode 100644
index 0000000..d18ed46
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/common-config/zuul.yaml
@@ -0,0 +1,13 @@
+- pipeline:
+    name: check
+    manager: independent
+    source: gerrit
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/org_project1/README b/tests/fixtures/config/multi-tenant-semaphore/git/org_project1/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/org_project2/README b/tests/fixtures/config/multi-tenant-semaphore/git/org_project2/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/playbooks/project1-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/playbooks/project1-test1.yaml
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/zuul.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/zuul.yaml
new file mode 100644
index 0000000..5e377e7
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-one-config/zuul.yaml
@@ -0,0 +1,13 @@
+- job:
+    name: project1-test1
+    semaphore: test-semaphore
+
+- project:
+    name: org/project1
+    check:
+      jobs:
+        - project1-test1
+
+- semaphore:
+    name: test-semaphore
+    max: 1
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/playbooks/project2-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/playbooks/project2-test1.yaml
diff --git a/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/zuul.yaml b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/zuul.yaml
new file mode 100644
index 0000000..a310532
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/git/tenant-two-config/zuul.yaml
@@ -0,0 +1,13 @@
+- job:
+    name: project2-test1
+    semaphore: test-semaphore
+
+- project:
+    name: org/project2
+    check:
+      jobs:
+        - project2-test1
+
+- semaphore:
+    name: test-semaphore
+    max: 2
diff --git a/tests/fixtures/config/multi-tenant-semaphore/main.yaml b/tests/fixtures/config/multi-tenant-semaphore/main.yaml
new file mode 100644
index 0000000..b1c47b1
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant-semaphore/main.yaml
@@ -0,0 +1,15 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-repos:
+          - common-config
+          - tenant-one-config
+
+- tenant:
+    name: tenant-two
+    source:
+      gerrit:
+        config-repos:
+          - common-config
+          - tenant-two-config
diff --git a/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
index 47c173d..dff18de 100644
--- a/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
@@ -151,6 +151,15 @@
 
 - project:
     name: org/project2
+    check:
+      jobs:
+        - project-merge
+        - project-test1:
+            dependencies: project-merge
+        - project-test2:
+            dependencies: project-merge
+        - project1-project2-integration:
+            dependencies: project-merge
     gate:
       queue: integrated
       jobs:
diff --git a/tests/fixtures/config/single-tenant/git/layout-ignore-dependencies/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-ignore-dependencies/zuul.yaml
new file mode 100644
index 0000000..4010372
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-ignore-dependencies/zuul.yaml
@@ -0,0 +1,66 @@
+- pipeline:
+    name: check
+    manager: independent
+    ignore-dependencies: true
+    source: gerrit
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- job:
+    name: project1-merge
+
+- job:
+    name: project1-test1
+
+- job:
+    name: project1-test2
+
+- job:
+    name: project2-merge
+
+- job:
+    name: project2-test1
+
+- job:
+    name: project2-test2
+
+- job:
+    name: project1-project2-integration
+    queue-name: integration
+
+- project:
+    name: org/project1
+    check:
+      jobs:
+        - project1-merge
+        - project1-test1:
+            dependencies:
+              - project1-merge
+        - project1-test2:
+            dependencies:
+              - project1-merge
+        - project1-project2-integration:
+            dependencies:
+              - project1-merge
+
+- project:
+    name: org/project2
+    check:
+      jobs:
+        - project2-merge
+        - project2-test1:
+            dependencies:
+              - project2-merge
+        - project2-test2:
+            dependencies:
+              - project2-merge
+        - project1-project2-integration:
+            dependencies:
+              - project2-merge
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-two.yaml b/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-two.yaml
deleted file mode 100644
index f679dce..0000000
--- a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-two.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-- hosts: all
-  tasks: []
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-mutex/zuul.yaml
deleted file mode 100644
index bb92b7a..0000000
--- a/tests/fixtures/config/single-tenant/git/layout-mutex/zuul.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-- pipeline:
-    name: check
-    manager: independent
-    source: gerrit
-    trigger:
-      gerrit:
-        - event: patchset-created
-    success:
-      gerrit:
-        verified: 1
-    failure:
-      gerrit:
-        verified: -1
-
-- job:
-    name: project-test1
-
-- job:
-    name: mutex-one
-    mutex: test-mutex
-
-- job:
-    name: mutex-two
-    mutex: test-mutex
-
-- project:
-    name: org/project
-    check:
-      jobs:
-        - project-test1
-        - mutex-one
-        - mutex-two
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-merge.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
rename to tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-merge.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test1.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
rename to tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test2.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-rate-limit/playbooks/project-test2.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-rate-limit/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-rate-limit/zuul.yaml
new file mode 100644
index 0000000..c4e00f6
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-rate-limit/zuul.yaml
@@ -0,0 +1,47 @@
+- pipeline:
+    name: gate
+    manager: dependent
+    failure-message: Build failed.  For information on how to proceed, see http://wiki.example.org/Test_Failures
+    source: gerrit
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - approved: 1
+    start:
+      gerrit:
+        verified: 0
+    success:
+      gerrit:
+        verified: 2
+        submit: true
+    failure:
+      gerrit:
+        verified: -2
+    window: 2
+    window-floor: 1
+    window-increase-type: linear
+    window-increase-factor: 1
+    window-decrease-type: exponential
+    window-decrease-factor: 2
+
+- job:
+    name: project-merge
+
+- job:
+    name: project-test1
+
+- job:
+    name: project-test2
+
+- project:
+    name: org/project
+    gate:
+      jobs:
+        - project-merge
+        - project-test1:
+            dependencies:
+              - project-merge
+        - project-test2:
+            dependencies:
+              - project-merge
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/playbooks/project-test1.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/playbooks/project-test1.yaml
rename to tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/playbooks/project-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/zuul.yaml
similarity index 100%
rename from tests/fixtures/config/single-tenant/git/layout-mutex-reconfiguration/zuul.yaml
rename to tests/fixtures/config/single-tenant/git/layout-semaphore-reconfiguration/zuul.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/project-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/project-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test2.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-one-test2.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test1.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/project-test1.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test1.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test2.yaml
similarity index 100%
copy from tests/fixtures/config/single-tenant/git/layout-mutex/playbooks/mutex-one.yaml
copy to tests/fixtures/config/single-tenant/git/layout-semaphore/playbooks/semaphore-two-test2.yaml
diff --git a/tests/fixtures/config/single-tenant/git/layout-semaphore/zuul.yaml b/tests/fixtures/config/single-tenant/git/layout-semaphore/zuul.yaml
new file mode 100644
index 0000000..f935112
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/layout-semaphore/zuul.yaml
@@ -0,0 +1,52 @@
+- pipeline:
+    name: check
+    manager: independent
+    source: gerrit
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- job:
+    name: project-test1
+
+- job:
+    name: semaphore-one-test1
+    semaphore: test-semaphore
+
+- job:
+    name: semaphore-one-test2
+    semaphore: test-semaphore
+
+- job:
+    name: semaphore-two-test1
+    semaphore: test-semaphore-two
+
+- job:
+    name: semaphore-two-test2
+    semaphore: test-semaphore-two
+
+- project:
+    name: org/project
+    check:
+      jobs:
+        - project-test1
+        - semaphore-one-test1
+        - semaphore-one-test2
+
+- project:
+    name: org/project1
+    check:
+      jobs:
+        - project-test1
+        - semaphore-two-test1
+        - semaphore-two-test2
+
+- semaphore:
+    name: test-semaphore-two
+    max: 2
diff --git a/tests/fixtures/config/single-tenant/main.yaml b/tests/fixtures/config/single-tenant/main.yaml
index a22ed5c..d9868fa 100644
--- a/tests/fixtures/config/single-tenant/main.yaml
+++ b/tests/fixtures/config/single-tenant/main.yaml
@@ -4,3 +4,5 @@
       gerrit:
         config-repos:
           - common-config
+        project-repos:
+          - org/project
diff --git a/tests/fixtures/config/success-url/git/common-config/zuul.yaml b/tests/fixtures/config/success-url/git/common-config/zuul.yaml
index f2d5251..b3ecf6d 100644
--- a/tests/fixtures/config/success-url/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/success-url/git/common-config/zuul.yaml
@@ -19,7 +19,7 @@
 
 - job:
     name: docs-draft-test
-    success-url: http://docs-draft.example.org/{build.parameters[LOG_PATH]}/publish-docs/
+    success-url: http://docs-draft.example.org/{change.number:.2}/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.uuid:.7}/publish-docs/
 
 - job:
     name: docs-draft-test2
diff --git a/tests/fixtures/layout-ignore-dependencies.yaml b/tests/fixtures/layout-ignore-dependencies.yaml
deleted file mode 100644
index 5c0257c..0000000
--- a/tests/fixtures/layout-ignore-dependencies.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-pipelines:
-  - name: check
-    manager: IndependentPipelineManager
-    ignore-dependencies: true
-    trigger:
-      gerrit:
-        - event: patchset-created
-    success:
-      gerrit:
-        verified: 1
-    failure:
-      gerrit:
-        verified: -1
-
-projects:
-  - name: org/project1
-    check:
-      - project1-merge:
-        - project1-test1
-        - project1-test2
-        - project1-project2-integration
-
-  - name: org/project2
-    check:
-      - project2-merge:
-        - project2-test1
-        - project2-test2
-        - project1-project2-integration
diff --git a/tests/fixtures/layout-mutex-reconfiguration.yaml b/tests/fixtures/layout-mutex-reconfiguration.yaml
deleted file mode 100644
index 76cf1e9..0000000
--- a/tests/fixtures/layout-mutex-reconfiguration.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-pipelines:
-  - name: check
-    manager: IndependentPipelineManager
-    trigger:
-      gerrit:
-        - event: patchset-created
-    success:
-      gerrit:
-        verified: 1
-    failure:
-      gerrit:
-        verified: -1
-
-jobs:
-  - name: mutex-one
-    mutex: test-mutex
-  - name: mutex-two
-    mutex: test-mutex
-
-projects:
-  - name: org/project
-    check:
-      - project-test1
diff --git a/tests/fixtures/layout-rate-limit.yaml b/tests/fixtures/layout-rate-limit.yaml
deleted file mode 100644
index 9f6748c..0000000
--- a/tests/fixtures/layout-rate-limit.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-pipelines:
-  - name: gate
-    manager: DependentPipelineManager
-    failure-message: Build failed.  For information on how to proceed, see http://wiki.example.org/Test_Failures
-    trigger:
-      gerrit:
-        - event: comment-added
-          approval:
-            - approved: 1
-    start:
-      gerrit:
-        verified: 0
-    success:
-      gerrit:
-        verified: 2
-        submit: true
-    failure:
-      gerrit:
-        verified: -2
-    window: 2
-    window-floor: 1
-    window-increase-type: linear
-    window-increase-factor: 1
-    window-decrease-type: exponential
-    window-decrease-factor: 2
-
-projects:
-  - name: org/project
-    gate:
-      - project-merge:
-        - project-test1
-        - project-test2
diff --git a/tests/fixtures/public.pem b/tests/fixtures/public.pem
new file mode 100644
index 0000000..33a78c4
--- /dev/null
+++ b/tests/fixtures/public.pem
@@ -0,0 +1,14 @@
+-----BEGIN PUBLIC KEY-----
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsGqZLUUwV/EZJKddMS20
+6mH7qYmqYhWLo/TUlpDt2JuEaBqCYV8mF9LsjpoqM/Pp0U/r5aQLDUXbRLDn+K+N
+qbvTJajYxHJicP1CAWg1eKUNZjUaya5HP4Ow1hS7AeiF4TSRdiwtHT/gJO2NSsav
+yc30/meKt0WBgbYlrBB81HEQjYWnajf/4so5E8DdrC9tAqmmzde1qcTz7ULouIz5
+3hjp/U3yVMFbpawv194jzHvddmAX3aEUByx2t6lP7dhOAEIEmzmh15hRbacxQI5a
+YWv+ZR0z9PqdwwD+DBbb1AwiX5MJjtIoVCmkEZvcUFiDicyteNMCa5ulpj2SF0oH
+4MlialOP6MiJnmxklDYO07AM/qomcU55pCD8ctu1yD/UydecLk0Uj/9XxqmPQJFE
+cstdXJZQfr5ZNnChOEg6oQ9UImWjav8HQsA6mFW1oAKbDMrgEewooWriqGW5pYtR
+7JBfph6Mt5HGaeH4uqYpb1fveHG1ODa7HBnlNo3qMendBb2wzHGCgtUgWnGfp24T
+sUOUlndCXYhsYbOZbCTW5GwElK0Gri06KPpybY43AIaxcxqilVh5Eapmq7axBm4Z
+zbTOfv15L0FIemEGgpnklevQbZNLIrcE0cS/13qJUvFaYX4yjrtEnzZ3ntjXrpFd
+gLPBKn7Aqf6lWz6BPi07axECAwEAAQ==
+-----END PUBLIC KEY-----
diff --git a/tests/fixtures/zuul-connections-multiple-gerrits.conf b/tests/fixtures/zuul-connections-multiple-gerrits.conf
index b3182d7..d1522ec 100644
--- a/tests/fixtures/zuul-connections-multiple-gerrits.conf
+++ b/tests/fixtures/zuul-connections-multiple-gerrits.conf
@@ -3,7 +3,6 @@
 
 [zuul]
 tenant_config=main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
 job_name_in_report=true
 
 [merger]
diff --git a/tests/fixtures/zuul-connections-same-gerrit.conf b/tests/fixtures/zuul-connections-same-gerrit.conf
index 6156df4..8ddd0f1 100644
--- a/tests/fixtures/zuul-connections-same-gerrit.conf
+++ b/tests/fixtures/zuul-connections-same-gerrit.conf
@@ -3,7 +3,6 @@
 
 [zuul]
 tenant_config=config/zuul-connections-same-gerrit/main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
 job_name_in_report=true
 
 [merger]
diff --git a/tests/fixtures/zuul-git-driver.conf b/tests/fixtures/zuul-git-driver.conf
index 0a4e230..499b564 100644
--- a/tests/fixtures/zuul-git-driver.conf
+++ b/tests/fixtures/zuul-git-driver.conf
@@ -3,7 +3,6 @@
 
 [zuul]
 tenant_config=config/zuul-connections-same-gerrit/main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
 job_name_in_report=true
 
 [merger]
diff --git a/tests/fixtures/zuul-sql-driver-bad.conf b/tests/fixtures/zuul-sql-driver-bad.conf
index d91e2f6..a4df735 100644
--- a/tests/fixtures/zuul-sql-driver-bad.conf
+++ b/tests/fixtures/zuul-sql-driver-bad.conf
@@ -2,8 +2,7 @@
 server=127.0.0.1
 
 [zuul]
-tenant_config=main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
+layout_config=layout-connections-multiple-voters.yaml
 job_name_in_report=true
 
 [merger]
diff --git a/tests/fixtures/zuul.conf b/tests/fixtures/zuul.conf
index ce29310..cd80a45 100644
--- a/tests/fixtures/zuul.conf
+++ b/tests/fixtures/zuul.conf
@@ -3,7 +3,6 @@
 
 [zuul]
 tenant_config=main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
 job_name_in_report=true
 
 [merger]
diff --git a/tests/make_playbooks.py b/tests/make_playbooks.py
index 17acba8..93c37bc 100755
--- a/tests/make_playbooks.py
+++ b/tests/make_playbooks.py
@@ -14,7 +14,7 @@
 
 import os
 
-import yaml
+from zuul.lib import yamlutil as yaml
 
 FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
                            'fixtures')
diff --git a/tests/unit/test_encryption.py b/tests/unit/test_encryption.py
new file mode 100644
index 0000000..4dda78b
--- /dev/null
+++ b/tests/unit/test_encryption.py
@@ -0,0 +1,69 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import subprocess
+import tempfile
+
+from zuul.lib import encryption
+
+from tests.base import BaseTestCase
+
+
+class TestEncryption(BaseTestCase):
+
+    def setUp(self):
+        super(TestEncryption, self).setUp()
+        self.private, self.public = encryption.generate_rsa_keypair()
+
+    def test_serialization(self):
+        "Verify key serialization"
+        pem_private = encryption.serialize_rsa_private_key(self.private)
+        private2, public2 = encryption.deserialize_rsa_keypair(pem_private)
+
+        # cryptography public / private key objects don't implement
+        # equality testing, so we make sure they have the same numbers.
+        self.assertEqual(self.private.private_numbers(),
+                         private2.private_numbers())
+        self.assertEqual(self.public.public_numbers(),
+                         public2.public_numbers())
+
+    def test_pkcs1_oaep(self):
+        "Verify encryption and decryption"
+        orig_plaintext = "some text to encrypt"
+        ciphertext = encryption.encrypt_pkcs1_oaep(orig_plaintext, self.public)
+        plaintext = encryption.decrypt_pkcs1_oaep(ciphertext, self.private)
+        self.assertEqual(orig_plaintext, plaintext)
+
+    def test_openssl_pkcs1_oaep(self):
+        "Verify that we can decrypt something encrypted with OpenSSL"
+        orig_plaintext = "some text to encrypt"
+        pem_public = encryption.serialize_rsa_public_key(self.public)
+        public_file = tempfile.NamedTemporaryFile(delete=False)
+        try:
+            public_file.write(pem_public)
+            public_file.close()
+
+            p = subprocess.Popen(['openssl', 'rsautl', '-encrypt',
+                                  '-oaep', '-pubin', '-inkey',
+                                  public_file.name],
+                                 stdin=subprocess.PIPE,
+                                 stdout=subprocess.PIPE)
+            (stdout, stderr) = p.communicate(orig_plaintext)
+            ciphertext = stdout
+        finally:
+            os.unlink(public_file.name)
+
+        plaintext = encryption.decrypt_pkcs1_oaep(ciphertext, self.private)
+        self.assertEqual(orig_plaintext, plaintext)
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 335d7c3..2167a3b 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -18,19 +18,37 @@
 
 import fixtures
 import testtools
-import yaml
 
 from zuul import model
 from zuul import configloader
+from zuul.lib import encryption
+from zuul.lib import yamlutil as yaml
 
-from tests.base import BaseTestCase
+from tests.base import BaseTestCase, FIXTURE_DIR
+
+
+class FakeSource(object):
+    def __init__(self, name):
+        self.name = name
 
 
 class TestJob(BaseTestCase):
 
     def setUp(self):
         super(TestJob, self).setUp()
-        self.project = model.Project('project', None)
+        self.tenant = model.Tenant('tenant')
+        self.layout = model.Layout()
+        self.project = model.Project('project', 'connection')
+        self.source = FakeSource('connection')
+        self.tenant.addProjectRepo(self.source, self.project)
+        self.pipeline = model.Pipeline('gate', self.layout)
+        self.layout.addPipeline(self.pipeline)
+        self.queue = model.ChangeQueue(self.pipeline)
+
+        private_key_file = os.path.join(FIXTURE_DIR, 'private.pem')
+        with open(private_key_file, "rb") as f:
+            self.project.private_key, self.project.public_key = \
+                encryption.deserialize_rsa_keypair(f.read())
         self.context = model.SourceContext(self.project, 'master',
                                            'test', True)
         self.start_mark = yaml.Mark('name', 0, 0, 0, '', 0)
@@ -73,7 +91,7 @@
         base.pre_run = [base_pre]
         base.run = [base_run]
         base.post_run = [base_post]
-        base.auth = dict(foo='bar', inherit=False)
+        base.auth = model.AuthContext()
 
         py27 = model.Job('py27')
         self.assertEqual(None, py27.timeout)
@@ -85,7 +103,7 @@
                          [x.path for x in py27.run])
         self.assertEqual(['base-post'],
                          [x.path for x in py27.post_run])
-        self.assertEqual({}, py27.auth)
+        self.assertEqual(None, py27.auth)
 
     def test_job_variants(self):
         # This simulates freezing a job.
@@ -99,7 +117,8 @@
         py27.pre_run = [py27_pre]
         py27.run = [py27_run]
         py27.post_run = [py27_post]
-        auth = dict(foo='bar', inherit=False)
+        auth = model.AuthContext()
+        auth.secrets.append('foo')
         py27.auth = auth
 
         job = py27.copy()
@@ -307,7 +326,7 @@
     name: pypi-credentials
     data:
       username: test-username
-      password: !encrypted/pkcs1 |
+      password: !encrypted/pkcs1-oaep |
         BFhtdnm8uXx7kn79RFL/zJywmzLkT1GY78P3bOtp4WghUFWobkifSu7ZpaV4NeO0s71YUsi1wGZZ
         L0LveZjUN0t6OU1VZKSG8R5Ly7urjaSo1pPVIq5Rtt/H7W14Lecd+cUeKb4joeusC9drN3AA8a4o
         ykcVpt1wVqUnTbMGC9ARMCQP6eopcs1l7tzMseprW4RDNhIuz3CRgd0QBMPl6VDoFgBPB8vxtJw+
@@ -401,11 +420,11 @@
             })
         layout.addJob(in_repo_job_with_inherit_false)
 
-        self.assertNotIn('auth', in_repo_job_without_inherit.auth)
-        self.assertIn('secrets', in_repo_job_with_inherit.auth)
-        self.assertEquals(in_repo_job_with_inherit.auth['secrets'],
-                          ['pypi-credentials'])
-        self.assertNotIn('auth', in_repo_job_with_inherit_false.auth)
+        self.assertEqual(None, in_repo_job_without_inherit.auth)
+        self.assertEqual(1, len(in_repo_job_with_inherit.auth.secrets))
+        self.assertEqual(in_repo_job_with_inherit.auth.secrets[0].name,
+                         'pypi-credentials')
+        self.assertEqual(None, in_repo_job_with_inherit_false.auth)
 
     def test_job_inheritance_job_tree(self):
         tenant = model.Tenant('tenant')
@@ -560,6 +579,80 @@
                 "to shadow job base in base_project"):
             layout.addJob(base2)
 
+    def test_job_allowed_projects(self):
+        job = configloader.JobParser.fromYaml(self.tenant, self.layout, {
+            '_source_context': self.context,
+            '_start_mark': self.start_mark,
+            'name': 'job',
+            'allowed-projects': ['project'],
+        })
+        self.layout.addJob(job)
+
+        project2 = model.Project('project2', None)
+        context2 = model.SourceContext(project2, 'master',
+                                       'test', True)
+
+        project2_config = configloader.ProjectParser.fromYaml(
+            self.tenant, self.layout, [{
+                '_source_context': context2,
+                '_start_mark': self.start_mark,
+                'name': 'project2',
+                'gate': {
+                    'jobs': [
+                        'job'
+                    ]
+                }
+            }]
+        )
+        self.layout.addProjectConfig(project2_config)
+
+        change = model.Change(project2)
+        # Test master
+        change.branch = 'master'
+        item = self.queue.enqueueChange(change)
+        item.current_build_set.layout = self.layout
+        with testtools.ExpectedException(
+                Exception,
+                "Project project2 is not allowed to run job job"):
+            item.freezeJobGraph()
+
+    def test_job_pipeline_allow_secrets(self):
+        self.pipeline.allow_secrets = False
+        job = configloader.JobParser.fromYaml(self.tenant, self.layout, {
+            '_source_context': self.context,
+            '_start_mark': self.start_mark,
+            'name': 'job',
+        })
+        auth = model.AuthContext()
+        auth.secrets.append('foo')
+        job.auth = auth
+
+        self.layout.addJob(job)
+
+        project_config = configloader.ProjectParser.fromYaml(
+            self.tenant, self.layout, [{
+                '_source_context': self.context,
+                '_start_mark': self.start_mark,
+                'name': 'project',
+                'gate': {
+                    'jobs': [
+                        'job'
+                    ]
+                }
+            }]
+        )
+        self.layout.addProjectConfig(project_config)
+
+        change = model.Change(self.project)
+        # Test master
+        change.branch = 'master'
+        item = self.queue.enqueueChange(change)
+        item.current_build_set.layout = self.layout
+        with testtools.ExpectedException(
+                Exception,
+                "Pipeline gate does not allow jobs with secrets"):
+            item.freezeJobGraph()
+
 
 class TestJobTimeData(BaseTestCase):
     def setUp(self):
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index 6f2e7ce..e8954df 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -15,6 +15,8 @@
 # under the License.
 
 import json
+import textwrap
+
 import os
 import re
 import shutil
@@ -2177,58 +2179,68 @@
         self.assertEqual('https://server/job/project-test2/0/',
                          status_jobs[2]['report_url'])
 
-    def test_mutex(self):
-        "Test job mutexes"
-        self.updateConfigLayout('layout-mutex')
+    def test_semaphore_one(self):
+        "Test semaphores with max=1 (mutex)"
+        self.updateConfigLayout('layout-semaphore')
         self.sched.reconfigure(self.config)
 
+        self.waitUntilSettled()
+        tenant = self.sched.abide.tenants.get('openstack')
+
         self.executor_server.hold_jobs_in_build = True
+
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
-        self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
 
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
         self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
         self.waitUntilSettled()
+
         self.assertEqual(len(self.builds), 3)
         self.assertEqual(self.builds[0].name, 'project-test1')
-        self.assertEqual(self.builds[1].name, 'mutex-one')
+        self.assertEqual(self.builds[1].name, 'semaphore-one-test1')
         self.assertEqual(self.builds[2].name, 'project-test1')
 
-        self.executor_server.release('mutex-one')
+        self.executor_server.release('semaphore-one-test1')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 3)
         self.assertEqual(self.builds[0].name, 'project-test1')
         self.assertEqual(self.builds[1].name, 'project-test1')
-        self.assertEqual(self.builds[2].name, 'mutex-two')
-        self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+        self.assertEqual(self.builds[2].name, 'semaphore-one-test2')
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
 
-        self.executor_server.release('mutex-two')
+        self.executor_server.release('semaphore-one-test2')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 3)
         self.assertEqual(self.builds[0].name, 'project-test1')
         self.assertEqual(self.builds[1].name, 'project-test1')
-        self.assertEqual(self.builds[2].name, 'mutex-one')
-        self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+        self.assertEqual(self.builds[2].name, 'semaphore-one-test1')
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
 
-        self.executor_server.release('mutex-one')
+        self.executor_server.release('semaphore-one-test1')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 3)
         self.assertEqual(self.builds[0].name, 'project-test1')
         self.assertEqual(self.builds[1].name, 'project-test1')
-        self.assertEqual(self.builds[2].name, 'mutex-two')
-        self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+        self.assertEqual(self.builds[2].name, 'semaphore-one-test2')
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
 
-        self.executor_server.release('mutex-two')
+        self.executor_server.release('semaphore-one-test2')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 2)
         self.assertEqual(self.builds[0].name, 'project-test1')
         self.assertEqual(self.builds[1].name, 'project-test1')
-        self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
 
         self.executor_server.hold_jobs_in_build = False
         self.executor_server.release()
@@ -2238,25 +2250,115 @@
 
         self.assertEqual(A.reported, 1)
         self.assertEqual(B.reported, 1)
-        self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
 
-    def test_mutex_abandon(self):
-        "Test abandon with job mutexes"
-        self.updateConfigLayout('layout-mutex')
+    def test_semaphore_two(self):
+        "Test semaphores with max>1"
+        self.updateConfigLayout('layout-semaphore')
         self.sched.reconfigure(self.config)
 
+        self.waitUntilSettled()
+        tenant = self.sched.abide.tenants.get('openstack')
+
+        self.executor_server.hold_jobs_in_build = True
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+        self.assertFalse('test-semaphore-two' in
+                         tenant.semaphore_handler.semaphores)
+
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 4)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'semaphore-two-test1')
+        self.assertEqual(self.builds[2].name, 'semaphore-two-test2')
+        self.assertEqual(self.builds[3].name, 'project-test1')
+        self.assertTrue('test-semaphore-two' in
+                        tenant.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+            'test-semaphore-two', [])), 2)
+
+        self.executor_server.release('semaphore-two-test1')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 4)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'semaphore-two-test2')
+        self.assertEqual(self.builds[2].name, 'project-test1')
+        self.assertEqual(self.builds[3].name, 'semaphore-two-test1')
+        self.assertTrue('test-semaphore-two' in
+                        tenant.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+            'test-semaphore-two', [])), 2)
+
+        self.executor_server.release('semaphore-two-test2')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 4)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'project-test1')
+        self.assertEqual(self.builds[2].name, 'semaphore-two-test1')
+        self.assertEqual(self.builds[3].name, 'semaphore-two-test2')
+        self.assertTrue('test-semaphore-two' in
+                        tenant.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+            'test-semaphore-two', [])), 2)
+
+        self.executor_server.release('semaphore-two-test1')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 3)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'project-test1')
+        self.assertEqual(self.builds[2].name, 'semaphore-two-test2')
+        self.assertTrue('test-semaphore-two' in
+                        tenant.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+            'test-semaphore-two', [])), 1)
+
+        self.executor_server.release('semaphore-two-test2')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 2)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'project-test1')
+        self.assertFalse('test-semaphore-two' in
+                         tenant.semaphore_handler.semaphores)
+
+        self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release()
+
+        self.waitUntilSettled()
+        self.assertEqual(len(self.builds), 0)
+
+        self.assertEqual(A.reported, 1)
+        self.assertEqual(B.reported, 1)
+
+    def test_semaphore_abandon(self):
+        "Test abandon with job semaphores"
+        self.updateConfigLayout('layout-semaphore')
+        self.sched.reconfigure(self.config)
+
+        self.waitUntilSettled()
+        tenant = self.sched.abide.tenants.get('openstack')
+
         self.executor_server.hold_jobs_in_build = True
 
         tenant = self.sched.abide.tenants.get('openstack')
         check_pipeline = tenant.layout.pipelines['check']
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
 
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
         self.waitUntilSettled()
 
-        self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
 
         self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
         self.waitUntilSettled()
@@ -2265,31 +2367,47 @@
         items = check_pipeline.getAllItems()
         self.assertEqual(len(items), 0)
 
-        # The mutex should be released
-        self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+        # The semaphore should be released
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
 
         self.executor_server.hold_jobs_in_build = False
         self.executor_server.release()
         self.waitUntilSettled()
 
-    def test_mutex_reconfigure(self):
-        "Test reconfigure with job mutexes"
-        self.updateConfigLayout('layout-mutex')
+    def test_semaphore_reconfigure(self):
+        "Test reconfigure with job semaphores"
+        self.updateConfigLayout('layout-semaphore')
         self.sched.reconfigure(self.config)
 
+        self.waitUntilSettled()
+        tenant = self.sched.abide.tenants.get('openstack')
+
         self.executor_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
 
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
         self.waitUntilSettled()
 
-        self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
 
-        self.updateConfigLayout('layout-mutex-reconfiguration')
+        # reconfigure without layout change
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
+        tenant = self.sched.abide.tenants.get('openstack')
+
+        # semaphore still must be held
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
+
+        self.updateConfigLayout('layout-semaphore-reconfiguration')
+        self.sched.reconfigure(self.config)
+        self.waitUntilSettled()
+        tenant = self.sched.abide.tenants.get('openstack')
 
         self.executor_server.release('project-test1')
         self.waitUntilSettled()
@@ -2297,8 +2415,9 @@
         # There should be no builds anymore
         self.assertEqual(len(self.builds), 0)
 
-        # The mutex should be released
-        self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
+        # The semaphore should be released
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
 
     def test_live_reconfiguration(self):
         "Test that live reconfiguration works"
@@ -3201,11 +3320,9 @@
         self.executor_server.release()
         self.waitUntilSettled()
 
-    @skip("Disabled for early v3 development")
     def test_queue_rate_limiting(self):
         "Test that DependentPipelines are rate limited with dep across window"
-        self.updateConfigLayout(
-            'tests/fixtures/layout-rate-limit.yaml')
+        self.updateConfigLayout('layout-rate-limit')
         self.sched.reconfigure(self.config)
         self.executor_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -3246,7 +3363,8 @@
         self.executor_server.release('project-.*')
         self.waitUntilSettled()
 
-        queue = self.sched.layout.pipelines['gate'].queues[0]
+        tenant = self.sched.abide.tenants.get('openstack')
+        queue = tenant.layout.pipelines['gate'].queues[0]
         # A failed so window is reduced by 1 to 1.
         self.assertEqual(queue.window, 1)
         self.assertEqual(queue.window_floor, 1)
@@ -3293,11 +3411,9 @@
         self.assertEqual(queue.window_floor, 1)
         self.assertEqual(C.data['status'], 'MERGED')
 
-    @skip("Disabled for early v3 development")
     def test_queue_rate_limiting_dependent(self):
         "Test that DependentPipelines are rate limited with dep in window"
-        self.updateConfigLayout(
-            'tests/fixtures/layout-rate-limit.yaml')
+        self.updateConfigLayout('layout-rate-limit')
         self.sched.reconfigure(self.config)
         self.executor_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -3339,7 +3455,8 @@
         self.executor_server.release('project-.*')
         self.waitUntilSettled()
 
-        queue = self.sched.layout.pipelines['gate'].queues[0]
+        tenant = self.sched.abide.tenants.get('openstack')
+        queue = tenant.layout.pipelines['gate'].queues[0]
         # A failed so window is reduced by 1 to 1.
         self.assertEqual(queue.window, 1)
         self.assertEqual(queue.window_floor, 1)
@@ -4085,13 +4202,10 @@
         self.init_repo("org/unknown")
         self._test_crd_check_reconfiguration('org/project1', 'org/unknown')
 
-    @skip("Disabled for early v3 development")
     def test_crd_check_ignore_dependencies(self):
         "Test cross-repo dependencies can be ignored"
-        self.updateConfigLayout(
-            'tests/fixtures/layout-ignore-dependencies.yaml')
+        self.updateConfigLayout('layout-ignore-dependencies')
         self.sched.reconfigure(self.config)
-        self.registerJobs()
 
         self.gearman_server.hold_jobs_in_queue = True
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
@@ -4110,7 +4224,8 @@
 
         # Make sure none of the items share a change queue, and all
         # are live.
-        check_pipeline = self.sched.layout.pipelines['check']
+        tenant = self.sched.abide.tenants.get('openstack')
+        check_pipeline = tenant.layout.pipelines['check']
         self.assertEqual(len(check_pipeline.queues), 3)
         self.assertEqual(len(check_pipeline.getAllItems()), 3)
         for item in check_pipeline.getAllItems():
@@ -4131,7 +4246,6 @@
         for job in self.history:
             self.assertEqual(len(job.changes.split()), 1)
 
-    @skip("Disabled for early v3 development")
     def test_crd_check_transitive(self):
         "Test transitive cross-repo dependencies"
         # Specifically, if A -> B -> C, and C gets a new patchset and
@@ -4874,3 +4988,239 @@
         self.executor_server.hold_jobs_in_build = False
         self.executor_server.release()
         self.waitUntilSettled()
+
+
+class TestSemaphoreMultiTenant(ZuulTestCase):
+    tenant_config_file = 'config/multi-tenant-semaphore/main.yaml'
+
+    def test_semaphore_tenant_isolation(self):
+        "Test semaphores in multiple tenants"
+
+        self.waitUntilSettled()
+        tenant_one = self.sched.abide.tenants.get('tenant-one')
+        tenant_two = self.sched.abide.tenants.get('tenant-two')
+
+        self.executor_server.hold_jobs_in_build = True
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+        C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
+        D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
+        E = self.fake_gerrit.addFakeChange('org/project2', 'master', 'E')
+        self.assertFalse('test-semaphore' in
+                         tenant_one.semaphore_handler.semaphores)
+        self.assertFalse('test-semaphore' in
+                         tenant_two.semaphore_handler.semaphores)
+
+        # add patches to project1 of tenant-one
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        # one build of project1-test1 must run
+        # semaphore of tenant-one must be acquired once
+        # semaphore of tenant-two must not be acquired
+        self.assertEqual(len(self.builds), 1)
+        self.assertEqual(self.builds[0].name, 'project1-test1')
+        self.assertTrue('test-semaphore' in
+                        tenant_one.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 1)
+        self.assertFalse('test-semaphore' in
+                         tenant_two.semaphore_handler.semaphores)
+
+        # add patches to project2 of tenant-two
+        self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(E.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        # one build of project1-test1 must run
+        # two builds of project2-test1 must run
+        # semaphore of tenant-one must be acquired once
+        # semaphore of tenant-two must be acquired twice
+        self.assertEqual(len(self.builds), 3)
+        self.assertEqual(self.builds[0].name, 'project1-test1')
+        self.assertEqual(self.builds[1].name, 'project2-test1')
+        self.assertEqual(self.builds[2].name, 'project2-test1')
+        self.assertTrue('test-semaphore' in
+                        tenant_one.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 1)
+        self.assertTrue('test-semaphore' in
+                        tenant_two.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant_two.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 2)
+
+        self.executor_server.release('project1-test1')
+        self.waitUntilSettled()
+
+        # one build of project1-test1 must run
+        # two builds of project2-test1 must run
+        # semaphore of tenant-one must be acquired once
+        # semaphore of tenant-two must be acquired twice
+        self.assertEqual(len(self.builds), 3)
+        self.assertEqual(self.builds[0].name, 'project2-test1')
+        self.assertEqual(self.builds[1].name, 'project2-test1')
+        self.assertEqual(self.builds[2].name, 'project1-test1')
+        self.assertTrue('test-semaphore' in
+                        tenant_one.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 1)
+        self.assertTrue('test-semaphore' in
+                        tenant_two.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant_two.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 2)
+
+        self.executor_server.release('project2-test1')
+        self.waitUntilSettled()
+
+        # one build of project1-test1 must run
+        # one build of project2-test1 must run
+        # semaphore of tenant-one must be acquired once
+        # semaphore of tenant-two must be acquired once
+        self.assertEqual(len(self.builds), 2)
+        self.assertTrue('test-semaphore' in
+                        tenant_one.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant_one.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 1)
+        self.assertTrue('test-semaphore' in
+                        tenant_two.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant_two.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 1)
+
+        self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release()
+
+        self.waitUntilSettled()
+
+        # no build must run
+        # semaphore of tenant-one must not be acquired
+        # semaphore of tenant-two must not be acquired
+        self.assertEqual(len(self.builds), 0)
+        self.assertFalse('test-semaphore' in
+                         tenant_one.semaphore_handler.semaphores)
+        self.assertFalse('test-semaphore' in
+                         tenant_two.semaphore_handler.semaphores)
+
+        self.assertEqual(A.reported, 1)
+        self.assertEqual(B.reported, 1)
+
+
+class TestSemaphoreInRepo(ZuulTestCase):
+    tenant_config_file = 'config/in-repo/main.yaml'
+
+    def test_semaphore_in_repo(self):
+        "Test semaphores in repo config"
+
+        # This tests dynamic semaphore handling in project repos.  The
+        # semaphore max value must not be taken from the proposed (dynamic)
+        # layout; it only takes effect after the change lands.
+
+        self.waitUntilSettled()
+        tenant = self.sched.abide.tenants.get('tenant-one')
+
+        in_repo_conf = textwrap.dedent(
+            """
+            - job:
+                name: project-test2
+                semaphore: test-semaphore
+
+            - project:
+                name: org/project
+                tenant-one-gate:
+                  jobs:
+                    - project-test2
+
+            # the max value in the dynamic layout must be ignored
+            - semaphore:
+                name: test-semaphore
+                max: 2
+            """)
+
+        in_repo_playbook = textwrap.dedent(
+            """
+            - hosts: all
+              tasks: []
+            """)
+
+        file_dict = {'.zuul.yaml': in_repo_conf,
+                     'playbooks/project-test2.yaml': in_repo_playbook}
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+                                           files=file_dict)
+        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+        C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
+        B.setDependsOn(A, 1)
+        C.setDependsOn(A, 1)
+
+        self.executor_server.hold_jobs_in_build = True
+
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+        self.waitUntilSettled()
+
+        # check that the layout in a queue item still has max value of 1
+        # for test-semaphore
+        pipeline = tenant.layout.pipelines.get('tenant-one-gate')
+        queue = None
+        for queue_candidate in pipeline.queues:
+            if queue_candidate.name == 'org/project':
+                queue = queue_candidate
+                break
+        queue_item = queue.queue[0]
+        item_dynamic_layout = queue_item.current_build_set.layout
+        dynamic_test_semaphore = \
+            item_dynamic_layout.semaphores.get('test-semaphore')
+        self.assertEqual(dynamic_test_semaphore.max, 1)
+
+        # one build must be in queue, one semaphore acquired
+        self.assertEqual(len(self.builds), 1)
+        self.assertEqual(self.builds[0].name, 'project-test2')
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 1)
+
+        self.executor_server.release('project-test2')
+        self.waitUntilSettled()
+
+        # change A must be merged
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(A.reported, 2)
+
+        # send change-merged event as the gerrit mock doesn't send it
+        self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+        self.waitUntilSettled()
+
+        # now that change A was merged, the new semaphore max must be effective
+        tenant = self.sched.abide.tenants.get('tenant-one')
+        self.assertEqual(tenant.layout.semaphores.get('test-semaphore').max, 2)
+
+        # two builds must be in queue, two semaphores acquired
+        self.assertEqual(len(self.builds), 2)
+        self.assertEqual(self.builds[0].name, 'project-test2')
+        self.assertEqual(self.builds[1].name, 'project-test2')
+        self.assertTrue('test-semaphore' in
+                        tenant.semaphore_handler.semaphores)
+        self.assertEqual(len(tenant.semaphore_handler.semaphores.get(
+            'test-semaphore', [])), 2)
+
+        self.executor_server.release('project-test2')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 0)
+        self.assertFalse('test-semaphore' in
+                         tenant.semaphore_handler.semaphores)
+
+        self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release()
+
+        self.waitUntilSettled()
+        self.assertEqual(len(self.builds), 0)
+
+        self.assertEqual(A.reported, 2)
+        self.assertEqual(B.reported, 2)
+        self.assertEqual(C.reported, 2)
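+
The semaphore tests above only inspect tenant.semaphore_handler.semaphores, a per-tenant mapping from semaphore name to the list of current holders, where the key disappears once the last holder releases.  A minimal, self-contained sketch of that bookkeeping, assuming nothing about Zuul's actual SemaphoreHandler beyond what the assertions exercise:

class ToySemaphoreHandler(object):
    # Illustrative only -- not Zuul code.
    def __init__(self):
        self.semaphores = {}  # name -> list of holder identifiers

    def acquire(self, name, holder, maximum=1):
        holders = self.semaphores.setdefault(name, [])
        if holder in holders:
            return True
        if len(holders) < maximum:
            holders.append(holder)
            return True
        if not holders:
            # do not leave an empty entry behind if we just created one
            del self.semaphores[name]
        return False

    def release(self, name, holder):
        holders = self.semaphores.get(name, [])
        if holder in holders:
            holders.remove(holder)
        if not holders:
            # key is absent again once fully released, matching the
            # assertFalse('test-semaphore' in ...) checks above
            self.semaphores.pop(name, None)

Because each tenant holds its own handler instance, acquisitions in tenant-one never appear in tenant-two's mapping, which is what test_semaphore_tenant_isolation asserts.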
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index a4442a4..678b957 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -17,11 +17,10 @@
 import os
 import textwrap
 
-from cryptography.hazmat.primitives import serialization
-from cryptography.hazmat.backends import default_backend
 import testtools
 
 import zuul.configloader
+from zuul.lib import encryption
 from tests.base import AnsibleZuulTestCase, ZuulTestCase, FIXTURE_DIR
 
 
@@ -290,6 +289,11 @@
                                            build.uuid + '.bare-role.flag')
         self.assertTrue(os.path.exists(bare_role_flag_path))
 
+        secrets_path = os.path.join(self.test_root,
+                                    build.uuid + '.secrets')
+        with open(secrets_path) as f:
+            self.assertEqual(f.read(), "test-username test-password")
+
 
 class TestBrokenConfig(ZuulTestCase):
     # Test that we get an appropriate syntax error if we start with a
@@ -323,11 +327,8 @@
         private_key_file = os.path.join(key_root, 'gerrit/org/project.pem')
         # Make sure that a proper key was created on startup
         with open(private_key_file, "rb") as f:
-            private_key = serialization.load_pem_private_key(
-                f.read(),
-                password=None,
-                backend=default_backend()
-            )
+            private_key, public_key = \
+                encryption.deserialize_rsa_keypair(f.read())
 
         with open(os.path.join(FIXTURE_DIR, 'private.pem')) as i:
             fixture_private_key = i.read()
diff --git a/tests/unit/test_webapp.py b/tests/unit/test_webapp.py
index acff09a..8791a25 100644
--- a/tests/unit/test_webapp.py
+++ b/tests/unit/test_webapp.py
@@ -15,11 +15,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import os
 import json
 
 from six.moves import urllib
 
-from tests.base import ZuulTestCase
+from tests.base import ZuulTestCase, FIXTURE_DIR
 
 
 class TestWebapp(ZuulTestCase):
@@ -85,3 +86,13 @@
 
         self.assertEqual(1, len(data), data)
         self.assertEqual("org/project1", data[0]['project'], data)
+
+    def test_webapp_keys(self):
+        with open(os.path.join(FIXTURE_DIR, 'public.pem')) as f:
+            public_pem = f.read()
+
+        req = urllib.request.Request(
+            "http://localhost:%s/tenant-one/keys/gerrit/org/project.pub" %
+            self.port)
+        f = urllib.request.urlopen(req)
+        self.assertEqual(f.read(), public_pem)
diff --git a/tools/encrypt_secret.py b/tools/encrypt_secret.py
new file mode 100644
index 0000000..4865edd
--- /dev/null
+++ b/tools/encrypt_secret.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import os
+import subprocess
+import sys
+import tempfile
+from six.moves import urllib
+
+DESCRIPTION = """Encrypt a secret for Zuul.
+
+This program fetches a project-specific public key from a Zuul server and
+uses that to encrypt a secret.  The only prerequisite is an installed
+OpenSSL binary.
+"""
+
+
+def main():
+    parser = argparse.ArgumentParser(description=DESCRIPTION)
+    parser.add_argument('url',
+                        help="The base URL of the zuul server and tenant.  "
+                        "E.g., https://zuul.example.com/tenant-name")
+    # TODO(jeblair,mordred): When projects have canonical names, use that here.
+    # TODO(jeblair): Throw a fit if SSL is not used.
+    parser.add_argument('source',
+                        help="The Zuul source of the project.")
+    parser.add_argument('project',
+                        help="The name of the project.")
+    parser.add_argument('--infile',
+                        default=None,
+                        help="A filename whose contents will be encrypted.  "
+                        "If not supplied, the value will be read from "
+                        "standard input.")
+    parser.add_argument('--outfile',
+                        default=None,
+                        help="A filename to which the encrypted value will be "
+                        "written.  If not supplied, the value will be written "
+                        "to standard output.")
+    args = parser.parse_args()
+
+    req = urllib.request.Request("%s/keys/%s/%s.pub" % (
+        args.url, args.source, args.project))
+    pubkey = urllib.request.urlopen(req)
+
+    if args.infile:
+        with open(args.infile) as f:
+            plaintext = f.read()
+    else:
+        plaintext = sys.stdin.read()
+
+    pubkey_file = tempfile.NamedTemporaryFile(delete=False)
+    try:
+        pubkey_file.write(pubkey.read())
+        pubkey_file.close()
+
+        p = subprocess.Popen(['openssl', 'rsautl', '-encrypt',
+                              '-oaep', '-pubin', '-inkey',
+                              pubkey_file.name],
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE)
+        (stdout, stderr) = p.communicate(plaintext)
+        if p.returncode != 0:
+            raise Exception("Return code %s from openssl" % p.returncode)
+        ciphertext = stdout.encode('base64')
+    finally:
+        os.unlink(pubkey_file.name)
+
+    if args.outfile:
+        with open(args.outfile, "w") as f:
+            f.write(ciphertext)
+    else:
+        print(ciphertext)
+
+
+if __name__ == '__main__':
+    main()
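+
For reference, the openssl rsautl -encrypt -oaep invocation above performs RSA/OAEP (SHA-1 based) encryption of the plaintext under the fetched public key.  A rough Python equivalent using the cryptography library (a sketch for clarity only; this tool deliberately shells out to openssl so that an OpenSSL binary is its only prerequisite):

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding


def oaep_encrypt(public_pem, plaintext):
    # public_pem: PEM-encoded public key bytes; plaintext: bytes
    public_key = serialization.load_pem_public_key(
        public_pem, backend=default_backend())
    return public_key.encrypt(
        plaintext,
        padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()),
                     algorithm=hashes.SHA1(),
                     label=None))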
diff --git a/zuul/ansible/lookup/__init__.py b/zuul/ansible/lookup/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/zuul/ansible/lookup/__init__.py
diff --git a/zuul/ansible/lookup/_banned.py b/zuul/ansible/lookup/_banned.py
new file mode 100644
index 0000000..65708f8
--- /dev/null
+++ b/zuul/ansible/lookup/_banned.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+    def run(self, *args, **kwargs):
+        raise AnsibleError(
+            "Use of lookup modules that perform local actions on the executor"
+            " is forbidden.")
diff --git a/zuul/ansible/lookup/consul_kv.py b/zuul/ansible/lookup/consul_kv.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/consul_kv.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/credstash.py b/zuul/ansible/lookup/credstash.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/credstash.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/csvfile.py b/zuul/ansible/lookup/csvfile.py
new file mode 100644
index 0000000..6506aa2
--- /dev/null
+++ b/zuul/ansible/lookup/csvfile.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+
+from zuul.ansible import paths
+csvfile = paths._import_ansible_lookup_plugin("csvfile")
+
+
+class LookupModule(csvfile.LookupModule):
+
+    def read_csv(self, filename, *args, **kwargs):
+        paths._fail_if_unsafe(filename)
+        return super(LookupModule, self).read_csv(filename, *args, **kwargs)
diff --git a/zuul/ansible/lookup/dig.py b/zuul/ansible/lookup/dig.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/dig.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/dnstxt.py b/zuul/ansible/lookup/dnstxt.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/dnstxt.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/env.py b/zuul/ansible/lookup/env.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/env.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/etcd.py b/zuul/ansible/lookup/etcd.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/etcd.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/file.py b/zuul/ansible/lookup/file.py
new file mode 100644
index 0000000..7403535
--- /dev/null
+++ b/zuul/ansible/lookup/file.py
@@ -0,0 +1,28 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+
+from zuul.ansible import paths
+file_mod = paths._import_ansible_lookup_plugin("file")
+
+
+class LookupModule(file_mod.LookupModule):
+
+    def run(self, terms, variables=None, **kwargs):
+        for term in terms:
+            lookupfile = self.find_file_in_search_path(
+                variables, 'files', term)
+            paths._fail_if_unsafe(lookupfile)
+        return super(LookupModule, self).run(terms, variables, **kwargs)
diff --git a/zuul/ansible/lookup/fileglob.py b/zuul/ansible/lookup/fileglob.py
new file mode 100644
index 0000000..4b9b449
--- /dev/null
+++ b/zuul/ansible/lookup/fileglob.py
@@ -0,0 +1,45 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+# Forked from lib/ansible/plugins/lookup/fileglob.py in ansible
+
+import os
+import glob
+
+from zuul.ansible import paths
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, **kwargs):
+
+        ret = []
+        for term in terms:
+            term_file = os.path.basename(term)
+            dwimmed_path = self.find_file_in_search_path(
+                variables, 'files', os.path.dirname(term))
+            if dwimmed_path:
+                paths._fail_if_unsafe(dwimmed_path)
+                globbed = glob.glob(to_bytes(
+                    os.path.join(dwimmed_path, term_file),
+                    errors='surrogate_or_strict'))
+                ret.extend(
+                    to_text(g, errors='surrogate_or_strict')
+                    for g in globbed if os.path.isfile(g))
+        return ret
diff --git a/zuul/ansible/lookup/filetree.py b/zuul/ansible/lookup/filetree.py
new file mode 100644
index 0000000..0c054a3
--- /dev/null
+++ b/zuul/ansible/lookup/filetree.py
@@ -0,0 +1,32 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from zuul.ansible import paths
+filetree = paths._import_ansible_lookup_plugin("filetree")
+
+
+class LookupModule(filetree.LookupModule):
+
+    def run(self, terms, variables=None, **kwargs):
+        basedir = self.get_basedir(variables)
+        for term in terms:
+            term_file = os.path.basename(term)
+            dwimmed_path = self._loader.path_dwim_relative(
+                basedir, 'files', os.path.dirname(term))
+            path = os.path.join(dwimmed_path, term_file)
+            paths._fail_if_unsafe(path)
+        return super(LookupModule, self).run(terms, variables, **kwargs)
diff --git a/zuul/ansible/lookup/first_found.py b/zuul/ansible/lookup/first_found.py
new file mode 100644
index 0000000..d741df0
--- /dev/null
+++ b/zuul/ansible/lookup/first_found.py
@@ -0,0 +1,201 @@
+# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+# take a list of files and (optionally) a list of paths
+# return the first existing file found in the paths
+# [file1, file2, file3], [path1, path2, path3]
+# search order is:
+# path1/file1
+# path1/file2
+# path1/file3
+# path2/file1
+# path2/file2
+# path2/file3
+# path3/file1
+# path3/file2
+# path3/file3
+
+# first file found with os.path.exists() is returned
+# if no file matches, an AnsibleError is raised
+# EXAMPLES
+#  - name: copy first existing file found to /some/file
+#    action: copy src=$item dest=/some/file
+#    with_first_found:
+#     - files: foo ${inventory_hostname} bar
+#       paths: /tmp/production /tmp/staging
+
+# that will look for files in this order:
+# /tmp/production/foo
+#                 ${inventory_hostname}
+#                 bar
+# /tmp/staging/foo
+#              ${inventory_hostname}
+#              bar
+
+#  - name: copy first existing file found to /some/file
+#    action: copy src=$item dest=/some/file
+#    with_first_found:
+#     - files: /some/place/foo ${inventory_hostname} /some/place/else
+
+#  that will look for files in this order:
+#  /some/place/foo
+#  $relative_path/${inventory_hostname}
+#  /some/place/else
+
+# example - including tasks:
+#  tasks:
+#  - include: $item
+#    with_first_found:
+#     - files: generic
+#       paths: tasks/staging tasks/production
+# this will include the tasks in the file generic where it is found first
+# (staging or production)
+
+# example simple file lists
+# tasks:
+# - name: first found file
+#   action: copy src=$item dest=/etc/file.cfg
+#   with_first_found:
+#   - files: foo.${inventory_hostname} foo
+
+
+# example skipping if no matched files
+# First_found also offers the ability to control whether or not failing
+# to find a file returns an error or not
+#
+# - name: first found file - or skip
+#   action: copy src=$item dest=/etc/file.cfg
+#   with_first_found:
+#   - files: foo.${inventory_hostname}
+#     skip: true
+
+# example a role with default configuration and configuration per host
+# you can set multiple terms with their own files and paths to look through.
+# consider a role that sets some configuration per host falling back on a
+# default config.
+#
+# - name: some configuration template
+#   template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
+#   with_first_found:
+#    - files:
+#       - ${inventory_hostname}/etc/file.cfg
+#      paths:
+#       - ../../../templates.overwrites
+#       - ../../../templates
+#    - files:
+#       - etc/file.cfg
+#      paths:
+#       - templates
+
+# the above will return an empty list if the files cannot be found at all
+# if skip is unspecified or set to false, an error is raised instead, which
+# can be caught by ignore_errors: true for that action.
+
+# finally - if you want you can use it, in place to replace
+# first_available_file:
+# you simply cannot use the - files, path or skip options. simply replace
+# first_available_file with with_first_found and leave the file listing in
+# place
+#
+#
+#  - name: with_first_found like first_available_file
+#    action: copy src=$item dest=/tmp/faftest
+#    with_first_found:
+#     - ../files/foo
+#     - ../files/bar
+#     - ../files/baz
+#    ignore_errors: true
+
+import os
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.constants import mk_boolean as boolean
+from ansible.errors import AnsibleLookupError
+from ansible.errors import AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+from zuul.ansible import paths as zuul_paths
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables, **kwargs):
+
+        anydict = False
+        skip = False
+
+        for term in terms:
+            if isinstance(term, dict):
+                anydict = True
+
+        total_search = []
+        if anydict:
+            for term in terms:
+                if isinstance(term, dict):
+                    files = term.get('files', [])
+                    paths = term.get('paths', [])
+                    skip = boolean(term.get('skip', False))
+
+                    filelist = files
+                    if isinstance(files, string_types):
+                        files = files.replace(',', ' ')
+                        files = files.replace(';', ' ')
+                        filelist = files.split(' ')
+
+                    pathlist = paths
+                    if paths:
+                        if isinstance(paths, string_types):
+                            paths = paths.replace(',', ' ')
+                            paths = paths.replace(':', ' ')
+                            paths = paths.replace(';', ' ')
+                            pathlist = paths.split(' ')
+
+                    if not pathlist:
+                        total_search = filelist
+                    else:
+                        for path in pathlist:
+                            for fn in filelist:
+                                f = os.path.join(path, fn)
+                                total_search.append(f)
+                else:
+                    total_search.append(term)
+        else:
+            total_search = self._flatten(terms)
+
+        for fn in total_search:
+            zuul_paths._fail_if_unsafe(fn)
+            try:
+                fn = self._templar.template(fn)
+            except (AnsibleUndefinedVariable, UndefinedError):
+                continue
+
+            # get subdir if set by task executor, default to files otherwise
+            subdir = getattr(self, '_subdir', 'files')
+            path = None
+            path = self.find_file_in_search_path(
+                variables, subdir, fn, ignore_missing=True)
+            if path is not None:
+                return [path]
+        else:
+            if skip:
+                return []
+            else:
+                raise AnsibleLookupError(
+                    "No file was found when using with_first_found. Use the"
+                    " 'skip: true' option to allow this task to be skipped if"
+                    " no files are found")
diff --git a/zuul/ansible/lookup/hashi_vault.py b/zuul/ansible/lookup/hashi_vault.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/hashi_vault.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/ini.py b/zuul/ansible/lookup/ini.py
new file mode 100644
index 0000000..51127ff
--- /dev/null
+++ b/zuul/ansible/lookup/ini.py
@@ -0,0 +1,31 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+
+from zuul.ansible import paths
+ini = paths._import_ansible_lookup_plugin("ini")
+
+
+class LookupModule(ini.LookupModule):
+
+    def read_properties(self, filename, *args, **kwargs):
+        paths._fail_if_unsafe(filename)
+        return super(LookupModule, self).read_properties(
+            filename, *args, **kwargs)
+
+    def read_ini(self, filename, *args, **kwargs):
+        paths._fail_if_unsafe(filename)
+        return super(LookupModule, self).read_ini(
+            filename, *args, **kwargs)
diff --git a/zuul/ansible/lookup/keyring.py b/zuul/ansible/lookup/keyring.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/keyring.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/lastpass.py b/zuul/ansible/lookup/lastpass.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/lastpass.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/lines.py b/zuul/ansible/lookup/lines.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/lines.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/mongodb.py b/zuul/ansible/lookup/mongodb.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/mongodb.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/password.py b/zuul/ansible/lookup/password.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/password.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/passwordstore.py b/zuul/ansible/lookup/passwordstore.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/passwordstore.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/pipe.py b/zuul/ansible/lookup/pipe.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/pipe.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/redis_kv.py b/zuul/ansible/lookup/redis_kv.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/redis_kv.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/shelvefile.py b/zuul/ansible/lookup/shelvefile.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/shelvefile.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/template.py b/zuul/ansible/lookup/template.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/template.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/lookup/url.py b/zuul/ansible/lookup/url.py
new file mode 120000
index 0000000..d45b9c4
--- /dev/null
+++ b/zuul/ansible/lookup/url.py
@@ -0,0 +1 @@
+_banned.py
\ No newline at end of file
diff --git a/zuul/ansible/paths.py b/zuul/ansible/paths.py
index e387732..bc61975 100644
--- a/zuul/ansible/paths.py
+++ b/zuul/ansible/paths.py
@@ -16,7 +16,9 @@
 import imp
 import os
 
+from ansible.errors import AnsibleError
 import ansible.plugins.action
+import ansible.plugins.lookup
 
 
 def _is_safe_path(path):
@@ -35,6 +37,12 @@
             curdir=os.path.abspath(os.path.curdir)))
 
 
+def _fail_if_unsafe(path):
+    if not _is_safe_path(path):
+        msg_dict = _fail_dict(path)
+        raise AnsibleError(msg_dict['msg'])
+
+
 def _import_ansible_action_plugin(name):
     # Ansible forces the import of our action plugins
     # (zuul.ansible.action.foo) as ansible.plugins.action.foo, which
@@ -51,3 +59,11 @@
     return imp.load_module(
         'zuul.ansible.protected.action.' + name,
         *imp.find_module(name, ansible.plugins.action.__path__))
+
+
+def _import_ansible_lookup_plugin(name):
+    # See _import_ansible_action_plugin
+
+    return imp.load_module(
+        'zuul.ansible.protected.lookup.' + name,
+        *imp.find_module(name, ansible.plugins.lookup.__path__))
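+
_is_safe_path() itself is defined earlier in paths.py and is not part of this hunk.  Conceptually (a sketch under that assumption, not the real implementation) it rejects any lookup target that resolves outside the job's working directory, and the new _fail_if_unsafe() turns that rejection into an AnsibleError:

import os


def _is_safe_path_sketch(path, workdir):
    # Resolve symlinks, ~ and relative segments before comparing, so a
    # crafted path cannot escape the work tree.
    full = os.path.realpath(os.path.abspath(os.path.expanduser(path)))
    root = os.path.realpath(workdir)
    return full == root or full.startswith(root + os.sep)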
diff --git a/zuul/cmd/__init__.py b/zuul/cmd/__init__.py
index 9fa4c03..f2a2612 100644
--- a/zuul/cmd/__init__.py
+++ b/zuul/cmd/__init__.py
@@ -24,10 +24,10 @@
 import sys
 import traceback
 
-import yaml
 yappi = extras.try_import('yappi')
 
 import zuul.lib.connections
+from zuul.lib import yamlutil as yaml
 
 # Do not import modules that will pull in paramiko which must not be
 # imported until after the daemonization.
diff --git a/zuul/configloader.py b/zuul/configloader.py
index ae980ac..5e88ee7 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -15,19 +15,17 @@
 import os
 import logging
 import six
-import yaml
 import pprint
 import textwrap
 
 import voluptuous as vs
 
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.asymmetric import rsa
-from cryptography.hazmat.primitives import serialization
 from zuul import model
+from zuul.lib import yamlutil as yaml
 import zuul.manager.dependent
 import zuul.manager.independent
 from zuul import change_matcher
+from zuul.lib import encryption
 
 
 # Several forms accept either a single item or a list, this makes
@@ -88,7 +86,8 @@
 
 class ZuulSafeLoader(yaml.SafeLoader):
     zuul_node_types = frozenset(('job', 'nodeset', 'secret', 'pipeline',
-                                 'project', 'project-template'))
+                                 'project', 'project-template',
+                                 'semaphore'))
 
     def __init__(self, stream, context):
         super(ZuulSafeLoader, self).__init__(stream)
@@ -125,17 +124,28 @@
         loader.dispose()
 
 
-class EncryptedPKCS1(yaml.YAMLObject):
-    yaml_tag = u'!encrypted/pkcs1'
+class EncryptedPKCS1_OAEP(yaml.YAMLObject):
+    yaml_tag = u'!encrypted/pkcs1-oaep'
     yaml_loader = yaml.SafeLoader
 
     def __init__(self, ciphertext):
-        self.ciphertext = ciphertext
+        self.ciphertext = ciphertext.decode('base64')
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __eq__(self, other):
+        if not isinstance(other, EncryptedPKCS1_OAEP):
+            return False
+        return (self.ciphertext == other.ciphertext)
 
     @classmethod
     def from_yaml(cls, loader, node):
         return cls(node.value)
 
+    def decrypt(self, private_key):
+        return encryption.decrypt_pkcs1_oaep(self.ciphertext, private_key)
+
 
 class NodeSetParser(object):
     @staticmethod
@@ -166,7 +176,7 @@
 class SecretParser(object):
     @staticmethod
     def getSchema():
-        data = {str: vs.Any(str, EncryptedPKCS1)}
+        data = {str: vs.Any(str, EncryptedPKCS1_OAEP)}
 
         secret = {vs.Required('name'): str,
                   vs.Required('data'): data,
@@ -180,7 +190,7 @@
     def fromYaml(layout, conf):
         with configuration_exceptions('secret', conf):
             SecretParser.getSchema()(conf)
-        s = model.Secret(conf['name'])
+        s = model.Secret(conf['name'], conf['_source_context'])
         s.secret_data = conf['data']
         return s
 
@@ -213,7 +223,7 @@
                'success-url': str,
                'hold-following-changes': bool,
                'voting': bool,
-               'mutex': str,
+               'semaphore': str,
                'tags': to_list(str),
                'branches': to_list(str),
                'files': to_list(str),
@@ -231,6 +241,7 @@
                'repos': to_list(str),
                'vars': dict,
                'dependencies': to_list(str),
+               'allowed-projects': to_list(str),
                }
 
         return vs.Schema(job)
@@ -240,7 +251,7 @@
         'workspace',
         'voting',
         'hold-following-changes',
-        'mutex',
+        'semaphore',
         'attempts',
         'failure-message',
         'success-message',
@@ -261,7 +272,19 @@
         job = model.Job(conf['name'])
         job.source_context = conf.get('_source_context')
         if 'auth' in conf:
-            job.auth = conf.get('auth')
+            job.auth = model.AuthContext()
+            if 'inherit' in conf['auth']:
+                job.auth.inherit = conf['auth']['inherit']
+
+            for secret_name in conf['auth'].get('secrets', []):
+                secret = layout.secrets[secret_name]
+                if secret.source_context != job.source_context:
+                    raise Exception(
+                        "Unable to use secret %s.  Secrets must be "
+                        "defined in the same project in which they "
+                        "are used" % secret_name)
+                job.auth.secrets.append(secret.decrypt(
+                    job.source_context.project.private_key))
 
         if 'parent' in conf:
             parent = layout.getJob(conf['parent'])
@@ -328,6 +351,19 @@
         if variables:
             job.updateVariables(variables)
 
+        allowed_projects = conf.get('allowed-projects', None)
+        if allowed_projects:
+            allowed = []
+            for p in as_list(allowed_projects):
+                # TODOv3(jeblair): this limits allowed_projects to the same
+                # source; we should remove that limitation.
+                source = job.source_context.project.connection_name
+                (trusted, project) = tenant.getRepo(source, p)
+                if project is None:
+                    raise Exception("Unknown project %s" % (p,))
+                allowed.append(project.name)
+            job.allowed_projects = frozenset(allowed)
+
         # If the definition for this job came from a project repo,
         # implicitly apply a branch matcher for the branch it was on.
         if (not job.source_context.trusted):
@@ -573,6 +609,7 @@
                     'footer-message': str,
                     'dequeue-on-new-patchset': bool,
                     'ignore-dependencies': bool,
+                    'allow-secrets': bool,
                     'disable-after-consecutive-failures':
                         vs.All(int, vs.Range(min=1)),
                     'window': window,
@@ -620,6 +657,7 @@
             'dequeue-on-new-patchset', True)
         pipeline.ignore_dependencies = conf.get(
             'ignore-dependencies', False)
+        pipeline.allow_secrets = conf.get('allow-secrets', False)
 
         for conf_key, action in PipelineParser.reporter_actions.items():
             reporter_set = []
@@ -683,6 +721,25 @@
         return pipeline
 
 
+class SemaphoreParser(object):
+    @staticmethod
+    def getSchema():
+        semaphore = {vs.Required('name'): str,
+                     'max': int,
+                     '_source_context': model.SourceContext,
+                     '_start_mark': yaml.Mark,
+                     }
+
+        return vs.Schema(semaphore)
+
+    @staticmethod
+    def fromYaml(conf):
+        SemaphoreParser.getSchema()(conf)
+        semaphore = model.Semaphore(conf['name'], conf.get('max', 1))
+        semaphore.source_context = conf.get('_source_context')
+        return semaphore
+
+
 class TenantParser(object):
     log = logging.getLogger("zuul.TenantParser")
 
@@ -759,26 +816,15 @@
         TenantParser.log.info(
             "Generating RSA keypair for project %s" % (project.name,)
         )
+        private_key, public_key = encryption.generate_rsa_keypair()
+        pem_private_key = encryption.serialize_rsa_private_key(private_key)
 
-        # Generate private RSA key
-        private_key = rsa.generate_private_key(
-            public_exponent=65537,
-            key_size=4096,
-            backend=default_backend()
-        )
-        # Serialize private key
-        pem_private_key = private_key.private_bytes(
-            encoding=serialization.Encoding.PEM,
-            format=serialization.PrivateFormat.TraditionalOpenSSL,
-            encryption_algorithm=serialization.NoEncryption()
-        )
-
+        # Dump keys to filesystem.  We only save the private key
+        # because the public key can be constructed from it.
         TenantParser.log.info(
             "Saving RSA keypair for project %s to %s" % (
                 project.name, project.private_key_file)
         )
-
-        # Dump keys to filesystem
         with open(project.private_key_file, 'wb') as f:
             f.write(pem_private_key)
 
@@ -790,16 +836,10 @@
                 'Private key file {0} not found'.format(
                     project.private_key_file))
 
-        # Load private key
+        # Load keypair
         with open(project.private_key_file, "rb") as f:
-            project.private_key = serialization.load_pem_private_key(
-                f.read(),
-                password=None,
-                backend=default_backend()
-            )
-
-        # Extract public key from private
-        project.public_key = project.private_key.public_key()
+            (project.private_key, project.public_key) = \
+                encryption.deserialize_rsa_keypair(f.read())
 
     @staticmethod
     def _loadTenantConfigRepos(project_key_dir, connections, conf_tenant):
@@ -946,6 +986,9 @@
         for config_job in data.jobs:
             layout.addJob(JobParser.fromYaml(tenant, layout, config_job))
 
+        for config_semaphore in data.semaphores:
+            layout.addSemaphore(SemaphoreParser.fromYaml(config_semaphore))
+
         for config_template in data.project_templates:
             layout.addProjectTemplate(ProjectTemplateParser.fromYaml(
                 tenant, layout, config_template))
@@ -1052,6 +1095,12 @@
         # or deleting pipelines in dynamic layout changes.
         layout.pipelines = tenant.layout.pipelines
 
+        # NOTE: the semaphore definitions are copied from the static layout
+        # here.  A semaphore must have exactly one max value at any time
+        # rather than a per-patch value, so dynamic semaphore configuration
+        # changes are not supported.
+        layout.semaphores = tenant.layout.semaphores
+
         for config_job in config.jobs:
             layout.addJob(JobParser.fromYaml(tenant, layout, config_job))
 
diff --git a/zuul/driver/__init__.py b/zuul/driver/__init__.py
index 36e83bd..1cc5235 100644
--- a/zuul/driver/__init__.py
+++ b/zuul/driver/__init__.py
@@ -117,25 +117,28 @@
 class TriggerInterface(object):
     """The trigger interface.
 
-    A driver which is able to supply a Trigger should implement this
+    A driver which is able to supply a trigger should implement this
     interface.
 
     """
 
     @abc.abstractmethod
     def getTrigger(self, connection, config=None):
-        """Create and return a new Trigger object.
+        """Create and return a new trigger object.
 
         This method is required by the interface.
 
+        The trigger object returned should inherit from the
+        :py:class:`~zuul.trigger.BaseTrigger` class.
+
         :arg Connection connection: The Connection object associated
             with the trigger (as previously returned by getConnection)
             or None.
         :arg dict config: The configuration information supplied along
             with the trigger in the layout.
 
-        :returns: A new Trigger object.
-        :rtype: Trigger
+        :returns: A new trigger object.
+        :rtype: :py:class:`~zuul.trigger.BaseTrigger`
 
         """
         pass
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 514aa1f..e3c726f 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -93,6 +93,9 @@
             event.ref = refupdate.get('refName')
             event.oldrev = refupdate.get('oldRev')
             event.newrev = refupdate.get('newRev')
+        if event.project_name is None:
+            # ref-replica* events
+            event.project_name = data.get('project')
         # Map the event types to a field name holding a Gerrit
         # account attribute. See Gerrit stream-event documentation
         # in cmd-stream-events.html
diff --git a/zuul/driver/sql/alembic_reporter.ini b/zuul/driver/sql/alembic.ini
similarity index 100%
rename from zuul/driver/sql/alembic_reporter.ini
rename to zuul/driver/sql/alembic.ini
diff --git a/zuul/driver/sql/alembic_reporter/versions/1dd914d4a482_allow_score_to_be_null.py b/zuul/driver/sql/alembic_reporter/versions/1dd914d4a482_allow_score_to_be_null.py
new file mode 100644
index 0000000..b153cab
--- /dev/null
+++ b/zuul/driver/sql/alembic_reporter/versions/1dd914d4a482_allow_score_to_be_null.py
@@ -0,0 +1,25 @@
+"""Allow score to be null
+
+Revision ID: 1dd914d4a482
+Revises: 4d3ebd7f06b9
+Create Date: 2017-03-28 08:09:32.908643
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1dd914d4a482'
+down_revision = '4d3ebd7f06b9'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.alter_column('zuul_buildset', 'score', nullable=True,
+                    existing_type=sa.Integer)
+
+
+def downgrade():
+    raise Exception("Downgrades not supported")
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index 69e53df..31bc13a 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -80,7 +80,7 @@
             sa.Column('change', sa.Integer, nullable=True),
             sa.Column('patchset', sa.Integer, nullable=True),
             sa.Column('ref', sa.String(255)),
-            sa.Column('score', sa.Integer),
+            sa.Column('score', sa.Integer, nullable=True),
             sa.Column('message', sa.TEXT()),
         )
 
diff --git a/zuul/driver/sql/sqlreporter.py b/zuul/driver/sql/sqlreporter.py
index 2129f53..d6e547d 100644
--- a/zuul/driver/sql/sqlreporter.py
+++ b/zuul/driver/sql/sqlreporter.py
@@ -28,6 +28,7 @@
     def __init__(self, driver, connection, config={}):
         super(SQLReporter, self).__init__(
             driver, connection, config)
+        # TODO(jeblair): document that this is stored as NULL if unspecified
         self.result_score = config.get('score', None)
 
     def report(self, source, pipeline, item):
@@ -37,13 +38,6 @@
             self.log.warn("SQL reporter (%s) is disabled " % self)
             return
 
-        if self.driver.sched.config.has_option('zuul', 'url_pattern'):
-            url_pattern = self.driver.sched.config.get('zuul', 'url_pattern')
-        else:
-            url_pattern = None
-
-        score = self.config.get('score', 0)
-
         with self.connection.engine.begin() as conn:
             buildset_ins = self.connection.zuul_buildset_table.insert().values(
                 zuul_ref=item.current_build_set.ref,
@@ -52,7 +46,7 @@
                 change=item.change.number,
                 patchset=item.change.patchset,
                 ref=item.change.refspec,
-                score=score,
+                score=self.result_score,
                 message=self._formatItemReport(
                     pipeline, item, with_jobs=False),
             )
@@ -67,7 +61,7 @@
                     # information about the change.
                     continue
 
-                (result, url) = item.formatJobResult(job, url_pattern)
+                (result, url) = item.formatJobResult(job)
 
                 build_inserts.append({
                     'buildset_id': buildset_ins_result.inserted_primary_key,
diff --git a/zuul/executor/ansiblelaunchserver.py b/zuul/executor/ansiblelaunchserver.py
index 875cf2b..0202bdd 100644
--- a/zuul/executor/ansiblelaunchserver.py
+++ b/zuul/executor/ansiblelaunchserver.py
@@ -35,13 +35,13 @@
 import Queue
 
 import gear
-import yaml
 import jenkins_jobs.builder
 import jenkins_jobs.formatter
 import zmq
 
 import zuul.ansible.library
 from zuul.lib import commandsocket
+from zuul.lib import yamlutil as yaml
 
 ANSIBLE_WATCHDOG_GRACE = 5 * 60
 ANSIBLE_DEFAULT_TIMEOUT = 2 * 60 * 60
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index fd92dd9..90cfa9b 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -16,7 +16,6 @@
 import gear
 import json
 import logging
-import os
 import time
 import threading
 from uuid import uuid4
@@ -266,13 +265,6 @@
             params['ZUUL_REF'] = item.change.ref
             params['ZUUL_COMMIT'] = item.change.newrev
 
-        # The destination_path is a unique path for this build request
-        # and generally where the logs are expected to be placed
-        destination_path = os.path.join(item.change.getBasePath(),
-                                        pipeline.name, job.name, uuid[:7])
-        params['BASE_LOG_PATH'] = item.change.getBasePath()
-        params['LOG_PATH'] = destination_path
-
         # This is what we should be heading toward for parameters:
 
         # required:
@@ -319,6 +311,9 @@
                               public_ipv4=node.public_ipv4))
         params['nodes'] = nodes
         params['vars'] = copy.deepcopy(job.variables)
+        if job.auth:
+            for secret in job.auth.secrets:
+                params['vars'][secret.name] = copy.deepcopy(secret.secret_data)
         params['vars']['zuul'] = zuul_params
         projects = set()
         if job.repos:
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 60b30c7..582d099 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -24,15 +24,17 @@
 import threading
 import time
 import traceback
-import yaml
+from zuul.lib.yamlutil import yaml
 
 import gear
 import git
+from six.moves import shlex_quote
 
 import zuul.merger.merger
 import zuul.ansible.action
 import zuul.ansible.callback
 import zuul.ansible.library
+import zuul.ansible.lookup
 from zuul.lib import commandsocket
 
 COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
@@ -274,6 +276,10 @@
         if not os.path.exists(self.callback_dir):
             os.makedirs(self.callback_dir)
 
+        self.lookup_dir = os.path.join(ansible_dir, 'lookup')
+        if not os.path.exists(self.lookup_dir):
+            os.makedirs(self.lookup_dir)
+
         library_path = os.path.dirname(os.path.abspath(
             zuul.ansible.library.__file__))
         for fn in os.listdir(library_path):
@@ -289,6 +295,11 @@
         for fn in os.listdir(callback_path):
             shutil.copy(os.path.join(callback_path, fn), self.callback_dir)
 
+        lookup_path = os.path.dirname(os.path.abspath(
+            zuul.ansible.lookup.__file__))
+        for fn in os.listdir(lookup_path):
+            shutil.copy(os.path.join(lookup_path, fn), self.lookup_dir)
+
         self.job_workers = {}
 
     def _getMerger(self, root):
@@ -634,22 +645,31 @@
         return result
 
     def getHostList(self, args):
-        # TODO(clarkb): This prefers v4 because we're not sure if we
-        # expect v6 to work.  If we can determine how to prefer v6
         hosts = []
         for node in args['nodes']:
-            ip = node.get('public_ipv4')
-            if not ip:
-                ip = node.get('public_ipv6')
+            # NOTE(mordred): This assumes that the nodepool launcher
+            # and the zuul executor have similar network
+            # characteristics: the launcher tests for ipv6 viability
+            # and, if ipv6 is viable and the node has an ipv6 address,
+            # that address will be the interface_ip.  force-ipv4 can
+            # be set to True in clouds.yaml for a cloud if this
+            # results in the wrong address ending up in interface_ip.
+            # TODO(jeblair): Move this notice to the docs.
+            ip = node.get('interface_ip')
             host_vars = dict(
                 ansible_host=ip,
                 nodepool_az=node.get('az'),
                 nodepool_provider=node.get('provider'),
                 nodepool_region=node.get('region'))
+
+            host_keys = []
+            for key in node.get('host_keys'):
+                host_keys.append("%s %s" % (ip, key))
+
             hosts.append(dict(
                 name=node['name'],
                 host_vars=host_vars,
-                host_keys=node.get('host_keys')))
+                host_keys=host_keys))
         return hosts
 
     def _blockPluginDirs(self, path):
@@ -862,6 +882,8 @@
             if not trusted:
                 config.write('action_plugins = %s\n'
                              % self.executor_server.action_dir)
+                config.write('lookup_plugins = %s\n'
+                             % self.executor_server.lookup_dir)
 
             # On trusted jobs, we want to prevent the printing of args,
             # since trusted jobs might have access to secrets that they may
@@ -907,14 +929,17 @@
         env_copy['LOGNAME'] = 'zuul'
 
         if trusted:
-            env_copy['ANSIBLE_CONFIG'] = self.jobdir.trusted_config
+            config_file = self.jobdir.trusted_config
         else:
-            env_copy['ANSIBLE_CONFIG'] = self.jobdir.untrusted_config
+            config_file = self.jobdir.untrusted_config
+
+        env_copy['ANSIBLE_CONFIG'] = config_file
 
         with self.proc_lock:
             if self.aborted:
                 return (self.RESULT_ABORTED, None)
-            self.log.debug("Ansible command: %s" % (cmd,))
+            self.log.debug("Ansible command: ANSIBLE_CONFIG=%s %s",
+                           config_file, " ".join(shlex_quote(c) for c in cmd))
             self.proc = subprocess.Popen(
                 cmd,
                 cwd=self.jobdir.work_root,
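
getHostList() above now prefixes every host key with the node's
interface_ip, so each entry can be written verbatim into a known_hosts
style file. A small sketch of that transformation, with made-up node data:

    # The node dict below is invented for illustration.
    node = {
        'name': 'ubuntu-xenial-1',
        'interface_ip': '2001:db8::10',
        'host_keys': ['ssh-ed25519 AAAAC3Nz...', 'ssh-rsa AAAAB3Nz...'],
    }

    ip = node.get('interface_ip')
    host_keys = ['%s %s' % (ip, key) for key in node.get('host_keys')]
    # ['2001:db8::10 ssh-ed25519 AAAAC3Nz...',
    #  '2001:db8::10 ssh-rsa AAAAB3Nz...']
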
diff --git a/zuul/lib/cloner.py b/zuul/lib/cloner.py
index 18dea91..bec8ebe 100644
--- a/zuul/lib/cloner.py
+++ b/zuul/lib/cloner.py
@@ -17,13 +17,13 @@
 import logging
 import os
 import re
-import yaml
 
 import six
 
 from git import GitCommandError
 from zuul import exceptions
 from zuul.lib.clonemapper import CloneMapper
+from zuul.lib import yamlutil as yaml
 from zuul.merger.merger import Repo
 
 
diff --git a/zuul/lib/encryption.py b/zuul/lib/encryption.py
new file mode 100644
index 0000000..24224d8
--- /dev/null
+++ b/zuul/lib/encryption.py
@@ -0,0 +1,138 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.primitives import hashes
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#generation
+def generate_rsa_keypair():
+    """Generate an RSA keypair.
+
+    :returns: A tuple (private_key, public_key)
+
+    """
+    private_key = rsa.generate_private_key(
+        public_exponent=65537,
+        key_size=4096,
+        backend=default_backend()
+    )
+    public_key = private_key.public_key()
+    return (private_key, public_key)
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#key-serialization
+def serialize_rsa_private_key(private_key):
+    """Serialize an RSA private key
+
+    This returns a PEM-encoded serialized form of an RSA private key
+    suitable for storing on disk.  It is not password-protected.
+
+    :arg private_key: A private key object as returned by
+        :func:`generate_rsa_keypair`
+
+    :returns: A PEM-encoded string representation of the private key.
+
+    """
+    return private_key.private_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PrivateFormat.TraditionalOpenSSL,
+        encryption_algorithm=serialization.NoEncryption()
+    )
+
+
+def serialize_rsa_public_key(public_key):
+    """Serialize an RSA public key
+
+    This returns a PEM-encoded serialized form of an RSA public key
+    suitable for distribution.
+
+    :arg public_key: A public key object as returned by
+        :func:`generate_rsa_keypair`
+
+    :returns: A PEM-encoded string representation of the public key.
+
+    """
+    return public_key.public_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PublicFormat.SubjectPublicKeyInfo
+    )
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#key-loading
+def deserialize_rsa_keypair(data):
+    """Deserialize an RSA private key
+
+    This deserializes an RSA private key and returns the keypair
+    (private and public) for use in decryption.
+
+    :arg data: A PEM-encoded serialized private key
+
+    :returns: A tuple (private_key, public_key)
+
+    """
+    private_key = serialization.load_pem_private_key(
+        data,
+        password=None,
+        backend=default_backend()
+    )
+    public_key = private_key.public_key()
+    return (private_key, public_key)
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#decryption
+def decrypt_pkcs1_oaep(ciphertext, private_key):
+    """Decrypt PKCS#1 (RSAES-OAEP) encoded ciphertext
+
+    :arg ciphertext: A string previously encrypted with PKCS#1
+        (RSAES-OAEP).
+    :arg private_key: A private key object as returned by
+        :func:`generate_rsa_keypair`
+
+    :returns: The decrypted form of the ciphertext as a string.
+
+    """
+    return private_key.decrypt(
+        ciphertext,
+        padding.OAEP(
+            mgf=padding.MGF1(algorithm=hashes.SHA1()),
+            algorithm=hashes.SHA1(),
+            label=None
+        )
+    )
+
+
+# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#encryption
+def encrypt_pkcs1_oaep(plaintext, public_key):
+    """Encrypt data with PKCS#1 (RSAES-OAEP)
+
+    :arg plaintext: A string to encrypt with PKCS#1 (RSAES-OAEP).
+
+    :arg public_key: A public key object as returned by
+        :func:`generate_rsa_keypair`
+
+    :returns: The encrypted form of the plaintext.
+
+    """
+    return public_key.encrypt(
+        plaintext,
+        padding.OAEP(
+            mgf=padding.MGF1(algorithm=hashes.SHA1()),
+            algorithm=hashes.SHA1(),
+            label=None
+        )
+    )
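
A short round trip of the helpers above, assuming python-cryptography is
installed (4096-bit key generation takes a few seconds):

    from zuul.lib import encryption

    private_key, public_key = encryption.generate_rsa_keypair()

    ciphertext = encryption.encrypt_pkcs1_oaep(b'a deploy password', public_key)
    assert encryption.decrypt_pkcs1_oaep(ciphertext, private_key) == \
        b'a deploy password'

    # Keys can be serialized to PEM for storage and loaded back later.
    pem = encryption.serialize_rsa_private_key(private_key)
    private_key2, public_key2 = encryption.deserialize_rsa_keypair(pem)

Note that RSAES-OAEP with SHA-1 and a 4096-bit modulus can encrypt at most
470 bytes per message, so these helpers suit small secrets rather than bulk
data.
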
diff --git a/zuul/lib/yamlutil.py b/zuul/lib/yamlutil.py
new file mode 100644
index 0000000..2419906
--- /dev/null
+++ b/zuul/lib/yamlutil.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import yaml
+from yaml import YAMLObject, YAMLError  # noqa: F401
+
+try:
+    from yaml import cyaml
+    import _yaml
+    SafeLoader = cyaml.CSafeLoader
+    SafeDumper = cyaml.CSafeDumper
+    Mark = _yaml.Mark
+except ImportError:
+    SafeLoader = yaml.SafeLoader
+    SafeDumper = yaml.SafeDumper
+    Mark = yaml.Mark
+
+
+def safe_load(stream, *args, **kwargs):
+    return yaml.load(stream, *args, Loader=SafeLoader, **kwargs)
+
+
+def safe_dump(stream, *args, **kwargs):
+    return yaml.dump(stream, *args, Dumper=SafeDumper, **kwargs)
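
The wrapper prefers the libyaml C loader and dumper when they are available
and falls back to the pure-Python ones otherwise, so callers only change
their import. A minimal usage sketch:

    from zuul.lib import yamlutil as yaml

    data = yaml.safe_load('tenant:\n  name: example\n')
    assert data == {'tenant': {'name': 'example'}}

    text = yaml.safe_dump(data, default_flow_style=False)
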
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 32f0cbb..75e8edb 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -81,8 +81,8 @@
                         tags.append('[hold]')
                     if not variant.voting:
                         tags.append('[nonvoting]')
-                    if variant.mutex:
-                        tags.append('[mutex: %s]' % variant.mutex)
+                    if variant.semaphore:
+                        tags.append('[semaphore: %s]' % variant.semaphore)
                     tags = ' '.join(tags)
                     self.log.info("      %s%s %s" % (repr(variant),
                                                      efilters, tags))
@@ -386,7 +386,8 @@
         if not item.current_build_set.layout:
             return False
 
-        jobs = item.findJobsToRun(self.sched.mutex)
+        jobs = item.findJobsToRun(
+            item.pipeline.layout.tenant.semaphore_handler)
         if jobs:
             self._executeJobs(item, jobs)
 
@@ -411,7 +412,8 @@
                 self.log.exception("Exception while canceling build %s "
                                    "for change %s" % (build, item.change))
             finally:
-                self.sched.mutex.release(build.build_set.item, build.job)
+                old_build_set.layout.tenant.semaphore_handler.release(
+                    old_build_set.item, build.job)
 
             if not was_running:
                 try:
@@ -663,7 +665,7 @@
         item = build.build_set.item
 
         item.setResult(build)
-        self.sched.mutex.release(item, build.job)
+        item.pipeline.layout.tenant.semaphore_handler.release(item, build.job)
         self.log.debug("Item %s status is now:\n %s" %
                        (item, item.formatStatus()))
 
diff --git a/zuul/model.py b/zuul/model.py
index 0f9e021..744c0f3 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -14,6 +14,8 @@
 
 import abc
 import copy
+
+import logging
 import os
 import re
 import struct
@@ -96,6 +98,13 @@
     return re.sub(' ', '-', name)
 
 
+class Attributes(object):
+    """A class to hold attributes for string formatting."""
+
+    def __init__(self, **kw):
+        setattr(self, '__dict__', kw)
+
+
 class Pipeline(object):
     """A configuration that ties triggers, reporters, managers and sources.
 
@@ -121,6 +130,7 @@
         self.success_message = None
         self.footer_message = None
         self.start_message = None
+        self.allow_secrets = False
         self.dequeue_on_new_patchset = True
         self.ignore_dependencies = False
         self.manager = None
@@ -156,6 +166,9 @@
     def __repr__(self):
         return '<Pipeline %s>' % self.name
 
+    def getSafeAttributes(self):
+        return Attributes(name=self.name)
+
     def setManager(self, manager):
         self.manager = manager
 
@@ -183,7 +196,7 @@
             items.extend(shared_queue.queue)
         return items
 
-    def formatStatusJSON(self, url_pattern=None):
+    def formatStatusJSON(self):
         j_pipeline = dict(name=self.name,
                           description=self.description)
         j_queues = []
@@ -200,7 +213,7 @@
                     if j_changes:
                         j_queue['heads'].append(j_changes)
                     j_changes = []
-                j_changes.append(e.formatJSON(url_pattern))
+                j_changes.append(e.formatJSON())
                 if (len(j_changes) > 1 and
                         (j_changes[-2]['remaining_time'] is not None) and
                         (j_changes[-1]['remaining_time'] is not None)):
@@ -524,8 +537,9 @@
 
     """
 
-    def __init__(self, name):
+    def __init__(self, name, source_context):
         self.name = name
+        self.source_context = source_context
         # The secret data may or may not be encrypted.  This attribute
         # is named 'secret_data' to make it easy to search for and
         # spot where it is directly used.
@@ -538,11 +552,26 @@
         if not isinstance(other, Secret):
             return False
         return (self.name == other.name and
+                self.source_context == other.source_context and
                 self.secret_data == other.secret_data)
 
     def __repr__(self):
         return '<Secret %s>' % (self.name,)
 
+    def decrypt(self, private_key):
+        """Return a copy of this secret with any encrypted data decrypted.
+        Note that the original remains encrypted."""
+
+        r = copy.deepcopy(self)
+        decrypted_secret_data = {}
+        for k, v in r.secret_data.items():
+            if hasattr(v, 'decrypt'):
+                decrypted_secret_data[k] = v.decrypt(private_key)
+            else:
+                decrypted_secret_data[k] = v
+        r.secret_data = decrypted_secret_data
+        return r
+
 
 class SourceContext(object):
     """A reference to the branch of a project in configuration.
@@ -673,6 +702,28 @@
         return d
 
 
+class AuthContext(object):
+    """The authentication information for a job.
+
+    Authentication information (both the actual data and metadata such
+    as whether it should be inherited) for a job is grouped together
+    in this object.
+    """
+
+    def __init__(self, inherit=False):
+        self.inherit = inherit
+        self.secrets = []
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __eq__(self, other):
+        if not isinstance(other, AuthContext):
+            return False
+        return (self.inherit == other.inherit and
+                self.secrets == other.secrets)
+
+
 class Job(object):
 
     """A Job represents the defintion of actions to perform.
@@ -715,17 +766,18 @@
             timeout=None,
             variables={},
             nodeset=NodeSet(),
-            auth={},
+            auth=None,
             workspace=None,
             pre_run=(),
             post_run=(),
             run=(),
             implied_run=(),
-            mutex=None,
+            semaphore=None,
             attempts=3,
             final=False,
             roles=frozenset(),
             repos=frozenset(),
+            allowed_projects=None,
         )
 
         # These are generally internal attributes which are not
@@ -778,6 +830,9 @@
     def _get(self, name):
         return self.__dict__.get(name)
 
+    def getSafeAttributes(self):
+        return Attributes(name=self.name)
+
     def setRun(self):
         if not self.run:
             self.run = self.implied_run
@@ -805,7 +860,7 @@
             raise Exception("Job unable to inherit from %s" % (other,))
 
         do_not_inherit = set()
-        if other.auth and not other.auth.get('inherit'):
+        if other.auth and not other.auth.inherit:
             do_not_inherit.add('auth')
 
         # copy all attributes
@@ -1007,6 +1062,9 @@
         return ('<Build %s of %s on %s>' %
                 (self.uuid, self.job.name, self.worker))
 
+    def getSafeAttributes(self):
+        return Attributes(uuid=self.uuid)
+
 
 class Worker(object):
     """Information about the specific worker executing a Build."""
@@ -1329,7 +1387,7 @@
             return False
         return self.item_ahead.isHoldingFollowingChanges()
 
-    def findJobsToRun(self, mutex):
+    def findJobsToRun(self, semaphore_handler):
         torun = []
         if not self.live:
             return []
@@ -1368,9 +1426,9 @@
                     # The nodes for this job are not ready, skip
                     # it for now.
                     continue
-                if mutex.acquire(self, job):
-                    # If this job needs a mutex, either acquire it or make
-                    # sure that we have it before running the job.
+                if semaphore_handler.acquire(self, job):
+                    # If this job needs a semaphore, either acquire it or
+                    # make sure that we have it before running the job.
                     torun.append(job)
         return torun
 
@@ -1447,10 +1505,10 @@
             fakebuild.result = 'SKIPPED'
             self.addBuild(fakebuild)
 
-    def formatJobResult(self, job, url_pattern=None):
+    def formatJobResult(self, job):
         build = self.current_build_set.getBuild(job.name)
         result = build.result
-        pattern = url_pattern
+        pattern = None
         if result == 'SUCCESS':
             if job.success_message:
                 result = job.success_message
@@ -1462,19 +1520,27 @@
             if job.failure_url:
                 pattern = job.failure_url
         url = None
+        # Produce safe versions of objects which may be useful in
+        # result formatting, but don't allow users to crawl through
+        # the entire data structure where they might be able to access
+        # secrets, etc.
+        safe_change = self.change.getSafeAttributes()
+        safe_pipeline = self.pipeline.getSafeAttributes()
+        safe_job = job.getSafeAttributes()
+        safe_build = build.getSafeAttributes()
         if pattern:
             try:
-                url = pattern.format(change=self.change,
-                                     pipeline=self.pipeline,
-                                     job=job,
-                                     build=build)
+                url = pattern.format(change=safe_change,
+                                     pipeline=safe_pipeline,
+                                     job=safe_job,
+                                     build=safe_build)
             except Exception:
                 pass  # FIXME: log this or something?
         if not url:
             url = build.url or job.name
         return (result, url)
 
-    def formatJSON(self, url_pattern=None):
+    def formatJSON(self):
         changeish = self.change
         ret = {}
         ret['active'] = self.active
@@ -1517,7 +1583,7 @@
             if build:
                 result = build.result
                 build_url = build.url
-                (unused, report_url) = self.formatJobResult(job, url_pattern)
+                (unused, report_url) = self.formatJobResult(job)
                 if build.start_time:
                     if build.end_time:
                         elapsed = int((build.end_time -
@@ -1660,6 +1726,12 @@
     def updatesConfig(self):
         return False
 
+    def getSafeAttributes(self):
+        return Attributes(project=self.project,
+                          ref=self.ref,
+                          oldrev=self.oldrev,
+                          newrev=self.newrev)
+
 
 class Change(Ref):
     """A proposed new state for a Project."""
@@ -1723,6 +1795,11 @@
             return True
         return False
 
+    def getSafeAttributes(self):
+        return Attributes(project=self.project,
+                          number=self.number,
+                          patchset=self.patchset)
+
 
 class TriggerEvent(object):
     """Incoming event from an external system."""
@@ -2134,6 +2211,7 @@
         self.projects = {}
         self.nodesets = []
         self.secrets = []
+        self.semaphores = []
 
     def copy(self):
         r = UnparsedTenantConfig()
@@ -2143,6 +2221,7 @@
         r.projects = copy.deepcopy(self.projects)
         r.nodesets = copy.deepcopy(self.nodesets)
         r.secrets = copy.deepcopy(self.secrets)
+        r.semaphores = copy.deepcopy(self.semaphores)
         return r
 
     def extend(self, conf):
@@ -2154,6 +2233,7 @@
                 self.projects.setdefault(k, []).extend(v)
             self.nodesets.extend(conf.nodesets)
             self.secrets.extend(conf.secrets)
+            self.semaphores.extend(conf.semaphores)
             return
 
         if not isinstance(conf, list):
@@ -2184,6 +2264,8 @@
                 self.nodesets.append(value)
             elif key == 'secret':
                 self.secrets.append(value)
+            elif key == 'semaphore':
+                self.semaphores.append(value)
             else:
                 raise Exception("Configuration item `%s` not recognized "
                                 "(when parsing %s)" %
@@ -2207,6 +2289,7 @@
         self.jobs = {'noop': [Job('noop')]}
         self.nodesets = {}
         self.secrets = {}
+        self.semaphores = {}
 
     def getJob(self, name):
         if name in self.jobs:
@@ -2245,6 +2328,11 @@
             raise Exception("Secret %s already defined" % (secret.name,))
         self.secrets[secret.name] = secret
 
+    def addSemaphore(self, semaphore):
+        if semaphore.name in self.semaphores:
+            raise Exception("Semaphore %s already defined" % (semaphore.name,))
+        self.semaphores[semaphore.name] = semaphore
+
     def addPipeline(self, pipeline):
         self.pipelines[pipeline.name] = pipeline
 
@@ -2254,7 +2342,9 @@
     def addProjectConfig(self, project_config):
         self.project_configs[project_config.name] = project_config
 
-    def _createJobGraph(self, change, job_list, job_graph):
+    def _createJobGraph(self, item, job_list, job_graph):
+        change = item.change
+        pipeline = item.pipeline
         for jobname in job_list.jobs:
             # This is the final job we are constructing
             frozen_job = None
@@ -2276,7 +2366,7 @@
             # If the job does not allow auth inheritance, do not allow
             # the project-pipeline variants to update its execution
             # attributes.
-            if frozen_job.auth and not frozen_job.auth.get('inherit'):
+            if frozen_job.auth and not frozen_job.auth.inherit:
                 frozen_job.final = True
             # Whether the change matches any of the project pipeline
             # variants
@@ -2289,6 +2379,15 @@
                 # A change must match at least one project pipeline
                 # job variant.
                 continue
+            if (frozen_job.allowed_projects and
+                change.project.name not in frozen_job.allowed_projects):
+                raise Exception("Project %s is not allowed to run job %s" %
+                                (change.project.name, frozen_job.name))
+            if ((not pipeline.allow_secrets) and frozen_job.auth and
+                frozen_job.auth.secrets):
+                raise Exception("Pipeline %s does not allow jobs with "
+                                "secrets (job %s)" % (
+                                    pipeline.name, frozen_job.name))
             job_graph.addJob(frozen_job)
 
     def createJobGraph(self, item):
@@ -2300,10 +2399,99 @@
         if project_config and item.pipeline.name in project_config.pipelines:
             project_job_list = \
                 project_config.pipelines[item.pipeline.name].job_list
-            self._createJobGraph(item.change, project_job_list, ret)
+            self._createJobGraph(item, project_job_list, ret)
         return ret
 
 
+class Semaphore(object):
+    def __init__(self, name, max=1):
+        self.name = name
+        self.max = int(max)
+
+
+class SemaphoreHandler(object):
+    log = logging.getLogger("zuul.SemaphoreHandler")
+
+    def __init__(self):
+        self.semaphores = {}
+
+    def acquire(self, item, job):
+        if not job.semaphore:
+            return True
+
+        semaphore_key = job.semaphore
+
+        m = self.semaphores.get(semaphore_key)
+        if not m:
+            # The semaphore is not held, acquire it
+            self._acquire(semaphore_key, item, job.name)
+            return True
+        if (item, job.name) in m:
+            # This item already holds the semaphore
+            return True
+
+        # semaphore is there, check max
+        if len(m) < self._max_count(item, job.semaphore):
+            self._acquire(semaphore_key, item, job.name)
+            return True
+
+        return False
+
+    def release(self, item, job):
+        if not job.semaphore:
+            return
+
+        semaphore_key = job.semaphore
+
+        m = self.semaphores.get(semaphore_key)
+        if not m:
+            # The semaphore is not held, nothing to do
+            self.log.error("Semaphore can not be released for %s "
+                           "because the semaphore is not held" %
+                           item)
+            return
+        if (item, job.name) in m:
+            # This item is a holder of the semaphore
+            self._release(semaphore_key, item, job.name)
+            return
+        self.log.error("Semaphore can not be released for %s "
+                       "which does not hold it" % item)
+
+    def _acquire(self, semaphore_key, item, job_name):
+        self.log.debug("Semaphore acquire {semaphore}: job {job}, item {item}"
+                       .format(semaphore=semaphore_key,
+                               job=job_name,
+                               item=item))
+        if semaphore_key not in self.semaphores:
+            self.semaphores[semaphore_key] = []
+        self.semaphores[semaphore_key].append((item, job_name))
+
+    def _release(self, semaphore_key, item, job_name):
+        self.log.debug("Semaphore release {semaphore}: job {job}, item {item}"
+                       .format(semaphore=semaphore_key,
+                               job=job_name,
+                               item=item))
+        sem_item = (item, job_name)
+        if sem_item in self.semaphores[semaphore_key]:
+            self.semaphores[semaphore_key].remove(sem_item)
+
+        # cleanup if there is no user of the semaphore anymore
+        if len(self.semaphores[semaphore_key]) == 0:
+            del self.semaphores[semaphore_key]
+
+    @staticmethod
+    def _max_count(item, semaphore_name):
+        if not item.current_build_set.layout:
+            # This should not occur as the layout of the item must already be
+            # built when acquiring or releasing a semaphore for a job.
+            raise Exception("Item {} has no layout".format(item))
+
+        # find the right semaphore
+        default_semaphore = Semaphore(semaphore_name, 1)
+        semaphores = item.current_build_set.layout.semaphores
+        return semaphores.get(semaphore_name, default_semaphore).max
+
+
 class Tenant(object):
     def __init__(self, name):
         self.name = name
@@ -2324,6 +2512,8 @@
         # A mapping of source -> {config_repos: {}, project_repos: {}}
         self.sources = {}
 
+        self.semaphore_handler = SemaphoreHandler()
+
     def addConfigRepo(self, source, project):
         sd = self.sources.setdefault(source.name,
                                      {'config_repos': {},
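
Unlike the removed MutexHandler, the SemaphoreHandler above tracks a list of
(item, job_name) holders per semaphore and compares its length against the
configured maximum, so a semaphore with max > 1 admits several concurrent
builds. A standalone sketch of that counting logic (hypothetical items; the
real code looks the maximum up from the item's layout instead of a
hard-coded dict):

    holders = {}            # semaphore name -> list of (item, job_name)
    MAX = {'ci-cloud': 2}   # stand-in for the layout's semaphore config

    def acquire(item, job_name, semaphore):
        held = holders.get(semaphore, [])
        if (item, job_name) in held:
            return True                    # already a holder
        if len(held) < MAX.get(semaphore, 1):
            holders.setdefault(semaphore, []).append((item, job_name))
            return True
        return False                       # at capacity, try again later

    assert acquire('item-A', 'unit-tests', 'ci-cloud')
    assert acquire('item-B', 'unit-tests', 'ci-cloud')
    assert not acquire('item-C', 'unit-tests', 'ci-cloud')
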
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index 6df3f1b..5e25e7c 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -111,14 +111,10 @@
         ret = ''
 
         config = self.connection.sched.config
-        if config.has_option('zuul', 'url_pattern'):
-            url_pattern = config.get('zuul', 'url_pattern')
-        else:
-            url_pattern = None
 
         for job in item.getJobs():
             build = item.current_build_set.getBuild(job.name)
-            (result, url) = item.formatJobResult(job, url_pattern)
+            (result, url) = item.formatJobResult(job)
             if not job.voting:
                 voting = ' (non-voting)'
             else:
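
With the global url_pattern option gone, report URLs come only from a job's
own success/failure URL settings, and those patterns are formatted against
the reduced Attributes objects rather than the full model objects. A sketch
of what such a pattern can still reach, using invented values in place of
getSafeAttributes() results:

    class Attributes(object):
        """Stand-in for zuul.model.Attributes."""
        def __init__(self, **kw):
            self.__dict__ = kw

    safe_change = Attributes(project='org/project', number='1234', patchset='2')
    safe_build = Attributes(uuid='abc123')

    pattern = 'https://logs.example.com/{change.number}/{change.patchset}/{build.uuid}/'
    url = pattern.format(change=safe_change, build=safe_build)
    # 'https://logs.example.com/1234/2/abc123/'
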
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 6ae8492..0fa1763 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -33,68 +33,6 @@
 from zuul import version as zuul_version
 
 
-class MutexHandler(object):
-    log = logging.getLogger("zuul.MutexHandler")
-
-    def __init__(self):
-        self.mutexes = {}
-
-    def acquire(self, item, job):
-        if not job.mutex:
-            return True
-        mutex_name = job.mutex
-        m = self.mutexes.get(mutex_name)
-        if not m:
-            # The mutex is not held, acquire it
-            self._acquire(mutex_name, item, job.name)
-            return True
-        held_item, held_job_name = m
-        if held_item is item and held_job_name == job.name:
-            # This item already holds the mutex
-            return True
-        held_build = held_item.current_build_set.getBuild(held_job_name)
-        if held_build and held_build.result:
-            # The build that held the mutex is complete, release it
-            # and let the new item have it.
-            self.log.error("Held mutex %s being released because "
-                           "the build that holds it is complete" %
-                           (mutex_name,))
-            self._release(mutex_name, item, job.name)
-            self._acquire(mutex_name, item, job.name)
-            return True
-        return False
-
-    def release(self, item, job):
-        if not job.mutex:
-            return
-        mutex_name = job.mutex
-        m = self.mutexes.get(mutex_name)
-        if not m:
-            # The mutex is not held, nothing to do
-            self.log.error("Mutex can not be released for %s "
-                           "because the mutex is not held" %
-                           (item,))
-            return
-        held_item, held_job_name = m
-        if held_item is item and held_job_name == job.name:
-            # This item holds the mutex
-            self._release(mutex_name, item, job.name)
-            return
-        self.log.error("Mutex can not be released for %s "
-                       "which does not hold it" %
-                       (item,))
-
-    def _acquire(self, mutex_name, item, job_name):
-        self.log.debug("Job %s of item %s acquiring mutex %s" %
-                       (job_name, item, mutex_name))
-        self.mutexes[mutex_name] = (item, job_name)
-
-    def _release(self, mutex_name, item, job_name):
-        self.log.debug("Job %s of item %s releasing mutex %s" %
-                       (job_name, item, mutex_name))
-        del self.mutexes[mutex_name]
-
-
 class ManagementEvent(object):
     """An event that should be processed within the main queue run loop"""
     def __init__(self):
@@ -269,7 +207,6 @@
         self.connections = None
         self.statsd = extras.try_import('statsd.statsd')
         # TODO(jeblair): fix this
-        self.mutex = MutexHandler()
         # Despite triggers being part of the pipeline, there is one trigger set
         # per scheduler. The pipeline handles the trigger filters but since
         # the events are handled by the scheduler itself it needs to handle
@@ -593,19 +530,27 @@
                 except Exception:
                     self.log.exception(
                         "Exception while canceling build %s "
-                        "for change %s" % (build, item.change))
+                        "for change %s" % (build, build.build_set.item.change))
                 finally:
-                    self.mutex.release(build.build_set.item, build.job)
+                    tenant.semaphore_handler.release(
+                        build.build_set.item, build.job)
 
     def _reconfigureTenant(self, tenant):
         # This is called from _doReconfigureEvent while holding the
         # layout lock
         old_tenant = self.abide.tenants.get(tenant.name)
+
         if old_tenant:
+            # Copy over semaphore handler so we don't lose the currently
+            # held semaphores.
+            tenant.semaphore_handler = old_tenant.semaphore_handler
+
             self._reenqueueTenant(old_tenant, tenant)
+
         # TODOv3(jeblair): update for tenants
         # self.maintainConnectionCache()
         self.connections.reconfigureDrivers(tenant)
+
         # TODOv3(jeblair): remove postconfig calls?
         for pipeline in tenant.layout.pipelines.values():
             pipeline.source.postConfig()
@@ -911,11 +856,6 @@
 
     def formatStatusJSON(self, tenant_name):
         # TODOv3(jeblair): use tenants
-        if self.config.has_option('zuul', 'url_pattern'):
-            url_pattern = self.config.get('zuul', 'url_pattern')
-        else:
-            url_pattern = None
-
         data = {}
 
         data['zuul_version'] = self.zuul_version
@@ -942,5 +882,5 @@
         data['pipelines'] = pipelines
         tenant = self.abide.tenants.get(tenant_name)
         for pipeline in tenant.layout.pipelines.values():
-            pipelines.append(pipeline.formatStatusJSON(url_pattern))
+            pipelines.append(pipeline.formatStatusJSON())
         return json.dumps(data)
diff --git a/zuul/webapp.py b/zuul/webapp.py
index e16f0b4..4f040fa 100644
--- a/zuul/webapp.py
+++ b/zuul/webapp.py
@@ -23,6 +23,8 @@
 import webob
 from webob import dec
 
+from zuul.lib import encryption
+
 """Zuul main web app.
 
 Zuul supports HTTP requests directly against it for determining the
@@ -34,6 +36,7 @@
    queue / pipeline structure of the system
  - /status.json (backwards compatibility): same as /status
  - /status/change/X,Y: return status just for gerrit change X,Y
+ - /keys/SOURCE/PROJECT.pub: return the public key for PROJECT
 
 When returning status for a single gerrit change you will get an
 array of changes, they will not include the queue structure.
@@ -96,9 +99,31 @@
             return m.group(1)
         return None
 
+    def _handle_keys(self, request, path):
+        m = re.match(r'/keys/(.*?)/(.*?)\.pub', path)
+        if not m:
+            raise webob.exc.HTTPNotFound()
+        source_name = m.group(1)
+        project_name = m.group(2)
+        source = self.scheduler.connections.getSource(source_name)
+        if not source:
+            raise webob.exc.HTTPNotFound()
+        project = source.getProject(project_name)
+        if not project:
+            raise webob.exc.HTTPNotFound()
+
+        pem_public_key = encryption.serialize_rsa_public_key(
+            project.public_key)
+
+        response = webob.Response(body=pem_public_key,
+                                  content_type='text/plain')
+        return response.conditional_response_app
+
     def app(self, request):
         tenant_name = request.path.split('/')[1]
         path = request.path.replace('/' + tenant_name, '')
+        if path.startswith('/keys'):
+            return self._handle_keys(request, path)
         path = self._normalize_path(path)
         if path is None:
             raise webob.exc.HTTPNotFound()
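
The new handler serves each project's public key as PEM text under
/TENANT/keys/SOURCE/PROJECT.pub, so users can fetch it and encrypt secrets
locally before committing them. A hedged example of retrieving a key, with
an assumed webapp address, tenant, source and project:

    # Host, port, tenant, source and project names are examples only.
    from six.moves import urllib

    url = 'http://localhost:8001/example-tenant/keys/gerrit/org/project.pub'
    pem_public_key = urllib.request.urlopen(url).read()
    # b'-----BEGIN PUBLIC KEY-----\n...'
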