Merge "Add project related type hints to gerritconnection" into feature/zuulv3
diff --git a/doc/source/admin/client.rst b/doc/source/admin/client.rst
index 6b62360..961b205 100644
--- a/doc/source/admin/client.rst
+++ b/doc/source/admin/client.rst
@@ -22,6 +22,14 @@
 
 The following subcommands are supported:
 
+Autohold
+^^^^^^^^
+.. program-output:: zuul autohold --help
+
+Example::
+
+  zuul autohold --tenant openstack --project example_project --job example_job --reason "reason text" --count 1
+
 Enqueue
 ^^^^^^^
 .. program-output:: zuul enqueue --help
diff --git a/doc/source/admin/components.rst b/doc/source/admin/components.rst
index c11b5cb..dcaa7b4 100644
--- a/doc/source/admin/components.rst
+++ b/doc/source/admin/components.rst
@@ -229,6 +229,8 @@
 To start the merger, run ``zuul-merger``.  To stop it, kill the
 PID which was saved in the pidfile specified in the configuration.
 
+.. _executor:
+
 Executor
 --------
 
@@ -354,6 +356,33 @@
 
      variables=/etc/zuul/variables.yaml
 
+**disk_limit_per_job**
+  This integer is the maximum number of megabytes that any one job is
+  allowed to consume on disk while it is running. If a job's scratch
+  space has more than this much space consumed, it will be aborted::
+
+      disk_limit_per_job=100
+
+**trusted_ro_paths**
+
+  List of paths, separated by ':', to read-only bind mount into trusted
+  bubblewrap contexts.
+
+**trusted_rw_paths**
+
+  List of paths, separated by ':', to read-write bind mount into trusted
+  bubblewrap contexts.
+
+**untrusted_ro_paths**
+
+  List of paths, separated by ':', to read-only bind mount into untrusted
+  bubblewrap contexts.
+
+**untrusted_rw_paths**
+
+  List of paths, separated by ':', to read-write bind mount into untrusted
+  bubblewrap contexts.
+
 merger
 """"""
 
diff --git a/doc/source/admin/drivers/gerrit.rst b/doc/source/admin/drivers/gerrit.rst
index 29e136b..454f8d0 100644
--- a/doc/source/admin/drivers/gerrit.rst
+++ b/doc/source/admin/drivers/gerrit.rst
@@ -105,7 +105,7 @@
 **approval**
   This is only used for ``comment-added`` events.  It only matches if
   the event has a matching approval associated with it.  Example:
-  ``code-review: 2`` matches a ``+2`` vote on the code review
+  ``Code-Review: 2`` matches a ``+2`` vote on the code review
   category.  Multiple approvals may be listed.
 
 **email**
@@ -145,9 +145,9 @@
   This may be used for any event.  It requires that a certain kind of
   approval be present for the current patchset of the change (the
   approval could be added by the event in question).  It follows the
-  same syntax as the :ref:`"approval" pipeline requirement
-  <pipeline-require-approval>`. For each specified criteria there must
-  exist a matching approval.
+  same syntax as :attr:`pipeline.require.<gerrit
+  source>.approval`. For each specified criterion there must exist a
+  matching approval.
 
 **reject-approval**
   This takes a list of approvals in the same format as
@@ -170,3 +170,99 @@
 
 A :ref:`connection<connections>` that uses the gerrit driver must be
 supplied to the trigger.
+
+Requirements Configuration
+--------------------------
+
+As described in :attr:`pipeline.require` and :attr:`pipeline.reject`,
+pipelines may specify that items meet certain conditions in order to
+be enqueued into the pipeline.  These conditions vary according to the
+source of the project in question.  To supply requirements for changes
+from a Gerrit source named *my-gerrit*, create a configuration such as
+the following::
+
+  pipeline:
+    require:
+      my-gerrit:
+        approval:
+          - Code-Review: 2
+
+This indicates that changes originating from the Gerrit connection
+named *my-gerrit* must have a Code Review vote of +2 in order to be
+enqueued into the pipeline.
+
+.. attr:: pipeline.require.<gerrit source>
+
+   The dictionary passed to the Gerrit pipeline `require` attribute
+   supports the following attributes:
+
+   .. attr:: approval
+
+      This requires that a certain kind of approval be present for the
+      current patchset of the change (the approval could be added by
+      the event in question).  It takes several sub-parameters, all of
+      which are optional and are combined together so that there must
+      be an approval matching all specified requirements.
+
+      .. attr:: username
+
+         If present, an approval from this username is required.  It is
+         treated as a regular expression.
+
+      .. attr:: email
+
+         If present, an approval with this email address is required.  It is
+         treated as a regular expression.
+
+      .. attr:: older-than
+
+         If present, the approval must be older than this amount of time
+         to match.  Provide a time interval as a number with a suffix of
+         "w" (weeks), "d" (days), "h" (hours), "m" (minutes), "s"
+         (seconds).  Example ``48h`` or ``2d``.
+
+      .. attr:: newer-than
+
+         If present, the approval must be newer than this amount
+         of time to match.  Same format as "older-than".
+
+      Any other field is interpreted as a review category and value
+      pair.  For example ``Verified: 1`` would require that the
+      approval be for a +1 vote in the "Verified" column.  The value
+      may either be a single value or a list: ``Verified: [1, 2]``
+      would match either a +1 or +2 vote.
+
+   .. attr:: open
+
+      A boolean value (``true`` or ``false``) that indicates whether
+      the change must be open or closed in order to be enqueued.
+
+   .. attr:: current-patchset
+
+      A boolean value (``true`` or ``false``) that indicates whether the
+      change must be the current patchset in order to be enqueued.
+
+   .. attr:: status
+
+      A string value that corresponds with the status of the change
+      reported by the trigger.
+
+.. attr:: pipeline.reject.<gerrit source>
+
+   The `reject` attribute is the mirror of the `require` attribute.  It
+   also accepts a dictionary under the connection name.  This
+   dictionary supports the following attributes:
+
+   .. attr:: approval
+
+      This takes a list of approvals. If an approval matches the
+      provided criteria the change can not be entered into the
+      pipeline. It follows the same syntax as
+      :attr:`pipeline.require.<gerrit source>.approval`.
+
+      Example to reject a change with any negative vote::
+
+        reject:
+          my-gerrit:
+            approval:
+              - Code-Review: [-1, -2]
diff --git a/doc/source/admin/drivers/github.rst b/doc/source/admin/drivers/github.rst
index 91cef1b..cbbc5cc 100644
--- a/doc/source/admin/drivers/github.rst
+++ b/doc/source/admin/drivers/github.rst
@@ -75,6 +75,12 @@
   job's working directory, they appear under this directory name.
   ``canonical_hostname=git.example.com``
 
+**verify_ssl**
+  Optional: Enable or disable SSL verification for GitHub Enterprise.  This is
+  useful for a connection to a test installation. If not specified, defaults
+  to ``true``.
+  ``verify_ssl=true``
+
 Trigger Configuration
 ---------------------
 GitHub webhook events can be configured as triggers.
@@ -194,3 +200,108 @@
   Request based events.  ``unlabel: 'test failed'``
 
 .. _Github App: https://developer.github.com/apps/
+
+Requirements Configuration
+--------------------------
+
+As described in :attr:`pipeline.require` and :attr:`pipeline.reject`,
+pipelines may specify that items meet certain conditions in order to
+be enqueued into the pipeline.  These conditions vary according to the
+source of the project in question.  To supply requirements for changes
+from a GitHub source named *my-github*, create a configuration such
+as the following::
+
+  pipeline:
+    require:
+      my-github:
+        review:
+          - type: approval
+
+This indicates that changes originating from the GitHub connection
+named *my-github* must have an approved code review in order to be
+enqueued into the pipeline.
+
+.. attr:: pipeline.require.<github source>
+
+   The dictionary passed to the GitHub pipeline `require` attribute
+   supports the following attributes:
+
+   .. attr:: review
+
+      This requires that a certain kind of code review be present for
+      the pull request (it could be added by the event in question).
+      It takes several sub-parameters, all of which are optional and
+      are combined together so that there must be a code review
+      matching all specified requirements.
+
+      .. attr:: username
+
+         If present, a code review from this username is required.  It
+         is treated as a regular expression.
+
+      .. attr:: email
+
+         If present, a code review with this email address is
+         required.  It is treated as a regular expression.
+
+      .. attr:: older-than
+
+         If present, the code review must be older than this amount of
+         time to match.  Provide a time interval as a number with a
+         suffix of "w" (weeks), "d" (days), "h" (hours), "m"
+         (minutes), "s" (seconds).  Example ``48h`` or ``2d``.
+
+      .. attr:: newer-than
+
+         If present, the code review must be newer than this amount of
+         time to match.  Same format as "older-than".
+
+      .. attr:: type
+
+         If present, the code review must match this type (or types).
+
+         .. TODO: what types are valid?
+
+      .. attr:: permission
+
+         If present, the author of the code review must have this
+         permission (or permissions).  The available values are
+         ``read``, ``write``, and ``admin``.
+
+   .. attr:: open
+
+      A boolean value (``true`` or ``false``) that indicates whether
+      the change must be open or closed in order to be enqueued.
+
+   .. attr:: current-patchset
+
+      A boolean value (``true`` or ``false``) that indicates whether
+      the item must be associated with the latest commit in the pull
+      request in order to be enqueued.
+
+      .. TODO: this could probably be expanded upon -- under what
+         circumstances might this happen with github
+
+   .. attr:: status
+
+      A string value that corresponds with the status of the pull
+      request.  The syntax is ``user:status:value``.
+
+   .. attr:: label
+
+      A string value indicating that the pull request must have the
+      indicated label (or labels).
+
+
+.. attr:: pipeline.reject.<github source>
+
+   The `reject` attribute is the mirror of the `require` attribute.  It
+   also accepts a dictionary under the connection name.  This
+   dictionary supports the following attributes:
+
+   .. attr:: review
+
+      This takes a list of code reviews.  If a code review matches the
+      provided criteria the pull request can not be entered into the
+      pipeline.  It follows the same syntax as
+      :attr:`pipeline.require.<github source>.review`
diff --git a/doc/source/admin/drivers/zuul.rst b/doc/source/admin/drivers/zuul.rst
index a23c875..b531754 100644
--- a/doc/source/admin/drivers/zuul.rst
+++ b/doc/source/admin/drivers/zuul.rst
@@ -25,16 +25,3 @@
 **pipeline**
   Only available for ``parent-change-enqueued`` events.  This is the
   name of the pipeline in which the parent change was enqueued.
-
-**require-approval**
-  This may be used for any event.  It requires that a certain kind of
-  approval be present for the current patchset of the change (the
-  approval could be added by the event in question).  It follows the
-  same syntax as the :ref:`"approval" pipeline requirement
-  <pipeline-require-approval>`. For each specified criteria there must
-  exist a matching approval.
-
-**reject-approval**
-  This takes a list of approvals in the same format as
-  *require-approval* but will fail to enter the pipeline if there is a
-  matching approval.
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
index 18ec381..b3b2d9c 100644
--- a/doc/source/admin/tenants.rst
+++ b/doc/source/admin/tenants.rst
@@ -28,6 +28,8 @@
 
   - tenant:
       name: my-tenant
+      max-nodes-per-job: 5
+      exclude-unprotected-branches: false
       source:
         gerrit:
           config-projects:
@@ -38,7 +40,8 @@
             - zuul-jobs:
                 shadow: common-config
             - project1
-            - project2
+            - project2:
+                exclude-unprotected-branches: true
 
 The following attributes are supported:
 
@@ -48,6 +51,20 @@
   characters (ASCII letters, numbers, hyphen and underscore) and you
   should avoid changing it unless necessary.
 
+**max-nodes-per-job** (optional)
+  The maximum number of nodes a job can request, defaults to 5.
+  A '-1' value removes the limit.
+
+**exclude-unprotected-branches** (optional)
+  When using a branch and pull model on a shared github repository there are
+  usually one or more protected branches which are gated and a dynamic number of
+  personal/feature branches which are the source for the pull requests. These
+  branches can potentially include broken zuul config and therefore break the
+  global tenant wide configuration. In order to deal with this, Zuul's operations
+  can be limited to the protected branches which are gated. This is a tenant
+  wide setting and can be overridden per project. If not specified, defaults
+  to ``false``.
+
 **source** (required)
   A dictionary of sources to consult for projects.  A tenant may
   contain projects from multiple sources; each of those sources must
@@ -99,6 +116,10 @@
     "zuul-jobs" projects, the definition in "common-config" will be
     used.
 
+    **exclude-unprotected-branches**
+    Define if unprotected GitHub branches should be processed. Defaults to the
+    tenant wide setting of exclude-unprotected-branches.
+
   The order of the projects listed in a tenant is important.  A job
   which is defined in one project may not be redefined in another
   project; therefore, once a job appears in one project, a project
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 71c7697..7c0d587 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -29,11 +29,13 @@
     'sphinx.ext.autodoc',
     'sphinxcontrib.blockdiag',
     'sphinxcontrib.programoutput',
-    'oslosphinx'
+    'zuul.sphinx.zuul',
 ]
 #extensions = ['sphinx.ext.intersphinx']
 #intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
 
+primary_domain = 'zuul'
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
 
@@ -89,7 +91,7 @@
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
+#html_theme = 'alabaster'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
new file mode 100644
index 0000000..98bee10
--- /dev/null
+++ b/doc/source/glossary.rst
@@ -0,0 +1,57 @@
+.. _glossary:
+
+Glossary
+========
+
+.. glossary::
+   :sorted:
+
+   check
+
+      By convention, the name of a pipeline which performs pre-merge
+      tests.  Such a pipeline might be triggered by creating a new
+      change or pull request.  It may run with changes which have not
+      yet seen any human review, so care must be taken in selecting
+      the kinds of jobs to run, and what resources will be available
+      to them in order to avoid misuse of the system or credential
+      compromise.
+
+   config-project
+
+      One of two types of projects which may be specified by the
+      administrator in the tenant config file.  A config-project is
+      primarily tasked with holding configuration information and job
+      content for Zuul.  Jobs which are defined in a config-project
+      are run with elevated privileges, and all Zuul configuration
+      items are available for use.  It is expected that changes to
+      config-projects will undergo careful scrutiny before being
+      merged.
+
+   gate
+
+      By convention, the name of a pipeline which performs project
+      gating.  Such a pipeline might be triggered by a core team
+      member approving a change or pull request.  It should have a
+      :value:`dependent <pipeline.manager.dependent>` pipeline manager
+      so that it can combine and sequence changes as they are
+      approved.
+
+   reporter
+
+      A reporter is a :ref:`pipeline attribute <reporters>` which
+      describes the action performed when an item is dequeued after
+      its jobs complete.  Reporters are implemented by :ref:`drivers`
+      so their actions may be quite varied.  For example, a reporter
+      might leave feedback in a remote system on a proposed change,
+      send email, or store information in a database.
+
+   untrusted-project
+
+      One of two types of projects which may be specified by the
+      administrator in the tenant config file.  An untrusted-project
+      is one whose primary focus is not to operate Zuul, but rather it
+      is one of the projects being tested or deployed.  The Zuul
+      configuration language available to these projects is somewhat
+      restricted, and jobs defined in these projects run in a
+      restricted execution environment since they may be operating on
+      changes which have not yet undergone review.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 24ab31b..677e958 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -21,10 +21,15 @@
    admin/index
    developer/index
 
+.. toctree::
+   :hidden:
+
+   glossary
+
 Indices and tables
 ==================
 
 * :ref:`genindex`
-* :ref:`modindex`
 * :ref:`search`
+* :ref:`glossary`
 
diff --git a/doc/source/user/concepts.rst b/doc/source/user/concepts.rst
index 6197396..318de09 100644
--- a/doc/source/user/concepts.rst
+++ b/doc/source/user/concepts.rst
@@ -40,7 +40,8 @@
 configured with any number of reporters.  See :ref:`drivers` for a
 full list of available reporters.
 
-The items enqueued into a pipeline are each associated with a git ref.
+The items enqueued into a pipeline are each associated with a
+`git ref <https://git-scm.com/book/en/v2/Git-Internals-Git-References>`_.
 That ref may point to a proposed change, or it may be the tip of a
 branch or a tag.  The triggering event determines the ref, and whether
 it represents a proposed or merged commit.  Zuul prepares the ref for
@@ -67,7 +68,7 @@
 change appears.
 
 Jobs specify the type and quantity of nodes which they require.
-Before executing each job, Zuul will contact it's companion program,
+Before executing each job, Zuul will contact its companion program,
 Nodepool, to supply them.  Nodepool may be configured to supply static
 nodes or contact cloud providers to create or delete nodes as
 necessary.  The types of nodes available to Zuul are determined by the
@@ -80,6 +81,6 @@
 script) or sophisticated deployment scenarios.  When Zuul runs
 Ansible, it attempts to do so in a manner most similar to the way that
 Ansible might be used to orchestrate remote systems.  Ansible itself
-is run on the executor and acts remotely upon the test nodes supplied
-to a job.  This facilitates continuous delivery by making it possible
-to use the same Ansible playbooks in testing and production.
+is run on the :ref:`executor <executor>` and acts remotely upon the test
+nodes supplied to a job.  This facilitates continuous delivery by making it
+possible to use the same Ansible playbooks in testing and production.
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 6b63e49..34d23f9 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -16,15 +16,15 @@
 they specify one of two security contexts for that project.  A
 *config-project* is one which is primarily tasked with holding
 configuration information and job content for Zuul.  Jobs which are
-defined in a *config-project* are run with elevated privileges, and
-all Zuul configuration items are available for use.  It is expected
-that changes to *config-projects* will undergo careful scrutiny before
-being merged.
+defined in a config-project are run with elevated privileges, and all
+Zuul configuration items are available for use.  It is expected that
+changes to config-projects will undergo careful scrutiny before being
+merged.
 
 An *untrusted-project* is a project whose primary focus is not to
 operate Zuul, but rather it is one of the projects being tested or
 deployed.  The Zuul configuration language available to these projects
-is somewhat restricted (as detailed in individual section below), and
+is somewhat restricted (as detailed in individual sections below), and
 jobs defined in these projects run in a restricted execution
 environment since they may be operating on changes which have not yet
 undergone review.
@@ -33,23 +33,23 @@
 ---------------------
 
 When Zuul starts, it examines all of the git repositories which are
-specified by the system administrator in :ref:`tenant-config` and searches
-for files in the root of each repository. Zuul looks first for a file named
-`zuul.yaml` or a directory named `zuul.d`, and if they are not found,
-`.zuul.yaml` or `.zuul.d` (with a leading dot). In the case of an
-*untrusted-project*, the configuration from every branch is included,
-however, in the case of a *config-project*, only the `master` branch is
-examined.
+specified by the system administrator in :ref:`tenant-config` and
+searches for files in the root of each repository. Zuul looks first
+for a file named ``zuul.yaml`` or a directory named ``zuul.d``, and if
+they are not found, ``.zuul.yaml`` or ``.zuul.d`` (with a leading
+dot). In the case of an :term:`untrusted-project`, the configuration
+from every branch is included, however, in the case of a
+:term:`config-project`, only the ``master`` branch is examined.
 
 When a change is proposed to one of these files in an
-*untrusted-project*, the configuration proposed in the change is
-merged into the running configuration so that any changes to Zuul's
+untrusted-project, the configuration proposed in the change is merged
+into the running configuration so that any changes to Zuul's
 configuration are self-testing as part of that change.  If there is a
 configuration error, no jobs will be run and the error will be
 reported by any applicable pipelines.  In the case of a change to a
-*config-project*, the new configuration is parsed and examined for
+config-project, the new configuration is parsed and examined for
 errors, but the new configuration is not used in testing the change.
-This is because configuration in *config-projects* is able to access
+This is because configuration in config-projects is able to access
 elevated privileges and should always be reviewed before being merged.
 
 As soon as a change containing a Zuul configuration change merges to
@@ -59,14 +59,15 @@
 Configuration Items
 -------------------
 
-The `zuul.yaml` and `.zuul.yaml` configuration files are
+The ``zuul.yaml`` and ``.zuul.yaml`` configuration files are
 YAML-formatted and are structured as a series of items, each of which
 is described below.
 
-In the case of a `zuul.d` directory, Zuul recurses the directory and extends
-the configuration using all the .yaml files in the sorted path order.
-For example, to keep job's variants in a separate file, it needs to be loaded
-after the main entries, for example using number prefixes in file's names::
+In the case of a ``zuul.d`` directory, Zuul recurses the directory and
+extends the configuration using all the .yaml files in the sorted path
+order.  For example, to keep a job's variants in a separate file, they
+need to be loaded after the main entries, for example by using number
+prefixes in the file names::
 
 * zuul.d/pipelines.yaml
 * zuul.d/projects.yaml
@@ -87,320 +88,333 @@
 projects.
 
 By way of example, one of the primary uses of Zuul is to perform
-project gating.  To do so, one can create a *gate* pipeline which
-tells Zuul that when a certain event (such as approval by a code
+project gating.  To do so, one can create a :term:`gate` pipeline
+which tells Zuul that when a certain event (such as approval by a code
 reviewer) occurs, the corresponding change or pull request should be
 enqueued into the pipeline.  When that happens, the jobs which have
-been configured to run for that project in the *gate* pipeline are
-run, and when they complete, the pipeline reports the results to the
-user.
+been configured to run for that project in the gate pipeline are run,
+and when they complete, the pipeline reports the results to the user.
 
-Pipeline configuration items may only appear in *config-projects*.
+Pipeline configuration items may only appear in :term:`config-projects
+<config-project>`.
 
 Generally, a Zuul administrator would define a small number of
 pipelines which represent the workflow processes used in their
 environment.  Each project can then be added to the available
 pipelines as appropriate.
 
-Here is an example *check* pipeline, which runs whenever a new
+Here is an example :term:`check` pipeline, which runs whenever a new
 patchset is created in Gerrit.  If the associated jobs all report
-success, the pipeline reports back to Gerrit with a *Verified* vote of
-+1, or if at least one of them fails, a -1::
+success, the pipeline reports back to Gerrit with a ``Verified`` vote of
++1, or if at least one of them fails, a -1:
 
-  - pipeline:
-      name: check
-      manager: independent
-      trigger:
-        my_gerrit:
-          - event: patchset-created
-      success:
-        my_gerrit:
-          Verified: 1
-      failure:
-        my_gerrit
-          Verified: -1
+.. code-block:: yaml
+
+   - pipeline:
+       name: check
+       manager: independent
+       trigger:
+         my_gerrit:
+           - event: patchset-created
+       success:
+         my_gerrit:
+           Verified: 1
+       failure:
+         my_gerrit:
+           Verified: -1
 
 .. TODO: See TODO for more annotated examples of common pipeline configurations.
 
-The attributes available on a pipeline are as follows (all are
-optional unless otherwise specified):
+.. attr:: pipeline
 
-**name** (required)
-  This is used later in the project definition to indicate what jobs
-  should be run for events in the pipeline.
+   The attributes available on a pipeline are as follows (all are
+   optional unless otherwise specified):
 
-**manager** (required)
-  There are currently two schemes for managing pipelines:
+   .. attr:: name
+      :required:
 
-  .. _independent_pipeline_manager:
+      This is used later in the project definition to indicate what jobs
+      should be run for events in the pipeline.
 
-  *independent*
-    Every event in this pipeline should be treated as independent of
-    other events in the pipeline.  This is appropriate when the order of
-    events in the pipeline doesn't matter because the results of the
-    actions this pipeline performs can not affect other events in the
-    pipeline.  For example, when a change is first uploaded for review,
-    you may want to run tests on that change to provide early feedback
-    to reviewers.  At the end of the tests, the change is not going to
-    be merged, so it is safe to run these tests in parallel without
-    regard to any other changes in the pipeline.  They are independent.
+   .. attr:: manager
+      :required:
 
-    Another type of pipeline that is independent is a post-merge
-    pipeline. In that case, the changes have already merged, so the
-    results can not affect any other events in the pipeline.
+      There are currently two schemes for managing pipelines:
 
-  .. _dependent_pipeline_manager:
+      .. value:: independent
 
-  *dependent*
-    The dependent pipeline manager is designed for gating.  It ensures
-    that every change is tested exactly as it is going to be merged
-    into the repository.  An ideal gating system would test one change
-    at a time, applied to the tip of the repository, and only if that
-    change passed tests would it be merged.  Then the next change in
-    line would be tested the same way.  In order to achieve parallel
-    testing of changes, the dependent pipeline manager performs
-    speculative execution on changes.  It orders changes based on
-    their entry into the pipeline.  It begins testing all changes in
-    parallel, assuming that each change ahead in the pipeline will pass
-    its tests.  If they all succeed, all the changes can be tested and
-    merged in parallel.  If a change near the front of the pipeline
-    fails its tests, each change behind it ignores whatever tests have
-    been completed and are tested again without the change in front.
-    This way gate tests may run in parallel but still be tested
-    correctly, exactly as they will appear in the repository when
-    merged.
+         Every event in this pipeline should be treated as independent
+         of other events in the pipeline.  This is appropriate when
+         the order of events in the pipeline doesn't matter because
+         the results of the actions this pipeline performs can not
+         affect other events in the pipeline.  For example, when a
+         change is first uploaded for review, you may want to run
+         tests on that change to provide early feedback to reviewers.
+         At the end of the tests, the change is not going to be
+         merged, so it is safe to run these tests in parallel without
+         regard to any other changes in the pipeline.  They are
+         independent.
 
-    For more detail on the theory and operation of Zuul's dependent
-    pipeline manager, see: :doc:`gating`.
+         Another type of pipeline that is independent is a post-merge
+         pipeline. In that case, the changes have already merged, so
+         the results can not affect any other events in the pipeline.
 
-**allow-secrets**
-  This is a boolean which can be used to prevent jobs which require
-  secrets from running in this pipeline.  Some pipelines run on
-  proposed changes and therefore execute code which has not yet been
-  reviewed.  In such a case, allowing a job to use a secret could
-  result in that secret being exposed.  The default is False, meaning
-  that in order to run jobs with secrets, this must be explicitly
-  enabled on each Pipeline where that is safe.
+      .. value:: dependent
 
-  For more information, see :ref:`secret`.
+         The dependent pipeline manager is designed for gating.  It
+         ensures that every change is tested exactly as it is going to
+         be merged into the repository.  An ideal gating system would
+         test one change at a time, applied to the tip of the
+         repository, and only if that change passed tests would it be
+         merged.  Then the next change in line would be tested the
+         same way.  In order to achieve parallel testing of changes,
+         the dependent pipeline manager performs speculative execution
+         on changes.  It orders changes based on their entry into the
+         pipeline.  It begins testing all changes in parallel,
+         assuming that each change ahead in the pipeline will pass its
+         tests.  If they all succeed, all the changes can be tested
+         and merged in parallel.  If a change near the front of the
+         pipeline fails its tests, each change behind it ignores
+         whatever tests have been completed and are tested again
+         without the change in front.  This way gate tests may run in
+         parallel but still be tested correctly, exactly as they will
+         appear in the repository when merged.
 
-**description**
-  This field may be used to provide a textual description of the
-  pipeline.  It may appear in the status page or in documentation.
+         For more detail on the theory and operation of Zuul's
+         dependent pipeline manager, see: :doc:`gating`.
 
-**success-message**
-  The introductory text in reports when all the voting jobs are
-  successful.  Defaults to "Build successful."
+   .. attr:: allow-secrets
+      :default: false
 
-**failure-message**
-  The introductory text in reports when at least one voting job fails.
-  Defaults to "Build failed."
+      This is a boolean which can be used to prevent jobs which
+      require secrets from running in this pipeline.  Some pipelines
+      run on proposed changes and therefore execute code which has not
+      yet been reviewed.  In such a case, allowing a job to use a
+      secret could result in that secret being exposed.  The default
+      is ``false``, meaning that in order to run jobs with secrets,
+      this must be explicitly enabled on each Pipeline where that is
+      safe.
 
-**merge-failure-message**
-  The introductory text in the message reported when a change fails to
-  merge with the current state of the repository.  Defaults to "Merge
-  failed."
+      For more information, see :ref:`secret`.
 
-**footer-message**
-  Supplies additional information after test results.  Useful for
-  adding information about the CI system such as debugging and contact
-  details.
+   .. attr:: description
 
-**trigger**
-  At least one trigger source must be supplied for each pipeline.
-  Triggers are not exclusive -- matching events may be placed in
-  multiple pipelines, and they will behave independently in each of
-  the pipelines they match.
+      This field may be used to provide a textual description of the
+      pipeline.  It may appear in the status page or in documentation.
 
-  Triggers are loaded from their connection name. The driver type of
-  the connection will dictate which options are available.
-  See :ref:`drivers`.
+   .. attr:: success-message
+      :default: Build successful.
 
-**require**
-  If this section is present, it established pre-requisites for any
-  kind of item entering the Pipeline.  Regardless of how the item is
-  to be enqueued (via any trigger or automatic dependency resolution),
-  the conditions specified here must be met or the item will not be
-  enqueued.
+      The introductory text in reports when all the voting jobs are
+      successful.
 
-.. _pipeline-require-approval:
+   .. attr:: failure-message
+      :default: Build failed.
 
-  **approval**
-  This requires that a certain kind of approval be present for the
-  current patchset of the change (the approval could be added by the
-  event in question).  It takes several sub-parameters, all of which
-  are optional and are combined together so that there must be an
-  approval matching all specified requirements.
+      The introductory text in reports when at least one voting job
+      fails.
 
-    *username*
-    If present, an approval from this username is required.  It is
-    treated as a regular expression.
+   .. attr:: merge-failure-message
+      :default: Merge failed.
 
-    *email*
-    If present, an approval with this email address is required.  It
-    is treated as a regular expression.
+      The introductory text in the message reported when a change
+      fails to merge with the current state of the repository.
 
-    *email-filter* (deprecated)
-    A deprecated alternate spelling of *email*.  Only one of *email* or
-    *email_filter* should be used.
+   .. attr:: footer-message
 
-    *older-than*
-    If present, the approval must be older than this amount of time
-    to match.  Provide a time interval as a number with a suffix of
-    "w" (weeks), "d" (days), "h" (hours), "m" (minutes), "s"
-    (seconds).  Example ``48h`` or ``2d``.
+      Supplies additional information after test results.  Useful for
+      adding information about the CI system such as debugging and
+      contact details.
 
-    *newer-than*
-    If present, the approval must be newer than this amount of time
-    to match.  Same format as "older-than".
+   .. attr:: trigger
 
-    Any other field is interpreted as a review category and value
-    pair.  For example ``Verified: 1`` would require that the approval
-    be for a +1 vote in the "Verified" column.  The value may either
-    be a single value or a list: ``Verified: [1, 2]`` would match
-    either a +1 or +2 vote.
+      At least one trigger source must be supplied for each pipeline.
+      Triggers are not exclusive -- matching events may be placed in
+      multiple pipelines, and they will behave independently in each
+      of the pipelines they match.
 
-  **open**
-  A boolean value (``true`` or ``false``) that indicates whether the change
-  must be open or closed in order to be enqueued.
+      Triggers are loaded from their connection name. The driver type
+      of the connection will dictate which options are available.  See
+      :ref:`drivers`.
 
-  **current-patchset**
-  A boolean value (``true`` or ``false``) that indicates whether the change
-  must be the current patchset in order to be enqueued.
+   .. attr:: require
 
-  **status**
-  A string value that corresponds with the status of the change
-  reported by the trigger.
+      If this section is present, it establishes prerequisites for
+      any kind of item entering the Pipeline.  Regardless of how the
+      item is to be enqueued (via any trigger or automatic dependency
+      resolution), the conditions specified here must be met or the
+      item will not be enqueued.  These requirements may vary
+      depending on the source of the item being enqueued.
 
-**reject**
-  If this section is present, it establishes pre-requisites that can
-  block an item from being enqueued. It can be considered a negative
-  version of **require**.
+      Requirements are loaded from their connection name. The driver
+      type of the connection will dictate which options are available.
+      See :ref:`drivers`.
 
-  **approval**
-  This takes a list of approvals. If an approval matches the provided
-  criteria the change can not be entered into the pipeline. It follows
-  the same syntax as the :ref:`"require approval" pipeline above
-  <pipeline-require-approval>`.
+   .. attr:: reject
 
-  Example to reject a change with any negative vote::
+      If this section is present, it establishes prerequisites that
+      can block an item from being enqueued. It can be considered a
+      negative version of :attr:`pipeline.require`.
 
-    reject:
-      approval:
-        - Code-Review: [-1, -2]
+      Requirements are loaded from their connection name. The driver
+      type of the connection will dictate which options are available.
+      See :ref:`drivers`.
 
-**dequeue-on-new-patchset**
-  Normally, if a new patchset is uploaded to a change that is in a
-  pipeline, the existing entry in the pipeline will be removed (with
-  jobs canceled and any dependent changes that can no longer merge as
-  well.  To suppress this behavior (and allow jobs to continue
-  running), set this to ``false``.  Default: ``true``.
+   .. attr:: dequeue-on-new-patchset
+      :default: true
 
-**ignore-dependencies**
-  In any kind of pipeline (dependent or independent), Zuul will
-  attempt to enqueue all dependencies ahead of the current change so
-  that they are tested together (independent pipelines report the
-  results of each change regardless of the results of changes ahead).
-  To ignore dependencies completely in an independent pipeline, set
-  this to ``true``.  This option is ignored by dependent pipelines.
-  The default is: ``false``.
+      Normally, if a new patchset is uploaded to a change that is in a
+      pipeline, the existing entry in the pipeline will be removed
+      (with jobs canceled and any dependent changes that can no longer
+      merge as well).  To suppress this behavior (and allow jobs to
+      continue running), set this to ``false``.
 
-**precedence**
-  Indicates how the build scheduler should prioritize jobs for
-  different pipelines.  Each pipeline may have one precedence, jobs
-  for pipelines with a higher precedence will be run before ones with
-  lower.  The value should be one of ``high``, ``normal``, or ``low``.
-  Default: ``normal``.
+   .. attr:: ignore-dependencies
+      :default: false
 
-The following options configure *reporters*.  Reporters are
-complementary to triggers; where a trigger is an event on a connection
-which causes Zuul to enqueue an item, a reporter is the action
-performed on a connection when an item is dequeued after its jobs
-complete.  The actual syntax for a reporter is defined by the driver
-which implements it.  See :ref:`drivers` for more information.
+      In any kind of pipeline (dependent or independent), Zuul will
+      attempt to enqueue all dependencies ahead of the current change
+      so that they are tested together (independent pipelines report
+      the results of each change regardless of the results of changes
+      ahead).  To ignore dependencies completely in an independent
+      pipeline, set this to ``true``.  This option is ignored by
+      dependent pipelines.
 
-**success**
-  Describes where Zuul should report to if all the jobs complete
-  successfully.  This section is optional; if it is omitted, Zuul will
-  run jobs and do nothing on success -- it will not report at all.  If
-  the section is present, the listed reporters will be asked to report
-  on the jobs.  The reporters are listed by their connection name. The
-  options available depend on the driver for the supplied connection.
+   .. attr:: precedence
+      :default: normal
 
-**failure**
-  These reporters describe what Zuul should do if at least one job
-  fails.
+      Indicates how the build scheduler should prioritize jobs for
+      different pipelines.  Each pipeline may have one precedence,
+      jobs for pipelines with a higher precedence will be run before
+      ones with lower.  The value should be one of ``high``,
+      ``normal``, or ``low``.
 
-**merge-failure**
-  These reporters describe what Zuul should do if it is unable to
-  merge in the patchset. If no merge-failure reporters are listed then
-  the ``failure`` reporters will be used to notify of unsuccessful
-  merges.
+   .. _reporters:
 
-**start**
-  These reporters describe what Zuul should do when a change is added
-  to the pipeline.  This can be used, for example, to reset a
-  previously reported result.
+   The following options configure :term:`reporters <reporter>`.
+   Reporters are complementary to triggers; where a trigger is an
+   event on a connection which causes Zuul to enqueue an item, a
+   reporter is the action performed on a connection when an item is
+   dequeued after its jobs complete.  The actual syntax for a reporter
+   is defined by the driver which implements it.  See :ref:`drivers`
+   for more information.
 
-**disabled**
-  These reporters describe what Zuul should do when a pipeline is
-  disabled.  See ``disable-after-consecutive-failures``.
+   .. attr:: success
 
-The following options can be used to alter Zuul's behavior to mitigate
-situations in which jobs are failing frequently (perhaps due to a
-problem with an external dependency, or unusually high
-non-deterministic test failures).
+      Describes where Zuul should report to if all the jobs complete
+      successfully.  This section is optional; if it is omitted, Zuul
+      will run jobs and do nothing on success -- it will not report at
+      all.  If the section is present, the listed :term:`reporters
+      <reporter>` will be asked to report on the jobs.  The reporters
+      are listed by their connection name. The options available
+      depend on the driver for the supplied connection.
 
-**disable-after-consecutive-failures**
-  If set, a pipeline can enter a ''disabled'' state if too many changes
-  in a row fail. When this value is exceeded the pipeline will stop
-  reporting to any of the ``success``, ``failure`` or ``merge-failure``
-  reporters and instead only report to the ``disabled`` reporters.
-  (No ``start`` reports are made when a pipeline is disabled).
+   .. attr:: failure
 
-**window**
-  Dependent pipeline managers only. Zuul can rate limit dependent
-  pipelines in a manner similar to TCP flow control.  Jobs are only
-  started for items in the queue if they are within the actionable
-  window for the pipeline. The initial length of this window is
-  configurable with this value. The value given should be a positive
-  integer value. A value of ``0`` disables rate limiting on the
-  DependentPipelineManager.  Default: ``20``.
+      These reporters describe what Zuul should do if at least one job
+      fails.
 
-**window-floor**
-  Dependent pipeline managers only. This is the minimum value for the
-  window described above. Should be a positive non zero integer value.
-  Default: ``3``.
+   .. attr:: merge-failure
 
-**window-increase-type**
-  Dependent pipeline managers only. This value describes how the window
-  should grow when changes are successfully merged by zuul. A value of
-  ``linear`` indicates that ``window-increase-factor`` should be added
-  to the previous window value. A value of ``exponential`` indicates
-  that ``window-increase-factor`` should be multiplied against the
-  previous window value and the result will become the window size.
-  Default: ``linear``.
+      These reporters describe what Zuul should do if it is unable to
+      merge in the patchset. If no merge-failure reporters are listed
+      then the ``failure`` reporters will be used to notify of
+      unsuccessful merges.
 
-**window-increase-factor**
-  Dependent pipeline managers only. The value to be added or multiplied
-  against the previous window value to determine the new window after
-  successful change merges.
-  Default: ``1``.
+   .. attr:: start
 
-**window-decrease-type**
-  Dependent pipeline managers only. This value describes how the window
-  should shrink when changes are not able to be merged by Zuul. A value
-  of ``linear`` indicates that ``window-decrease-factor`` should be
-  subtracted from the previous window value. A value of ``exponential``
-  indicates that ``window-decrease-factor`` should be divided against
-  the previous window value and the result will become the window size.
-  Default: ``exponential``.
+      These reporters describe what Zuul should do when a change is
+      added to the pipeline.  This can be used, for example, to reset
+      a previously reported result.
 
-**window-decrease-factor**
-  Dependent pipline managers only. The value to be subtracted or divided
-  against the previous window value to determine the new window after
-  unsuccessful change merges.
-  Default: ``2``.
+   .. attr:: disabled
+
+      These reporters describe what Zuul should do when a pipeline is
+      disabled.  See ``disable-after-consecutive-failures``.
+
+   The following options can be used to alter Zuul's behavior to
+   mitigate situations in which jobs are failing frequently (perhaps
+   due to a problem with an external dependency, or unusually high
+   non-deterministic test failures).
+
+   .. attr:: disable-after-consecutive-failures
+
+      If set, a pipeline can enter a *disabled* state if too many
+      changes in a row fail. When this value is exceeded the pipeline
+      will stop reporting to any of the **success**, **failure** or
+      **merge-failure** reporters and instead only report to the
+      **disabled** reporters.  (No **start** reports are made when a
+      pipeline is disabled).
+
+   .. attr:: window
+      :default: 20
+
+      Dependent pipeline managers only. Zuul can rate limit dependent
+      pipelines in a manner similar to TCP flow control.  Jobs are
+      only started for items in the queue if they are within the
+      actionable window for the pipeline. The initial length of this
+      window is configurable with this value. The value given should
+      be a positive integer value. A value of ``0`` disables rate
+      limiting on the :value:`dependent pipeline manager
+      <pipeline.manager.dependent>`.
+
+   .. attr:: window-floor
+      :default: 3
+
+      Dependent pipeline managers only. This is the minimum value for
+      the window described above. Should be a positive non-zero
+      integer value.
+
+   .. attr:: window-increase-type
+      :default: linear
+
+      Dependent pipeline managers only. This value describes how the
+      window should grow when changes are successfully merged by zuul.
+
+      .. value:: linear
+
+         Indicates that **window-increase-factor** should be added to
+         the previous window value.
+
+      .. value:: exponential
+
+         Indicates that **window-increase-factor** should be
+         multiplied against the previous window value and the result
+         will become the window size.
+
+   .. attr:: window-increase-factor
+      :default: 1
+
+      Dependent pipeline managers only. The value to be added or
+      multiplied against the previous window value to determine the
+      new window after successful change merges.
+
+   .. attr:: window-decrease-type
+      :default: exponential
+
+      Dependent pipeline managers only. This value describes how the
+      window should shrink when changes are not able to be merged by
+      Zuul.
+
+      .. value:: linear
+
+         Indicates that **window-decrease-factor** should be
+         subtracted from the previous window value.
+
+      .. value:: exponential
+
+         Indicates that **window-decrease-factor** should be divided
+         against the previous window value and the result will become
+         the window size.
+
+   .. attr:: window-decrease-factor
+      :default: 2
+
+      :value:`Dependent pipeline managers
+      <pipeline.manager.dependent>` only. The value to be subtracted
+      or divided against the previous window value to determine the
+      new window after unsuccessful change merges.
 
 
 .. _job:
@@ -423,7 +437,7 @@
 jobs on the system should have, progressing through stages of
 specialization before arriving at a particular job.  A job may inherit
 from any other job in any project (however, if the other job is marked
-as `final`, some attributes may not be overidden).
+as ``final``, some attributes may not be overridden).
 
 Jobs also support a concept called variance.  The first time a job
 definition appears is called the reference definition of the job.
@@ -461,365 +475,424 @@
 
 Further inheritance would nest even deeper.
 
-Here is an example of two job definitions::
+Here is an example of two job definitions:
 
-  - job:
-      name: base
-      pre-run: copy-git-repos
-      post-run: copy-logs
+.. code-block:: yaml
 
-  - job:
-      name: run-tests
-      parent: base
-      nodes:
-        - name: test-node
-	  image: fedora
+   - job:
+       name: base
+       pre-run: copy-git-repos
+       post-run: copy-logs
 
-The following attributes are available on a job; all are optional
-unless otherwise specified:
+   - job:
+       name: run-tests
+       parent: base
+       nodes:
+         - name: test-node
+           image: fedora
 
-**name** (required)
-  The name of the job.  By default, Zuul looks for a playbook with
-  this name to use as the main playbook for the job.  This name is
-  also referenced later in a project pipeline configuration.
+.. attr:: job
 
-**parent**
-  Specifies a job to inherit from.  The parent job can be defined in
-  this or any other project.  Any attributes not specified on a job
-  will be collected from its parent.
+   The following attributes are available on a job; all are optional
+   unless otherwise specified:
 
-**description**
-  A textual description of the job.  Not currently used directly by
-  Zuul, but it is used by the zuul-sphinx extension to Sphinx to
-  auto-document Zuul jobs (in which case it is interpreted as
-  ReStructuredText.
+   .. attr:: name
+      :required:
 
-**success-message**
-  Normally when a job succeeds, the string "SUCCESS" is reported as
-  the result for the job.  If set, this option may be used to supply a
-  different string.  Default: "SUCCESS".
+      The name of the job.  By default, Zuul looks for a playbook with
+      this name to use as the main playbook for the job.  This name is
+      also referenced later in a project pipeline configuration.
 
-**failure-message**
-  Normally when a job fails, the string "FAILURE" is reported as
-  the result for the job.  If set, this option may be used to supply a
-  different string.  Default: "FAILURE".
+   .. attr:: parent
 
-**success-url**
-  When a job succeeds, this URL is reported along with the result.  If
-  this value is not supplied, Zuul uses the content of the job
-  :ref:`return value <return_values>` **zuul.log_url**.  This is
-  recommended as it allows the code which stores the URL to the job
-  artifacts to report exactly where they were stored.  To override
-  this value, or if it is not set, supply an absolute URL in this
-  field.  If a relative URL is supplied in this field, and
-  **zuul.log_url** is set, then the two will be combined to produce
-  the URL used for the report.  This can be used to specify that
-  certain jobs should "deep link" into the stored job artifacts.
-  Default: none.
+      Specifies a job to inherit from.  The parent job can be defined
+      in this or any other project.  Any attributes not specified on
+      a job will be collected from its parent.
 
-**failure-url**
-  When a job fails, this URL is reported along with the result.
-  Otherwise behaves the same as **success-url**.
+   .. attr:: description
 
-**hold-following-changes**
-  In a dependent pipeline, this option may be used to indicate that no
-  jobs should start on any items which depend on the current item
-  until this job has completed successfully.  This may be used to
-  conserve build resources, at the expense of inhibiting the
-  parallelization which speeds the processing of items in a dependent
-  pipeline.  A boolean value, default: false.
+      A textual description of the job.  Not currently used directly
+      by Zuul, but it is used by the zuul-sphinx extension to Sphinx
+      to auto-document Zuul jobs (in which case it is interpreted as
+      ReStructuredText).
 
-**voting**
-  Indicates whether the result of this job should be used in
-  determining the overall result of the item.  A boolean value,
-  default: true.
+   .. attr:: success-message
+      :default: SUCCESS
 
-**semaphore**
-  The name of a :ref:`semaphore` which should be acquired and released
-  when the job begins and ends.  If the semaphore is at maximum
-  capacity, then Zuul will wait until it can be acquired before
-  starting the job.  Default: none.
+      Normally when a job succeeds, the string ``SUCCESS`` is reported
+      as the result for the job.  If set, this option may be used to
+      supply a different string.
 
-**tags**
-  Metadata about this job.  Tags are units of information attached to
-  the job; they do not affect Zuul's behavior, but they can be used
-  within the job to characterize the job.  For example, a job which
-  tests a certain subsystem could be tagged with the name of that
-  subsystem, and if the job's results are reported into a database,
-  then the results of all jobs affecting that subsystem could be
-  queried.  This attribute is specified as a list of strings, and when
-  inheriting jobs or applying variants, tags accumulate in a set, so
-  the result is always a set of all the tags from all the jobs and
-  variants used in constructing the frozen job, with no duplication.
-  Default: none.
+   .. attr:: failure-message
+      :default: FAILURE
 
-**branches**
-  A regular expression (or list of regular expressions) which describe
-  on what branches a job should run (or in the case of variants: to
-  alter the behavior of a job for a certain branch).
+      Normally when a job fails, the string ``FAILURE`` is reported as
+      the result for the job.  If set, this option may be used to
+      supply a different string.
 
-  If there is no job definition for a given job which matches the
-  branch of an item, then that job is not run for the item.
-  Otherwise, all of the job variants which match that branch (and any
-  other selection criteria) are used when freezing the job.
+   .. attr:: success-url
 
-  This example illustrates a job called *run-tests* which uses a
-  nodeset based on the current release of an operating system to
-  perform its tests, except when testing changes to the stable/2.0
-  branch, in which case it uses an older release::
+      When a job succeeds, this URL is reported along with the result.
+      If this value is not supplied, Zuul uses the content of the job
+      :ref:`return value <return_values>` **zuul.log_url**.  This is
+      recommended as it allows the code which stores the URL to the
+      job artifacts to report exactly where they were stored.  To
+      override this value, or if it is not set, supply an absolute URL
+      in this field.  If a relative URL is supplied in this field, and
+      **zuul.log_url** is set, then the two will be combined to
+      produce the URL used for the report.  This can be used to
+      specify that certain jobs should "deep link" into the stored job
+      artifacts.
 
-    - job:
-        name: run-tests
-        nodes: current-release
+   .. attr:: failure-url
 
-    - job:
-        name: run-tests
-        branch: stable/2.0
-        nodes: old-release
+      When a job fails, this URL is reported along with the result.
+      Otherwise behaves the same as **success-url**.
 
-  In some cases, Zuul uses an implied value for the branch specifier
-  if none is supplied:
+   .. attr:: hold-following-changes
+      :default: false
 
-  * For a job definition in a *config-project*, no implied branch
-    specifier is used.  If no branch specifier appears, the job
-    applies to all branches.
+      In a dependent pipeline, this option may be used to indicate
+      that no jobs should start on any items which depend on the
+      current item until this job has completed successfully.  This
+      may be used to conserve build resources, at the expense of
+      inhibiting the parallelization which speeds the processing of
+      items in a dependent pipeline.
 
-  * In the case of an *untrusted-project*, no implied branch specifier
-    is applied to the reference definition of a job.  That is to say,
-    that if the first appearance of the job definition appears without
-    a branch specifier, then it will apply to all branches.  Note that
-    when collecting its configuration, Zuul reads the `master` branch
-    of a given project first, then other branches in alphabetical
-    order.
+   .. attr:: voting
+      :default: true
 
-  * Any further job variants other than the reference definition in an
-    *untrusted-project* will, if they do not have a branch specifier,
-    will have an implied branch specifier for the current branch
-    applied.
+      Indicates whether the result of this job should be used in
+      determining the overall result of the item.
 
-  This allows for the very simple and expected workflow where if a
-  project defines a job on the master branch with no branch specifier,
-  and then creates a new branch based on master, any changes to that
-  job definition within the new branch only affect that branch.
+   .. attr:: semaphore
 
-**files**
-  This attribute indicates that the job should only run on changes
-  where the specified files are modified.  This is a regular
-  expression or list of regular expressions.  Default: none.
+      The name of a :ref:`semaphore` which should be acquired and
+      released when the job begins and ends.  If the semaphore is at
+      maximum capacity, then Zuul will wait until it can be acquired
+      before starting the job.
 
-**irrelevant-files**
-  This is a negative complement of `files`.  It indicates that the job
-  should run unless *all* of the files changed match this list.  In
-  other words, if the regular expression `docs/.*` is supplied, then
-  this job will not run if the only files changed are in the docs
-  directory.  A regular expression or list of regular expressions.
-  Default: none.
+   .. attr:: tags
 
-**auth**
-  Authentication information to be made available to the job.  This is
-  a dictionary with two potential keys:
+      Metadata about this job.  Tags are units of information attached
+      to the job; they do not affect Zuul's behavior, but they can be
+      used within the job to characterize the job.  For example, a job
+      which tests a certain subsystem could be tagged with the name of
+      that subsystem, and if the job's results are reported into a
+      database, then the results of all jobs affecting that subsystem
+      could be queried.  This attribute is specified as a list of
+      strings, and when inheriting jobs or applying variants, tags
+      accumulate in a set, so the result is always a set of all the
+      tags from all the jobs and variants used in constructing the
+      frozen job, with no duplication.
 
-  **inherit**
-  A boolean indicating that the authentication information referenced
-  by this job should be able to be inherited by child jobs.  Normally
-  when a job inherits from another job, the auth section is not
-  included.  This permits jobs to inherit the same basic structure and
-  playbook, but ensures that secret information is unable to be
-  exposed by a child job which may alter the job's behavior.  If it is
-  safe for the contents of the authentication section to be used by
-  child jobs, set this to ``true``.  Default: ``false``.
+   .. attr:: branches
 
-  **secrets**
-  A list of secrets which may be used by the job.  A :ref:`secret` is
-  a named collection of private information defined separately in the
-  configuration.  The secrets that appear here must be defined in the
-  same project as this job definition.
+      A regular expression (or list of regular expressions) which
+      describe on what branches a job should run (or in the case of
+      variants: to alter the behavior of a job for a certain branch).
 
-  In the future, other types of authentication information may be
-  added.
+      If there is no job definition for a given job which matches the
+      branch of an item, then that job is not run for the item.
+      Otherwise, all of the job variants which match that branch (and
+      any other selection criteria) are used when freezing the job.
 
-**nodes**
-  A list of nodes which should be supplied to the job.  This parameter
-  may be supplied either as a string, in which case it references a
-  :ref:`nodeset` definition which appears elsewhere in the
-  configuration, or a list, in which case it is interpreted in the
-  same way as a Nodeset definition (in essence, it is an anonymous
-  Node definition unique to this job).  See the :ref:`nodeset`
-  reference for the syntax to use in that case.
+      This example illustrates a job called *run-tests* which uses a
+      nodeset based on the current release of an operating system to
+      perform its tests, except when testing changes to the stable/2.0
+      branch, in which case it uses an older release:
 
-  If a job has an empty or no node definition, it will still run and
-  may be able to perform actions on the Zuul executor.
+      .. code-block:: yaml
 
-**override-branch**
-  When Zuul runs jobs for a proposed change, it normally checks out
-  the branch associated with that change on every project present in
-  the job.  If jobs are running on a ref (such as a branch tip or
-  tag), then that ref is normally checked out.  This attribute is used
-  to override that behavior and indicate that this job should,
-  regardless of the branch for the queue item, use the indicated
-  branch instead.  This can be used, for example, to run a previous
-  version of the software (from a stable maintenance branch) under
-  test even if the change being tested applies to a different branch
-  (this is only likely to be useful if there is some cross-branch
-  interaction with some component of the system being tested).  See
-  also the project-specific **override-branch** attribute under
-  **required-projects** to apply this behavior to a subset of a job's
-  projects.
+         - job:
+             name: run-tests
+             nodes: current-release
 
-**timeout**
-  The time in minutes that the job should be allowed to run before it
-  is automatically aborted and failure is reported.  If no timeout is
-  supplied, the job may run indefinitely.  Supplying a timeout is
-  highly recommended.
+         - job:
+             name: run-tests
+             branches: stable/2.0
+             nodes: old-release
 
-**attempts**
-  When Zuul encounters an error running a job's pre-run playbook, Zuul
-  will stop and restart the job.  Errors during the main or
-  post-run -playbook phase of a job are not affected by this parameter
-  (they are reported immediately).  This parameter controls the number
-  of attempts to make before an error is reported.  Default: 3.
+      In some cases, Zuul uses an implied value for the branch
+      specifier if none is supplied:
 
-**pre-run**
-  The name of a playbook or list of playbooks without file extension
-  to run before the main body of a job.  The full path to the playbook
-  in the repo where the job is defined is expected.
+      * For a job definition in a :term:`config-project`, no implied
+        branch specifier is used.  If no branch specifier appears, the
+        job applies to all branches.
 
-  When a job inherits from a parent, the child's pre-run playbooks are
-  run after the parent's.  See :ref:`job` for more information.
+      * In the case of an :term:`untrusted-project`, no implied branch
+        specifier is applied to the reference definition of a job.
+        That is to say, if the first appearance of the job
+        definition appears without a branch specifier, then it will
+        apply to all branches.  Note that when collecting its
+        configuration, Zuul reads the ``master`` branch of a given
+        project first, then other branches in alphabetical order.
 
-**post-run**
-  The name of a playbook or list of playbooks without file extension
-  to run after the main body of a job.  The full path to the playbook
-  in the repo where the job is defined is expected.
+      * Any further job variants other than the reference definition
+        in an untrusted-project will, if they do not have a branch
+        specifier, have an implied branch specifier for the
+        current branch applied.
 
-  When a job inherits from a parent, the child's post-run playbooks
-  are run before the parent's.  See :ref:`job` for more information.
+      This allows for the very simple and expected workflow where if a
+      project defines a job on the ``master`` branch with no branch
+      specifier, and then creates a new branch based on ``master``,
+      any changes to that job definition within the new branch only
+      affect that branch.
 
-**run**
-  The name of the main playbook for this job.  This parameter is
-  not normally necessary, as it defaults to a playbook with the
-  same name as the job inside of the `playbooks/` directory (e.g.,
-  the `foo` job would default to `playbooks/foo`.  However, if a
-  playbook with a different name is needed, it can be specified
-  here.  The file extension is not required, but the full path
-  within the repo is.  When a child inherits from a parent, a
-  playbook with the name of the child job is implicitly searched
-  first, before falling back on the playbook used by the parent
-  job (unless the child job specifies a ``run`` attribute, in which
-  case that value is used).  Example::
+   .. attr:: files
 
-     run: playbooks/<name of the job>
+      This attribute indicates that the job should only run on changes
+      where the specified files are modified.  This is a regular
+      expression or list of regular expressions.
 
-**roles**
-  A list of Ansible roles to prepare for the job.  Because a job runs
-  an Ansible playbook, any roles which are used by the job must be
-  prepared and installed by Zuul before the job begins.  This value is
-  a list of dictionaries, each of which indicates one of two types of
-  roles: a Galaxy role, which is simply a role that is installed from
-  Ansible Galaxy, or a Zuul role, which is a role provided by a
-  project managed by Zuul.  Zuul roles are able to benefit from
-  speculative merging and cross-project dependencies when used by
-  playbooks in untrusted projects.  Roles are added to the Ansible
-  role path in the order they appear on the job -- roles earlier in
-  the list will take precedence over those which follow.
+   .. attr:: irrelevant-files
 
-  In the case of job inheritance or variance, the roles used for each
-  of the playbooks run by the job will be only those which were
-  defined along with that playbook.  If a child job inherits from a
-  parent which defines a pre and post playbook, then the pre and post
-  playbooks it inherits from the parent job will run only with the
-  roles that were defined on the parent.  If the child adds its own
-  pre and post playbooks, then any roles added by the child will be
-  available to the child's playbooks.  This is so that a job which
-  inherits from a parent does not inadvertantly alter the behavior of
-  the parent's playbooks by the addition of conflicting roles.  Roles
-  added by a child will appear before those it inherits from its
-  parent.
+      This is a negative complement of **files**.  It indicates that
+      the job should run unless *all* of the files changed match this
+      list.  In other words, if the regular expression ``docs/.*`` is
+      supplied, then this job will not run if the only files changed
+      are in the docs directory.  A regular expression or list of
+      regular expressions.
 
-  A project which supplies a role may be structured in one of two
-  configurations: a bare role (in which the role exists at the root of
-  the project), or a contained role (in which the role exists within
-  the `roles/` directory of the project, perhaps along with other
-  roles).  In the case of a contained role, the `roles/` directory of
-  the project is added to the role search path.  In the case of a bare
-  role, the project itself is added to the role search path.  In case
-  the name of the project is not the name under which the role should
-  be installed (and therefore referenced from Ansible), the `name`
-  attribute may be used to specify an alternate.
+   .. attr:: auth
 
-  A job automatically has the project in which it is defined added to
-  the roles path if that project appears to contain a role or `roles/`
-  directory.  By default, the project is added to the path under its
-  own name, however, that may be changed by explicitly listing the
-  project in the roles list in the usual way.
+      Authentication information to be made available to the job.
+      This is a dictionary with two potential keys:
 
-  .. note:: galaxy roles are not yet implemented
+      .. attr:: inherit
+         :default: false
 
-  **galaxy**
-    The name of the role in Ansible Galaxy.  If this attribute is
-    supplied, Zuul will search Ansible Galaxy for a role by this name
-    and install it.  Mutually exclusive with ``zuul``; either
-    ``galaxy`` or ``zuul`` must be supplied.
+         A boolean indicating that the authentication information
+         referenced by this job should be able to be inherited by
+         child jobs.  Normally when a job inherits from another job,
+         the auth section is not included.  This permits jobs to
+         inherit the same basic structure and playbook, but ensures
+         that secret information is unable to be exposed by a child
+         job which may alter the job's behavior.  If it is safe for
+         the contents of the authentication section to be used by
+         child jobs, set this to ``true``.
 
-  **zuul**
-    The name of a Zuul project which supplies the role.  Mutually
-    exclusive with ``galaxy``; either ``galaxy`` or ``zuul`` must be
-    supplied.
+      .. attr:: secrets
 
-  **name**
-    The installation name of the role.  In the case of a bare role,
-    the role will be made available under this name.  Ignored in the
-    case of a contained role.
+         A list of secrets which may be used by the job.  A
+         :ref:`secret` is a named collection of private information
+         defined separately in the configuration.  The secrets that
+         appear here must be defined in the same project as this job
+         definition.
 
-**required-projects**
-  A list of other projects which are used by this job.  Any Zuul
-  projects specified here will also be checked out by Zuul into the
-  working directory for the job.  Speculative merging and cross-repo
-  dependencies will be honored.
+         In the future, other types of authentication information may
+         be added.
 
-  The format for this attribute is either a list of strings or
-  dictionaries.  Strings are interpreted as project names,
-  dictionaries may have the following attributes:
+   .. attr:: nodes
 
-  **name**
-    The name of the required project.
+      A list of nodes which should be supplied to the job.  This
+      parameter may be supplied either as a string, in which case it
+      references a :ref:`nodeset` definition which appears elsewhere
+      in the configuration, or a list, in which case it is interpreted
+      in the same way as a Nodeset definition (in essence, it is an
+      anonymous Node definition unique to this job).  See the
+      :ref:`nodeset` reference for the syntax to use in that case.
 
-  **override-branch**
-    When Zuul runs jobs for a proposed change, it normally checks out
-    the branch associated with that change on every project present in
-    the job.  If jobs are running on a ref (such as a branch tip or
-    tag), then that ref is normally checked out.  This attribute is
-    used to override that behavior and indicate that this job should,
-    regardless of the branch for the queue item, use the indicated
-    branch instead, for only this project.  See also the
-    **override-branch** attribute of jobs to apply the same behavior
-    to all projects in a job.
+      If a job has an empty or no node definition, it will still run
+      and may be able to perform actions on the Zuul executor.
 
-**vars**
+   .. attr:: override-branch
 
-A dictionary of variables to supply to Ansible.  When inheriting from
-a job (or creating a variant of a job) vars are merged with previous
-definitions.  This means a variable definition with the same name will
-override a previously defined variable, but new variable names will be
-added to the set of defined variables.
+      When Zuul runs jobs for a proposed change, it normally checks
+      out the branch associated with that change on every project
+      present in the job.  If jobs are running on a ref (such as a
+      branch tip or tag), then that ref is normally checked out.  This
+      attribute is used to override that behavior and indicate that
+      this job should, regardless of the branch for the queue item,
+      use the indicated branch instead.  This can be used, for
+      example, to run a previous version of the software (from a
+      stable maintenance branch) under test even if the change being
+      tested applies to a different branch (this is only likely to be
+      useful if there is some cross-branch interaction with some
+      component of the system being tested).  See also the
+      project-specific :attr:`job.required-projects.override-branch`
+      attribute to apply this behavior to a subset of a job's
+      projects.
 
-**dependencies**
-  A list of other jobs upon which this job depends.  Zuul will not
-  start executing this job until all of its dependencies have
-  completed successfully, and if one or more of them fail, this job
-  will not be run.
+   .. attr:: timeout
 
-**allowed-projects**
-  A list of Zuul projects which may use this job.  By default, a job
-  may be used by any other project known to Zuul, however, some jobs
-  use resources or perform actions which are not appropriate for other
-  projects.  In these cases, a list of projects which are allowed to
-  use this job may be supplied.  If this list is not empty, then it
-  must be an exhaustive list of all projects permitted to use the job.
-  The current project (where the job is defined) is not automatically
-  included, so if it should be able to run this job, then it must be
-  explicitly listed.  Default: the empty list (all projects may use
-  the job).
+      The time in minutes that the job should be allowed to run before
+      it is automatically aborted and failure is reported.  If no
+      timeout is supplied, the job may run indefinitely.  Supplying a
+      timeout is highly recommended.
+
+   .. attr:: attempts
+      :default: 3
+
+      When Zuul encounters an error running a job's pre-run playbook,
+      Zuul will stop and restart the job.  Errors during the main or
+      post-run playbook phase of a job are not affected by this
+      parameter (they are reported immediately).  This parameter
+      controls the number of attempts to make before an error is
+      reported.
+
+   .. attr:: pre-run
+
+      The name of a playbook or list of playbooks without file
+      extension to run before the main body of a job.  The full path
+      to the playbook in the repo where the job is defined is
+      expected.
+
+      When a job inherits from a parent, the child's pre-run playbooks
+      are run after the parent's.  See :ref:`job` for more
+      information.
+
+   .. attr:: post-run
+
+      The name of a playbook or list of playbooks without file
+      extension to run after the main body of a job.  The full path to
+      the playbook in the repo where the job is defined is expected.
+
+      When a job inherits from a parent, the child's post-run
+      playbooks are run before the parent's.  See :ref:`job` for more
+      information.
+
+   .. attr:: run
+
+      The name of the main playbook for this job.  This parameter is
+      not normally necessary, as it defaults to a playbook with the
+      same name as the job inside of the ``playbooks/`` directory
+      (e.g., the ``foo`` job would default to ``playbooks/foo``).
+      However, if a playbook with a different name is needed, it can
+      be specified here.  The file extension is not required, but the
+      full path within the repo is.  When a child inherits from a
+      parent, a playbook with the name of the child job is implicitly
+      searched first, before falling back on the playbook used by the
+      parent job (unless the child job specifies a ``run`` attribute,
+      in which case that value is used).  Example:
+
+      .. code-block:: yaml
+
+         run: playbooks/<name of the job>
+
+   .. attr:: roles
+
+      A list of Ansible roles to prepare for the job.  Because a job
+      runs an Ansible playbook, any roles which are used by the job
+      must be prepared and installed by Zuul before the job begins.
+      This value is a list of dictionaries, each of which indicates
+      one of two types of roles: a Galaxy role, which is simply a role
+      that is installed from Ansible Galaxy, or a Zuul role, which is
+      a role provided by a project managed by Zuul.  Zuul roles are
+      able to benefit from speculative merging and cross-project
+      dependencies when used by playbooks in untrusted projects.
+      Roles are added to the Ansible role path in the order they
+      appear on the job -- roles earlier in the list will take
+      precedence over those which follow.
+
+      In the case of job inheritance or variance, the roles used for
+      each of the playbooks run by the job will be only those which
+      were defined along with that playbook.  If a child job inherits
+      from a parent which defines a pre and post playbook, then the
+      pre and post playbooks it inherits from the parent job will run
+      only with the roles that were defined on the parent.  If the
+      child adds its own pre and post playbooks, then any roles added
+      by the child will be available to the child's playbooks.  This
+      is so that a job which inherits from a parent does not
+      inadvertently alter the behavior of the parent's playbooks by
+      the addition of conflicting roles.  Roles added by a child will
+      appear before those it inherits from its parent.
+
+      A project which supplies a role may be structured in one of two
+      configurations: a bare role (in which the role exists at the
+      root of the project), or a contained role (in which the role
+      exists within the ``roles/`` directory of the project, perhaps
+      along with other roles).  In the case of a contained role, the
+      ``roles/`` directory of the project is added to the role search
+      path.  In the case of a bare role, the project itself is added
+      to the role search path.  In case the name of the project is not
+      the name under which the role should be installed (and therefore
+      referenced from Ansible), the ``name`` attribute may be used to
+      specify an alternate.
+
+      A job automatically has the project in which it is defined added
+      to the roles path if that project appears to contain a role or
+      ``roles/`` directory.  By default, the project is added to the
+      path under its own name, however, that may be changed by
+      explicitly listing the project in the roles list in the usual
+      way.
+
+      .. note:: Galaxy roles are not yet implemented.
+
+      .. attr:: galaxy
+
+         The name of the role in Ansible Galaxy.  If this attribute is
+         supplied, Zuul will search Ansible Galaxy for a role by this
+         name and install it.  Mutually exclusive with ``zuul``;
+         either ``galaxy`` or ``zuul`` must be supplied.
+
+      .. attr:: zuul
+
+         The name of a Zuul project which supplies the role.  Mutually
+         exclusive with ``galaxy``; either ``galaxy`` or ``zuul`` must
+         be supplied.
+
+      .. attr:: name
+
+         The installation name of the role.  In the case of a bare
+         role, the role will be made available under this name.
+         Ignored in the case of a contained role.
+
+   .. attr:: required-projects
+
+      A list of other projects which are used by this job.  Any Zuul
+      projects specified here will also be checked out by Zuul into
+      the working directory for the job.  Speculative merging and
+      cross-repo dependencies will be honored.
+
+      The format for this attribute is either a list of strings or
+      dictionaries.  Strings are interpreted as project names;
+      dictionaries, if used, may have the following attributes:
+
+      .. attr:: name
+         :required:
+
+         The name of the required project.
+
+      .. attr:: override-branch
+
+         When Zuul runs jobs for a proposed change, it normally checks
+         out the branch associated with that change on every project
+         present in the job.  If jobs are running on a ref (such as a
+         branch tip or tag), then that ref is normally checked out.
+         This attribute is used to override that behavior and indicate
+         that this job should, regardless of the branch for the queue
+         item, use the indicated branch instead, for only this
+         project.  See also the :attr:`job.override-branch` attribute
+         to apply the same behavior to all projects in a job.
+
+   .. attr:: vars
+
+      A dictionary of variables to supply to Ansible.  When inheriting
+      from a job (or creating a variant of a job) vars are merged with
+      previous definitions.  This means a variable definition with the
+      same name will override a previously defined variable, but new
+      variable names will be added to the set of defined variables.
+
+   .. attr:: dependencies
+
+      A list of other jobs upon which this job depends.  Zuul will not
+      start executing this job until all of its dependencies have
+      completed successfully, and if one or more of them fail, this
+      job will not be run.
+
+   .. attr:: allowed-projects
+
+      A list of Zuul projects which may use this job.  By default, a
+      job may be used by any other project known to Zuul, however,
+      some jobs use resources or perform actions which are not
+      appropriate for other projects.  In these cases, a list of
+      projects which are allowed to use this job may be supplied.  If
+      this list is not empty, then it must be an exhaustive list of
+      all projects permitted to use the job.  The current project
+      (where the job is defined) is not automatically included, so if
+      it should be able to run this job, then it must be explicitly
+      listed.  By default, all projects may use the job.
 
 
 .. _project:
@@ -828,14 +901,14 @@
 ~~~~~~~
 
 A project corresponds to a source code repository with which Zuul is
-configured to interact.  The main responsibility of the `Project`
+configured to interact.  The main responsibility of the project
 configuration item is to specify which jobs should run in which
-pipelines for a given project.  Within each `Project` definition, a
-section for each `Pipeline` may appear.  This project-pipeline
-definition is what determines how a project participates in a
-pipeline.
+pipelines for a given project.  Within each project definition, a
+section for each :ref:`pipeline <pipeline>` may appear.  This
+project-pipeline definition is what determines how a project
+participates in a pipeline.
 
-Consider the following `Project` definition::
+Consider the following project definition::
 
   - project:
       name: yoyodyne
@@ -849,13 +922,13 @@
           - unit-tests
           - integration-tests
 
-The project has two project-pipeline stanzas, one for the `check`
-pipeline, and one for `gate`.  Each specifies which jobs shuld run
-when a change for that project enteres the respective pipeline -- when
-a change enters `check`, the `check-syntax` and `unit-test` jobs are
-run.
+The project has two project-pipeline stanzas, one for the ``check``
+pipeline, and one for ``gate``.  Each specifies which jobs should run
+when a change for that project enters the respective pipeline -- when
+a change enters ``check``, the ``check-syntax`` and ``unit-test`` jobs
+are run.
 
-Pipelines which use the dependent pipeline manager (e.g., the `gate`
+Pipelines which use the dependent pipeline manager (e.g., the ``gate``
 example shown earlier) maintain separate queues for groups of
 projects.  When Zuul serializes a set of changes which represent
 future potential project states, it must know about all of the
@@ -875,24 +948,80 @@
 for a dependent pipeline, set the ``queue`` parameter to the same
 value for those projects.
 
-The `gate` project-pipeline definition above specifies that this
-project participates in the `integrated` shared queue for that
+The ``gate`` project-pipeline definition above specifies that this
+project participates in the ``integrated`` shared queue for that
 pipeline.
 
-In addition to a project-pipeline definition for one or more
-`Pipelines`, the following attributes may appear in a Project:
+.. attr:: project
 
-**name** (required)
-  The name of the project.  If Zuul is configured with two or more
-  unique projects with the same name, the canonical hostname for the
-  project should be included (e.g., `git.example.com/foo`).
+   In addition to a project-pipeline definition for one or more
+   pipelines, the following attributes may appear in a project:
 
-**templates**
-  A list of :ref:`project-template` references; the project-pipeline
-  definitions of each Project Template will be applied to this
-  project.  If more than one template includes jobs for a given
-  pipeline, they will be combined, as will any jobs specified in
-  project-pipeline definitions on the project itself.
+   .. attr:: name
+      :required:
+
+      The name of the project.  If Zuul is configured with two or more
+      unique projects with the same name, the canonical hostname for
+      the project should be included (e.g., `git.example.com/foo`).
+
+   .. attr:: templates
+
+      A list of :ref:`project-template` references; the
+      project-pipeline definitions of each Project Template will be
+      applied to this project.  If more than one template includes
+      jobs for a given pipeline, they will be combined, as will any
+      jobs specified in project-pipeline definitions on the project
+      itself.
+
+   .. attr:: merge-mode
+      :default: merge-resolve
+
+      The merge mode which is used by Git for this project.  Be sure
+      this matches what the remote system which performs merges
+      (i.e., Gerrit or GitHub) uses.  Must be one of the following
+      values:
+
+      .. value:: merge
+
+         Uses the default git merge strategy (recursive).
+
+      .. value:: merge-resolve
+
+         Uses the resolve git merge strategy.  This is a very
+         conservative merge strategy which most closely matches the
+         behavior of Gerrit.
+
+      .. value:: cherry-pick
+
+         Cherry-picks each change onto the branch rather than
+         performing any merges.
+
+   .. attr:: <pipeline>
+
+      Each pipeline that the project participates in should have an
+      entry in the project.  The value for this key should be a
+      dictionary with the following format:
+
+      .. attr:: jobs
+         :required:
+
+         A list of jobs that should be run when items for this project
+         are enqueued into the pipeline.  Each item of this list may
+         be a string, in which case it is treated as a job name, or it
+         may be a dictionary, in which case it is treated as a job
+         variant local to this project and pipeline.  In that case,
+         the format of the dictionary is the same as the top level
+         :attr:`job` definition.  Any attributes set on the job here
+         will override previous versions of the job.
+
+      .. attr:: queue
+
+         If this pipeline is a :value:`dependent
+         <pipeline.manager.dependent>` pipeline, this specifies the
+         name of the shared queue this project is in.  Any projects
+         which interact with each other in tests should be part of the
+         same shared queue in order to ensure that they don't merge
+         changes which break the others.  This is a free-form string;
+         just set the same value for each group of projects.
 
 .. _project-template:
 
@@ -903,9 +1032,10 @@
 which can be re-used by multiple projects.
 
 A Project Template uses the same syntax as a :ref:`project`
-definition, however, in the case of a template, the ``name`` attribute
-does not refer to the name of a project, but rather names the template
-so that it can be referenced in a `Project` definition.
+definition, however, in the case of a template, the
+:attr:`project.name` attribute does not refer to the name of a
+project, but rather names the template so that it can be referenced in
+a project definition.
 
 .. _secret:
 
@@ -936,16 +1066,23 @@
 secrets at all in order to protect against someone proposing a change
 which exposes a secret.
 
-The following attributes are required:
+.. attr:: secret
 
-**name** (required)
-  The name of the secret, used in a :ref:`Job` definition to request
-  the secret.
+   The following attributes must appear on a secret:
 
-**data** (required)
-  A dictionary which will be added to the Ansible variables available
-  to the job.  The values can either be plain text strings, or
-  encrypted values.  See :ref:`encryption` for more information.
+   .. attr:: name
+      :required:
+
+      The name of the secret, used in a :ref:`Job` definition to
+      request the secret.
+
+   .. attr:: data
+      :required:
+
+      A dictionary which will be added to the Ansible variables
+      available to the job.  The values can either be plain text
+      strings, or encrypted values.  See :ref:`encryption` for more
+      information.
 
 .. _nodeset:
 
@@ -957,21 +1094,68 @@
 groups of node types once and referring to them by name, job
 configuration may be simplified.
 
-A Nodeset requires two attributes:
+.. code-block:: yaml
 
-**name** (required)
-  The name of the Nodeset, to be referenced by a :ref:`job`.
+   - nodeset:
+       name: nodeset1
+       nodes:
+         - name: controller
+           label: controller-label
+         - name: compute1
+           label: compute-label
+         - name: compute2
+           label: compute-label
+       groups:
+         - name: ceph-osd
+           nodes:
+             - controller
+         - name: ceph-monitor
+           nodes:
+             - controller
+             - compute1
+             - compute2
 
-**nodes** (required)
-  A list of node definitions, each of which has the following format:
+.. attr:: nodeset
 
-  **name** (required)
-    The name of the node.  This will appear in the Ansible inventory
-    for the job.
+   A Nodeset requires two attributes:
 
-  **label** (required)
-    The Nodepool label for the node.  Zuul will request a node with
-    this label.
+   .. attr:: name
+      :required:
+
+      The name of the Nodeset, to be referenced by a :ref:`job`.
+
+   .. attr:: nodes
+      :required:
+
+      A list of node definitions, each of which has the following format:
+
+      .. attr:: name
+         :required:
+
+         The name of the node.  This will appear in the Ansible inventory
+         for the job.
+
+      .. attr:: label
+         :required:
+
+         The Nodepool label for the node.  Zuul will request a node with
+         this label.
+
+   .. attr:: groups
+
+      Additional groups can be defined which are accessible from the
+      Ansible playbooks.
+
+      .. attr:: name
+         :required:
+
+         The name of the group to be referenced by an Ansible playbook.
+
+      .. attr:: nodes
+         :required:
+
+         The nodes that shall be part of the group. This is specified as a list
+         of strings.
 
 .. _semaphore:
 
@@ -986,20 +1170,27 @@
 
 Semaphores are never subject to dynamic reconfiguration.  If the value
 of a semaphore is changed, it will take effect only when the change
-where it is updated is merged.  An example follows::
+where it is updated is merged.  An example follows:
 
-  - semaphore:
-      name: semaphore-foo
-      max: 5
-  - semaphore:
-      name: semaphore-bar
-      max: 3
+.. code-block:: yaml
 
-The following attributes are available:
+   - semaphore:
+       name: semaphore-foo
+       max: 5
+   - semaphore:
+       name: semaphore-bar
+       max: 3
 
-**name** (required)
-  The name of the semaphore, referenced by jobs.
+.. attr:: semaphore
 
-**max**
-  The maximum number of running jobs which can use this semaphore.
-  Defaults to 1.
+   The following attributes are available:
+
+   .. attr:: name
+      :required:
+
+      The name of the semaphore, referenced by jobs.
+
+   .. attr:: max
+      :default: 1
+
+      The maximum number of running jobs which can use this semaphore.
diff --git a/doc/source/user/gating.rst b/doc/source/user/gating.rst
index 3398892..795df72 100644
--- a/doc/source/user/gating.rst
+++ b/doc/source/user/gating.rst
@@ -41,7 +41,7 @@
 developers to create changes at a rate faster than they can be tested
 and merged.
 
-Zuul's :ref:`dependent pipeline manager<dependent_pipeline_manager>`
+Zuul's :value:`dependent pipeline manager<pipeline.manager.dependent>`
 allows for parallel execution of test jobs for gating while ensuring
 changes are tested correctly, exactly as if they had been tested one
 at a time.  It does this by performing speculative execution of test
@@ -227,7 +227,8 @@
 
 A given dependent pipeline may have as many shared change queues as
 necessary, so groups of related projects may share a change queue
-without interfering with unrelated projects.  Independent pipelines do
+without interfering with unrelated projects.
+:value:`Independent pipelines <pipeline.manager.independent>` do
 not use shared change queues, however, they may still be used to test
 changes across projects using cross-project dependencies.
 
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
index 3eca04b..8c7308b 100644
--- a/doc/source/user/index.rst
+++ b/doc/source/user/index.rst
@@ -4,7 +4,7 @@
 This guide is for all users of Zuul.  If you work on a project where
 Zuul is used to drive automation (whether that's testing proposed
 changes, building artifacts, or deploying builds), this guide will
-help you understand the concepts that underly Zuul, and how to
+help you understand the concepts that underlie Zuul, and how to
 configure it to meet your needs.
 
 
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 80ce3f9..577d147 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -91,23 +91,25 @@
 Job Variables
 ~~~~~~~~~~~~~
 
-Any variables specified in the job definition are available as Ansible
-host variables.  They are added to the `vars` section of the inventory
-file under the `all` hosts group, so they are available to all hosts.
-Simply refer to them by the name specified in the job's `vars`
-section.
+Any variables specified in the job definition (using the
+:attr:`job.vars` attribute) are available as Ansible host variables.
+They are added to the ``vars`` section of the inventory file under the
+``all`` hosts group, so they are available to all hosts.  Simply refer
+to them by the name specified in the job's ``vars`` section.
 
 Secrets
 ~~~~~~~
 
-Secrets also appear as variables available to Ansible.  Unlike job
-variables, these are not added to the inventory file (so that the
-inventory file may be kept for debugging purposes without revealing
-secrets).  But they are still available to Ansible as normal
+:ref:`Secrets <secret>` also appear as variables available to Ansible.
+Unlike job variables, these are not added to the inventory file (so
+that the inventory file may be kept for debugging purposes without
+revealing secrets).  But they are still available to Ansible as normal
 variables.  Because secrets are groups of variables, they will appear
 as a dictionary structure in templates, with the dictionary itself
 being the name of the secret, and its members the individual items in
-the secret.  For example, a secret defined as::
+the secret.  For example, a secret defined as:
+
+.. code-block:: yaml
 
   - secret:
       name: credentials
@@ -119,13 +121,12 @@
 
  {{ credentials.username }} {{ credentials.password }}
 
-.. TODO: xref job vars
 
 Zuul Variables
 ~~~~~~~~~~~~~~
 
 Zuul supplies not only the variables specified by the job definition
-to Ansible, but also some variables from the Zuul itself.
+to Ansible, but also some variables from Zuul itself.
 
 When a pipeline is triggered by an action, it enqueues items which may
 vary based on the pipeline's configuration.  For example, when a new
@@ -137,85 +138,123 @@
 attributes in common.  But other attributes may vary based on the type
 of item.
 
-All items provide the following information as Ansible variables:
+.. var:: zuul
 
-**zuul.build**
-  The UUID of the build.  A build is a single execution of a job.
-  When an item is enqueued into a pipeline, this usually results in
-  one build of each job configured for that item's project.  However,
-  items may be re-enqueued in which case another build may run.  In
-  dependent pipelines, the same job may run multiple times for the
-  same item as circumstances change ahead in the queue.  Each time a
-  job is run, for whatever reason, it is acompanied with a new
-  unique id.
+   All items provide the following information as Ansible variables
+   under the ``zuul`` key:
 
-**zuul.buildset**
-  The build set UUID.  When Zuul runs jobs for an item, the collection
-  of those jobs is known as a buildset.  If the configuration of items
-  ahead in a dependent pipeline changes, Zuul creates a new buildset
-  and restarts all of the jobs.
+   .. var:: build
 
-**zuul.ref**
-  The git ref of the item.  This will be the full path (e.g.,
-  'refs/heads/master' or 'refs/changes/...').
+      The UUID of the build.  A build is a single execution of a job.
+      When an item is enqueued into a pipeline, this usually results
+      in one build of each job configured for that item's project.
+      However, items may be re-enqueued in which case another build
+      may run.  In dependent pipelines, the same job may run multiple
+      times for the same item as circumstances change ahead in the
+      queue.  Each time a job is run, for whatever reason, it is
+      accompanied with a new unique id.
 
-**zuul.pipeline**
-  The name of the pipeline in which the job is being run.
+   .. var:: buildset
 
-**zuul.job**
-  The name of the job being run.
+      The build set UUID.  When Zuul runs jobs for an item, the
+      collection of those jobs is known as a buildset.  If the
+      configuration of items ahead in a dependent pipeline changes,
+      Zuul creates a new buildset and restarts all of the jobs.
 
-**zuul.voting**
-  A boolean indicating whether the job is voting.
+   .. var:: ref
 
-**zuul.project**
-  The item's project.  This is a data structure with the following
-  fields:
+      The git ref of the item.  This will be the full path (e.g.,
+      `refs/heads/master` or `refs/changes/...`).
 
-**zuul.project.name**
-  The name of the project, excluding hostname.  E.g., `org/project`.
+   .. var:: pipeline
 
-**zuul.project.canonical_hostname**
-  The canonical hostname where the project lives.  E.g.,
-  `git.example.com`.
+      The name of the pipeline in which the job is being run.
 
-**zuul.project.canonical_name**
-  The full canonical name of the project including hostname.  E.g.,
-  `git.example.com/org/project`.
+   .. var:: job
 
-**zuul.tenant**
-  The name of the current Zuul tenant.
+      The name of the job being run.
 
-**zuul.jobtags**
-  A list of tags associated with the job.  Not to be confused with git
-  tags, these are simply free-form text fields that can be used by the
-  job for reporting or classification purposes.
+   .. var:: voting
 
-**zuul.items**
+      A boolean indicating whether the job is voting.
 
-  A list of dictionaries, each representing an item being tested with
-  this change with the format:
+   .. var:: project
 
-  **project.name**
-    The name of the project, excluding hostname.  E.g., `org/project`.
-  
-  **project.canonical_hostname**
-    The canonical hostname where the project lives.  E.g.,
-    `git.example.com`.
-  
-  **project.canonical_name**
-    The full canonical name of the project including hostname.  E.g.,
-    `git.example.com/org/project`.
-  
-  **branch**
-    The target branch of the change (without the `refs/heads/` prefix).
-  
-  **change**
-    The identifier for the change.
-  
-  **patchset**
-    The patchset identifier for the change.  If a change is revised,
-    this will have a different value.
+      The item's project.  This is a data structure with the following
+      fields:
+
+      .. var:: name
+
+         The name of the project, excluding hostname.  E.g., `org/project`.
+
+      .. var:: short_name
+
+         The name of the project, excluding directories or
+         organizations.  E.g., `project`.
+
+      .. var:: canonical_hostname
+
+         The canonical hostname where the project lives.  E.g.,
+         `git.example.com`.
+
+      .. var:: canonical_name
+
+         The full canonical name of the project including hostname.
+         E.g., `git.example.com/org/project`.
+
+   .. var:: tenant
+
+      The name of the current Zuul tenant.
+
+   .. var:: jobtags
+
+      A list of tags associated with the job.  Not to be confused with
+      git tags, these are simply free-form text fields that can be
+      used by the job for reporting or classification purposes.
+
+   .. var:: items
+      :type: list
+
+      A list of dictionaries, each representing an item being tested
+      with this change with the format:
+
+      .. var:: project
+
+         The item's project.  This is a data structure with the
+         following fields:
+
+         .. var:: name
+
+            The name of the project, excluding hostname.  E.g.,
+            `org/project`.
+
+         .. var:: short_name
+
+            The name of the project, excluding directories or
+            organizations.  E.g., `project`.
+
+         .. var:: canonical_hostname
+
+            The canonical hostname where the project lives.  E.g.,
+            `git.example.com`.
+
+         .. var:: canonical_name
+
+            The full canonical name of the project including hostname.
+            E.g., `git.example.com/org/project`.
+
+      .. var:: branch
+
+         The target branch of the change (without the `refs/heads/` prefix).
+
+      .. var:: change
+
+         The identifier for the change.
+
+      .. var:: patchset
+
+         The patchset identifier for the change.  If a change is
+         revised, this will have a different value.
 
 Change Items
 ++++++++++++
@@ -225,15 +264,21 @@
 change or a GitHub pull request).  The following additional variables
 are available:
 
-**zuul.branch**
-  The target branch of the change (without the `refs/heads/` prefix).
+.. var:: zuul
+   :hidden:
 
-**zuul.change**
-  The identifier for the change.
+   .. var:: branch
 
-**zuul.patchset**
-  The patchset identifier for the change.  If a change is revised,
-  this will have a different value.
+      The target branch of the change (without the `refs/heads/` prefix).
+
+   .. var:: change
+
+      The identifier for the change.
+
+   .. var:: patchset
+
+      The patchset identifier for the change.  If a change is revised,
+      this will have a different value.
 
 Branch Items
 ++++++++++++
@@ -244,18 +289,25 @@
 of verifying the current condition of the branch.  The following
 additional variables are available:
 
-**zuul.branch**
-  The name of the item's branch (without the `refs/heads/` prefix).
+.. var:: zuul
+   :hidden:
 
-**zuul.oldrev**
-  If the item was enqueued as the result of a change merging or being
-  pushed to the branch, the git sha of the old revision will be
-  included here.  Otherwise, this variable will be undefined.
+   .. var:: branch
 
-**zuul.newrev**
-  If the item was enqueued as the result of a change merging or being
-  pushed to the branch, the git sha of the new revision will be
-  included here.  Otherwise, this variable will be undefined.
+      The name of the item's branch (without the `refs/heads/`
+      prefix).
+
+   .. var:: oldrev
+
+      If the item was enqueued as the result of a change merging or
+      being pushed to the branch, the git sha of the old revision will
+      be included here.  Otherwise, this variable will be undefined.
+
+   .. var:: newrev
+
+      If the item was enqueued as the result of a change merging or
+      being pushed to the branch, the git sha of the new revision will
+      be included here.  Otherwise, this variable will be undefined.
 
 Tag Items
 +++++++++
@@ -264,20 +316,24 @@
 tag was created or deleted.  The following additional variables are
 available:
 
-**zuul.tag**
-  The name of the item's tag (without the `refs/tags/` prefix).
+.. var:: zuul
+   :hidden:
 
-**zuul.oldrev**
-  If the item was enqueued as the result of a tag being deleted, the
-  previous git sha of the tag will be included here.  If the tag was
-  created, this will be set to the value
-  0000000000000000000000000000000000000000.
+   .. var:: tag
 
-**zuul.newrev**
-  If the item was enqueued as the result of a tag being created, the
-  new git sha of the tag will be included here.  If the tag was
-  deleted, this will be set to the value
-  0000000000000000000000000000000000000000.
+      The name of the item's tag (without the `refs/tags/` prefix).
+
+   .. var:: oldrev
+
+      If the item was enqueued as the result of a tag being deleted,
+      the previous git sha of the tag will be included here.  If the
+      tag was created, this variable will be undefined.
+
+   .. var:: newrev
+
+      If the item was enqueued as the result of a tag being created,
+      the new git sha of the tag will be included here.  If the tag
+      was deleted, this variable will be undefined.
 
 Ref Items
 +++++++++
@@ -287,17 +343,20 @@
 to identify the ref.  The following additional variables are
 available:
 
-**zuul.oldrev**
-  If the item was enqueued as the result of a ref being deleted, the
-  previous git sha of the ref will be included here.  If the ref was
-  created, this will be set to the value
-  0000000000000000000000000000000000000000.
+.. var:: zuul
+   :hidden:
 
-**zuul.newrev**
-  If the item was enqueued as the result of a ref being created, the
-  new git sha of the ref will be included here.  If the ref was
-  deleted, this will be set to the value
-  0000000000000000000000000000000000000000.
+   .. var:: oldrev
+
+      If the item was enqueued as the result of a ref being deleted,
+      the previous git sha of the ref will be included here.  If the
+      ref was created, this variable will be undefined.
+
+   .. var:: newrev
+
+      If the item was enqueued as the result of a ref being created,
+      the new git sha of the ref will be included here.  If the ref
+      was deleted, this variable will be undefined.
 
 Working Directory
 +++++++++++++++++
@@ -305,15 +364,29 @@
 Additionally, some information about the working directory and the
 executor running the job is available:
 
-**zuul.executor.hostname**
-  The hostname of the executor.
+.. var:: zuul
+   :hidden:
 
-**zuul.executor.src_root**
-  The path to the source directory.
+   .. var:: executor
 
-**zuul.executor.log_root**
-  The path to the logs directory.
+      A number of values related to the executor running the job are
+      available:
 
+      .. var:: hostname
+
+         The hostname of the executor.
+
+      .. var:: src_root
+
+         The path to the source directory.
+
+      .. var:: log_root
+
+         The path to the logs directory.
+
+      .. var:: work_root
+
+         The path to the working directory.
 
 .. _user_sitewide_variables:
 
@@ -349,7 +422,9 @@
 
 The job may return some values to Zuul to affect its behavior.  To
 return a value, use the *zuul_return* Ansible module in a job
-playbook.  For example::
+playbook.  For example:
+
+.. code-block:: yaml
 
   tasks:
     - zuul_return:
@@ -362,7 +437,9 @@
 
 Several uses of these values are planned, but the only currently
 implemented use is to set the log URL for a build.  To do so, set the
-**zuul.log_url** value.  For example::
+**zuul.log_url** value.  For example:
+
+.. code-block:: yaml
 
   tasks:
     - zuul_return:
diff --git a/etc/zuul.conf-sample b/etc/zuul.conf-sample
index e6375a5..6e79f9b 100644
--- a/etc/zuul.conf-sample
+++ b/etc/zuul.conf-sample
@@ -23,12 +23,11 @@
 git_dir=/var/lib/zuul/git
 ;git_user_email=zuul@example.com
 ;git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 default_username=zuul
-trusted_ro_dirs=/opt/zuul-scripts:/var/cache
-trusted_rw_dirs=/opt/zuul-logs
+trusted_ro_paths=/opt/zuul-scripts:/var/cache
+trusted_rw_paths=/opt/zuul-logs
 
 [web]
 listen_address=127.0.0.1
diff --git a/test-requirements.txt b/test-requirements.txt
index 914dcf0..dcc67e2 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,7 +11,6 @@
 testrepository>=0.0.17
 testtools>=0.9.32
 sphinxcontrib-programoutput
-oslosphinx
 mock
 PyMySQL
 mypy
diff --git a/tests/base.py b/tests/base.py
index 568e15f..7209c87 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -139,7 +139,8 @@
                   'Verified': ('Verified', -2, 2)}
 
     def __init__(self, gerrit, number, project, branch, subject,
-                 status='NEW', upstream_root=None, files={}):
+                 status='NEW', upstream_root=None, files={},
+                 parent=None):
         self.gerrit = gerrit
         self.source = gerrit
         self.reported = 0
@@ -174,16 +175,18 @@
             'url': 'https://hostname/%s' % number}
 
         self.upstream_root = upstream_root
-        self.addPatchset(files=files)
+        self.addPatchset(files=files, parent=parent)
         self.data['submitRecords'] = self.getSubmitRecords()
         self.open = status == 'NEW'
 
-    def addFakeChangeToRepo(self, msg, files, large):
+    def addFakeChangeToRepo(self, msg, files, large, parent):
         path = os.path.join(self.upstream_root, self.project)
         repo = git.Repo(path)
+        if parent is None:
+            parent = 'refs/tags/init'
         ref = GerritChangeReference.create(
             repo, '1/%s/%s' % (self.number, self.latest_patchset),
-            'refs/tags/init')
+            parent)
         repo.head.reference = ref
         zuul.merger.merger.reset_repo_to_head(repo)
         repo.git.clean('-x', '-f', '-d')
@@ -211,7 +214,7 @@
         repo.heads['master'].checkout()
         return r
 
-    def addPatchset(self, files=None, large=False):
+    def addPatchset(self, files=None, large=False, parent=None):
         self.latest_patchset += 1
         if not files:
             fn = '%s-%s' % (self.branch.replace('/', '_'), self.number)
@@ -219,7 +222,7 @@
                     (self.branch, self.number, self.latest_patchset))
             files = {fn: data}
         msg = self.subject + '-' + str(self.latest_patchset)
-        c = self.addFakeChangeToRepo(msg, files, large)
+        c = self.addFakeChangeToRepo(msg, files, large, parent)
         ps_files = [{'file': '/COMMIT_MSG',
                      'type': 'ADDED'},
                     {'file': 'README',
@@ -469,12 +472,12 @@
         self.upstream_root = upstream_root
 
     def addFakeChange(self, project, branch, subject, status='NEW',
-                      files=None):
+                      files=None, parent=None):
         """Add a change to the fake Gerrit."""
         self.change_number += 1
         c = FakeGerritChange(self, self.change_number, project, branch,
                              subject, upstream_root=self.upstream_root,
-                             status=status, files=files)
+                             status=status, files=files, parent=parent)
         self.changes[self.change_number] = c
         return c
 
@@ -551,6 +554,98 @@
     _points_to_commits_only = True
 
 
+class FakeGithub(object):
+
+    class FakeUser(object):
+        def __init__(self, login):
+            self.login = login
+            self.name = "Github User"
+            self.email = "github.user@example.com"
+
+    class FakeBranch(object):
+        def __init__(self, branch='master'):
+            self.name = branch
+
+    class FakeStatus(object):
+        def __init__(self, state, url, description, context, user):
+            self._state = state
+            self._url = url
+            self._description = description
+            self._context = context
+            self._user = user
+
+        def as_dict(self):
+            return {
+                'state': self._state,
+                'url': self._url,
+                'description': self._description,
+                'context': self._context,
+                'creator': {
+                    'login': self._user
+                }
+            }
+
+    class FakeCommit(object):
+        def __init__(self):
+            self._statuses = []
+
+        def set_status(self, state, url, description, context, user):
+            status = FakeGithub.FakeStatus(
+                state, url, description, context, user)
+            # always insert a status to the front of the list, to represent
+            # the last status provided for a commit.
+            self._statuses.insert(0, status)
+
+        def statuses(self):
+            return self._statuses
+
+    class FakeRepository(object):
+        def __init__(self):
+            self._branches = [FakeGithub.FakeBranch()]
+            self._commits = {}
+
+        def branches(self, protected=False):
+            if protected:
+                # simulate there is no protected branch
+                return []
+            return self._branches
+
+        def create_status(self, sha, state, url, description, context,
+                          user='zuul'):
+            # Since we're bypassing github API, which would require a user, we
+            # default the user as 'zuul' here.
+            commit = self._commits.get(sha, None)
+            if commit is None:
+                commit = FakeGithub.FakeCommit()
+                self._commits[sha] = commit
+            commit.set_status(state, url, description, context, user)
+
+        def commit(self, sha):
+            commit = self._commits.get(sha, None)
+            if commit is None:
+                commit = FakeGithub.FakeCommit()
+                self._commits[sha] = commit
+            return commit
+
+    def __init__(self):
+        self._repos = {}
+
+    def user(self, login):
+        return self.FakeUser(login)
+
+    def repository(self, owner, proj):
+        return self._repos.get((owner, proj), None)
+
+    def repo_from_project(self, project):
+        # This is a convenience method for the tests.
+        owner, proj = project.split('/')
+        return self.repository(owner, proj)
+
+    def addProject(self, project):
+        owner, proj = project.name.split('/')
+        self._repos[(owner, proj)] = self.FakeRepository()
+
+
 class FakeGithubPullRequest(object):
 
     def __init__(self, github, number, project, branch,
@@ -863,6 +958,13 @@
         }
         return (name, data)
 
+    def setMerged(self, commit_message):
+        self.is_merged = True
+        self.merge_message = commit_message
+
+        repo = self._getRepo()
+        repo.heads[self.branch].commit = repo.commit(self.head_sha)
+
 
 class FakeGithubConnection(githubconnection.GithubConnection):
     log = logging.getLogger("zuul.test.FakeGithubConnection")
@@ -879,6 +981,13 @@
         self.merge_failure = False
         self.merge_not_allowed_count = 0
         self.reports = []
+        self.github_client = FakeGithub()
+
+    def getGithubClient(self,
+                        project=None,
+                        user_id=None,
+                        use_app=True):
+        return self.github_client
 
     def openFakePullRequest(self, project, branch, subject, files=[],
                             body=None):
@@ -892,7 +1001,7 @@
     def getPushEvent(self, project, ref, old_rev=None, new_rev=None,
                      added_files=[], removed_files=[], modified_files=[]):
         if not old_rev:
-            old_rev = '00000000000000000000000000000000'
+            old_rev = '0' * 40
         if not new_rev:
             new_rev = random_sha1()
         name = 'push'
@@ -927,6 +1036,12 @@
             data=payload, headers=headers)
         return urllib.request.urlopen(req)
 
+    def addProject(self, project):
+        # use the original method here and additionally register it in the
+        # fake github
+        super(FakeGithubConnection, self).addProject(project)
+        self.getGithubClient(project).addProject(project)
+
     def getPull(self, project, number):
         pr = self.pull_requests[number - 1]
         data = {
@@ -965,14 +1080,6 @@
         pr = self.pull_requests[number - 1]
         return pr.reviews
 
-    def getUser(self, login):
-        data = {
-            'username': login,
-            'name': 'Github User',
-            'email': 'github.user@example.com'
-        }
-        return data
-
     def getRepoPermission(self, project, login):
         owner, proj = project.split('/')
         for pr in self.pull_requests:
@@ -989,12 +1096,6 @@
     def real_getGitUrl(self, project):
         return super(FakeGithubConnection, self).getGitUrl(project)
 
-    def getProjectBranches(self, project):
-        """Masks getProjectBranches since we don't have a real github"""
-
-        # just returns master for now
-        return ['master']
-
     def commentPull(self, project, pr_number, message):
         # record that this got reported
         self.reports.append((project, pr_number, 'comment'))
@@ -1011,30 +1112,15 @@
             self.merge_not_allowed_count -= 1
             raise MergeFailure('Merge was not successful due to mergeability'
                                ' conflict')
-        pull_request.is_merged = True
-        pull_request.merge_message = commit_message
-
-    def getCommitStatuses(self, project, sha):
-        return self.statuses.get(project, {}).get(sha, [])
+        pull_request.setMerged(commit_message)
 
     def setCommitStatus(self, project, sha, state, url='', description='',
                         context='default', user='zuul'):
-        # record that this got reported
+        # record that this got reported and call original method
         self.reports.append((project, sha, 'status', (user, context, state)))
-        # always insert a status to the front of the list, to represent
-        # the last status provided for a commit.
-        # Since we're bypassing github API, which would require a user, we
-        # default the user as 'zuul' here.
-        self.statuses.setdefault(project, {}).setdefault(sha, [])
-        self.statuses[project][sha].insert(0, {
-            'state': state,
-            'url': url,
-            'description': description,
-            'context': context,
-            'creator': {
-                'login': user
-            }
-        })
+        super(FakeGithubConnection, self).setCommitStatus(
+            project, sha, state,
+            url=url, description=description, context=context)
 
     def labelPull(self, project, pr_number, label):
         # record that this got reported
@@ -2080,7 +2166,7 @@
                         self.copyDirToRepo(project,
                                            os.path.join(git_path, reponame))
         # Make test_root persist after ansible run for .flag test
-        self.config.set('executor', 'trusted_rw_dirs', self.test_root)
+        self.config.set('executor', 'trusted_rw_paths', self.test_root)
         self.setupAllProjectKeys()
 
     def setupSimpleLayout(self):
@@ -2122,15 +2208,15 @@
         config = [{'tenant':
                    {'name': 'tenant-one',
                     'source': {driver:
-                               {'config-projects': ['common-config'],
+                               {'config-projects': ['org/common-config'],
                                 'untrusted-projects': untrusted_projects}}}}]
         f.write(yaml.dump(config).encode('utf8'))
         f.close()
         self.config.set('scheduler', 'tenant_config',
                         os.path.join(FIXTURE_DIR, f.name))
 
-        self.init_repo('common-config')
-        self.addCommitToRepo('common-config', 'add content from fixture',
+        self.init_repo('org/common-config')
+        self.addCommitToRepo('org/common-config', 'add content from fixture',
                              files, branch='master', tag='init')
 
         return True
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
index 3f62c4c..cd343d0 100644
--- a/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/check-vars.yaml
@@ -13,6 +13,7 @@
           - zuul.executor.hostname is defined
           - zuul.executor.src_root is defined
           - zuul.executor.log_root is defined
+          - zuul.executor.work_root is defined
 
     - name: Assert zuul.project variables are valid.
       assert:
@@ -29,4 +30,4 @@
         that:
           - vartest_job == 'vartest_job'
           - vartest_secret.value == 'vartest_secret'
-          - vartest_site == 'vartest_site'
\ No newline at end of file
+          - vartest_site == 'vartest_site'
diff --git a/tests/fixtures/config/disk-accountant/git/common-config/playbooks/dd-big-empty-file.yaml b/tests/fixtures/config/disk-accountant/git/common-config/playbooks/dd-big-empty-file.yaml
new file mode 100644
index 0000000..95ab870
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/git/common-config/playbooks/dd-big-empty-file.yaml
@@ -0,0 +1,6 @@
+- hosts: localhost
+  tasks:
+    - command: dd if=/dev/zero of=toobig bs=1M count=2
+    - wait_for:
+        delay: 10
+        path: /
diff --git a/tests/fixtures/config/disk-accountant/git/common-config/zuul.yaml b/tests/fixtures/config/disk-accountant/git/common-config/zuul.yaml
new file mode 100644
index 0000000..83a5158
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/git/common-config/zuul.yaml
@@ -0,0 +1,22 @@
+- pipeline:
+    name: check
+    manager: independent
+    allow-secrets: true
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- job:
+    name: dd-big-empty-file
+
+- project:
+    name: org/project
+    check:
+      jobs:
+        - dd-big-empty-file
diff --git a/tests/fixtures/config/disk-accountant/git/org_project/README b/tests/fixtures/config/disk-accountant/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/disk-accountant/main.yaml b/tests/fixtures/config/disk-accountant/main.yaml
new file mode 100644
index 0000000..208e274
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-projects:
+          - common-config
+        untrusted-projects:
+          - org/project
diff --git a/tests/fixtures/config/multi-driver/git/common-config/playbooks/project-gerrit.yaml b/tests/fixtures/config/multi-driver/git/org_common-config/playbooks/project-gerrit.yaml
similarity index 100%
rename from tests/fixtures/config/multi-driver/git/common-config/playbooks/project-gerrit.yaml
rename to tests/fixtures/config/multi-driver/git/org_common-config/playbooks/project-gerrit.yaml
diff --git a/tests/fixtures/config/multi-driver/git/common-config/playbooks/project1-github.yaml b/tests/fixtures/config/multi-driver/git/org_common-config/playbooks/project1-github.yaml
similarity index 100%
rename from tests/fixtures/config/multi-driver/git/common-config/playbooks/project1-github.yaml
rename to tests/fixtures/config/multi-driver/git/org_common-config/playbooks/project1-github.yaml
diff --git a/tests/fixtures/config/multi-driver/git/common-config/zuul.yaml b/tests/fixtures/config/multi-driver/git/org_common-config/zuul.yaml
similarity index 74%
rename from tests/fixtures/config/multi-driver/git/common-config/zuul.yaml
rename to tests/fixtures/config/multi-driver/git/org_common-config/zuul.yaml
index 2dab845..7b5a77c 100644
--- a/tests/fixtures/config/multi-driver/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/multi-driver/git/org_common-config/zuul.yaml
@@ -1,5 +1,5 @@
 - pipeline:
-    name: check_github
+    name: check
     manager: independent
     trigger:
       github:
@@ -8,25 +8,23 @@
             - opened
             - changed
             - reopened
-    success:
-      github:
-        status: 'success'
-    failure:
-      github:
-        status: 'failure'
-
-- pipeline:
-    name: check_gerrit
-    manager: independent
-    trigger:
       gerrit:
         - event: patchset-created
     success:
+      github:
+        status: 'success'
       gerrit:
-        verify: 1
+        Verified: 1
     failure:
+      github:
+        status: 'failure'
       gerrit:
-        verify: 1
+        Verified: 1
+    start:
+      github:
+        comment: true
+      gerrit:
+        Verified: 0
 
 - job:
     name: project-gerrit
@@ -35,12 +33,12 @@
 
 - project:
     name: org/project
-    check_gerrit:
+    check:
       jobs:
         - project-gerrit
 
 - project:
     name: org/project1
-    check_github:
+    check:
       jobs:
         - project1-github
diff --git a/tests/fixtures/config/multi-driver/main.yaml b/tests/fixtures/config/multi-driver/main.yaml
index 301df38..4eed523 100644
--- a/tests/fixtures/config/multi-driver/main.yaml
+++ b/tests/fixtures/config/multi-driver/main.yaml
@@ -3,7 +3,7 @@
     source:
       github:
         config-projects:
-          - common-config
+          - org/common-config
         untrusted-projects:
           - org/project1
       gerrit:
diff --git a/tests/fixtures/config/multi-tenant/main.yaml b/tests/fixtures/config/multi-tenant/main.yaml
index 3ae7756..4916905 100644
--- a/tests/fixtures/config/multi-tenant/main.yaml
+++ b/tests/fixtures/config/multi-tenant/main.yaml
@@ -10,6 +10,7 @@
 
 - tenant:
     name: tenant-two
+    max-nodes-per-job: 10
     source:
       gerrit:
         config-projects:
diff --git a/tests/fixtures/config/push-reqs/git/common-config/playbooks/job1.yaml b/tests/fixtures/config/push-reqs/git/org_common-config/playbooks/job1.yaml
similarity index 100%
rename from tests/fixtures/config/push-reqs/git/common-config/playbooks/job1.yaml
rename to tests/fixtures/config/push-reqs/git/org_common-config/playbooks/job1.yaml
diff --git a/tests/fixtures/config/push-reqs/git/common-config/zuul.yaml b/tests/fixtures/config/push-reqs/git/org_common-config/zuul.yaml
similarity index 100%
rename from tests/fixtures/config/push-reqs/git/common-config/zuul.yaml
rename to tests/fixtures/config/push-reqs/git/org_common-config/zuul.yaml
diff --git a/tests/fixtures/config/push-reqs/main.yaml b/tests/fixtures/config/push-reqs/main.yaml
index d9f1a42..b58db73 100644
--- a/tests/fixtures/config/push-reqs/main.yaml
+++ b/tests/fixtures/config/push-reqs/main.yaml
@@ -3,7 +3,7 @@
     source:
       github:
         config-projects:
-          - common-config
+          - org/common-config
         untrusted-projects:
           - org/project1
       gerrit:
diff --git a/tests/fixtures/config/tenant-parser/unprotected-branches.yaml b/tests/fixtures/config/tenant-parser/unprotected-branches.yaml
new file mode 100644
index 0000000..bf2feef
--- /dev/null
+++ b/tests/fixtures/config/tenant-parser/unprotected-branches.yaml
@@ -0,0 +1,11 @@
+- tenant:
+    name: tenant-one
+    exclude-unprotected-branches: true
+    source:
+      gerrit:
+        config-projects:
+          - common-config:
+              exclude-unprotected-branches: false
+        untrusted-projects:
+          - org/project1
+          - org/project2
diff --git a/tests/fixtures/config/unprotected-branches/git/org_common-config/zuul.yaml b/tests/fixtures/config/unprotected-branches/git/org_common-config/zuul.yaml
new file mode 100644
index 0000000..c0fbf0d
--- /dev/null
+++ b/tests/fixtures/config/unprotected-branches/git/org_common-config/zuul.yaml
@@ -0,0 +1,19 @@
+- pipeline:
+    name: check
+    manager: independent
+    trigger:
+      github:
+        - event: pull_request
+          action:
+            - opened
+            - changed
+            - reopened
+    success:
+      github:
+        status: 'success'
+    failure:
+      github:
+        status: 'failure'
+    start:
+      github:
+        comment: true
diff --git a/tests/fixtures/config/unprotected-branches/git/org_project1/README b/tests/fixtures/config/unprotected-branches/git/org_project1/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/unprotected-branches/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/multi-driver/git/common-config/playbooks/project-gerrit.yaml b/tests/fixtures/config/unprotected-branches/git/org_project1/playbooks/project-test.yaml
similarity index 100%
copy from tests/fixtures/config/multi-driver/git/common-config/playbooks/project-gerrit.yaml
copy to tests/fixtures/config/unprotected-branches/git/org_project1/playbooks/project-test.yaml
diff --git a/tests/fixtures/config/unprotected-branches/git/org_project1/zuul.yaml b/tests/fixtures/config/unprotected-branches/git/org_project1/zuul.yaml
new file mode 100644
index 0000000..31abadf
--- /dev/null
+++ b/tests/fixtures/config/unprotected-branches/git/org_project1/zuul.yaml
@@ -0,0 +1,8 @@
+- job:
+    name: project-test
+
+- project:
+    name: org/project1
+    check:
+      jobs:
+        - project-test
diff --git a/tests/fixtures/config/unprotected-branches/git/org_project2/README b/tests/fixtures/config/unprotected-branches/git/org_project2/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/unprotected-branches/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/unprotected-branches/git/org_project2/zuul.yaml b/tests/fixtures/config/unprotected-branches/git/org_project2/zuul.yaml
new file mode 100644
index 0000000..64d316d
--- /dev/null
+++ b/tests/fixtures/config/unprotected-branches/git/org_project2/zuul.yaml
@@ -0,0 +1 @@
+This zuul.yaml is intentionally broken and should not be loaded on startup.
diff --git a/tests/fixtures/config/unprotected-branches/main.yaml b/tests/fixtures/config/unprotected-branches/main.yaml
new file mode 100644
index 0000000..8078d37
--- /dev/null
+++ b/tests/fixtures/config/unprotected-branches/main.yaml
@@ -0,0 +1,10 @@
+- tenant:
+    name: tenant-one
+    source:
+      github:
+        config-projects:
+          - org/common-config
+        untrusted-projects:
+          - org/project1
+          - org/project2:
+              exclude-unprotected-branches: true
diff --git a/tests/fixtures/layouts/autohold.yaml b/tests/fixtures/layouts/autohold.yaml
new file mode 100644
index 0000000..015e562
--- /dev/null
+++ b/tests/fixtures/layouts/autohold.yaml
@@ -0,0 +1,24 @@
+- pipeline:
+    name: check
+    manager: independent
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        Verified: 1
+    failure:
+      gerrit:
+        Verified: -1
+
+- job:
+    name: project-test2
+    nodes:
+      - name: controller
+        label: label1
+
+- project:
+    name: org/project
+    check:
+      jobs:
+        - project-test2
diff --git a/tests/fixtures/layouts/reporting-github.yaml b/tests/fixtures/layouts/reporting-github.yaml
index 0fdec85..ddb0588 100644
--- a/tests/fixtures/layouts/reporting-github.yaml
+++ b/tests/fixtures/layouts/reporting-github.yaml
@@ -35,6 +35,27 @@
         comment: false
 
 - pipeline:
+    name: this_is_a_really_stupid_long_name_for_a_pipeline_that_should_never_be_used_in_production_becuase_it_will_be_too_long_for_the_API_to_make_use_of_without_crashing
+    description: Uncommon reporting
+    manager: independent
+    trigger:
+      github:
+        - event: pull_request
+          action: comment
+          comment: 'long pipeline'
+    start:
+      github:
+        status: 'pending'
+    success:
+      github:
+        comment: false
+        status: 'success'
+        status-url: http://logs.example.com/{tenant.name}/{pipeline.name}/{change.project}/{change.number}/{buildset.uuid}/
+    failure:
+      github:
+        comment: false
+
+- pipeline:
     name: push-reporting
     description: Uncommon reporting
     manager: independent
@@ -68,6 +89,9 @@
     reporting:
       jobs:
         - project-test1
+    this_is_a_really_stupid_long_name_for_a_pipeline_that_should_never_be_used_in_production_becuase_it_will_be_too_long_for_the_API_to_make_use_of_without_crashing:
+      jobs:
+        - project-test1
 
 - project:
     name: org/project2
diff --git a/tests/fixtures/layouts/reporting-multiple-github.yaml b/tests/fixtures/layouts/reporting-multiple-github.yaml
new file mode 100644
index 0000000..22fa1e7
--- /dev/null
+++ b/tests/fixtures/layouts/reporting-multiple-github.yaml
@@ -0,0 +1,40 @@
+- pipeline:
+    name: check
+    description: Standard check
+    manager: independent
+    trigger:
+      github:
+        - event: pull_request
+          action: opened
+      github_ent:
+        - event: pull_request
+          action: opened
+    start:
+      github:
+        status: 'pending'
+        comment: false
+      github_ent:
+        status: 'pending'
+        comment: false
+    success:
+      github:
+        status: 'success'
+      github_ent:
+        status: 'success'
+
+- job:
+    name: project1-test1
+- job:
+    name: project2-test2
+
+- project:
+    name: org/project1
+    check:
+      jobs:
+        - project1-test1
+
+- project:
+    name: org/project2
+    check:
+      jobs:
+        - project2-test2
diff --git a/tests/fixtures/zuul-connections-gerrit-and-github.conf b/tests/fixtures/zuul-connections-gerrit-and-github.conf
index 04f2cc2..49e53c7 100644
--- a/tests/fixtures/zuul-connections-gerrit-and-github.conf
+++ b/tests/fixtures/zuul-connections-gerrit-and-github.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-connections-merger.conf b/tests/fixtures/zuul-connections-merger.conf
index df465d5..771fc50 100644
--- a/tests/fixtures/zuul-connections-merger.conf
+++ b/tests/fixtures/zuul-connections-merger.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-connections-multiple-gerrits.conf b/tests/fixtures/zuul-connections-multiple-gerrits.conf
index 66a6926..c6eb39e 100644
--- a/tests/fixtures/zuul-connections-multiple-gerrits.conf
+++ b/tests/fixtures/zuul-connections-multiple-gerrits.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/merger-git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-connections-same-gerrit.conf b/tests/fixtures/zuul-connections-same-gerrit.conf
index 3262294..a4f558d 100644
--- a/tests/fixtures/zuul-connections-same-gerrit.conf
+++ b/tests/fixtures/zuul-connections-same-gerrit.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/merger-git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-disk-accounting.conf b/tests/fixtures/zuul-disk-accounting.conf
new file mode 100644
index 0000000..6f02fa4
--- /dev/null
+++ b/tests/fixtures/zuul-disk-accounting.conf
@@ -0,0 +1,27 @@
+[gearman]
+server=127.0.0.1
+
+[scheduler]
+tenant_config=main.yaml
+
+[merger]
+git_dir=/tmp/zuul-test/merger-git
+git_user_email=zuul@example.com
+git_user_name=zuul
+
+[executor]
+git_dir=/tmp/zuul-test/executor-git
+disk_limit_per_job=1
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=fake_id_rsa_path
+
+[connection smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/fixtures/zuul-git-driver.conf b/tests/fixtures/zuul-git-driver.conf
index 4321871..b24b0a1 100644
--- a/tests/fixtures/zuul-git-driver.conf
+++ b/tests/fixtures/zuul-git-driver.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-github-driver.conf b/tests/fixtures/zuul-github-driver.conf
index 732c30a..a96bde2 100644
--- a/tests/fixtures/zuul-github-driver.conf
+++ b/tests/fixtures/zuul-github-driver.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-push-reqs.conf b/tests/fixtures/zuul-push-reqs.conf
index cb699e0..2217f94 100644
--- a/tests/fixtures/zuul-push-reqs.conf
+++ b/tests/fixtures/zuul-push-reqs.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-sql-driver-bad.conf b/tests/fixtures/zuul-sql-driver-bad.conf
index 1f1b75f..e2a9438 100644
--- a/tests/fixtures/zuul-sql-driver-bad.conf
+++ b/tests/fixtures/zuul-sql-driver-bad.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/merger-git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul-sql-driver.conf b/tests/fixtures/zuul-sql-driver.conf
index 688d65b..e0ff3d5 100644
--- a/tests/fixtures/zuul-sql-driver.conf
+++ b/tests/fixtures/zuul-sql-driver.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/merger-git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/fixtures/zuul.conf b/tests/fixtures/zuul.conf
index d6de76c..7bc8c59 100644
--- a/tests/fixtures/zuul.conf
+++ b/tests/fixtures/zuul.conf
@@ -8,7 +8,6 @@
 git_dir=/tmp/zuul-test/merger-git
 git_user_email=zuul@example.com
 git_user_name=zuul
-zuul_url=http://zuul.example.com/p
 
 [executor]
 git_dir=/tmp/zuul-test/executor-git
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index d08c6a1..1ba4ed9 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -182,6 +182,7 @@
 
     def test_tenant_groups3(self):
         tenant = self.sched.abide.tenants.get('tenant-one')
+        self.assertEqual(False, tenant.exclude_unprotected_branches)
         self.assertEqual(['common-config'],
                          [x.name for x in tenant.config_projects])
         self.assertEqual(['org/project1', 'org/project2'],
@@ -212,6 +213,29 @@
                         project2_config.pipelines['check'].job_list.jobs)
 
 
+class TestTenantUnprotectedBranches(TenantParserTestCase):
+    tenant_config_file = 'config/tenant-parser/unprotected-branches.yaml'
+
+    def test_tenant_unprotected_branches(self):
+        tenant = self.sched.abide.tenants.get('tenant-one')
+        self.assertEqual(True, tenant.exclude_unprotected_branches)
+
+        self.assertEqual(['common-config'],
+                         [x.name for x in tenant.config_projects])
+        self.assertEqual(['org/project1', 'org/project2'],
+                         [x.name for x in tenant.untrusted_projects])
+
+        tpc = tenant.project_configs
+        project_name = tenant.config_projects[0].canonical_name
+        self.assertEqual(False, tpc[project_name].exclude_unprotected_branches)
+
+        project_name = tenant.untrusted_projects[0].canonical_name
+        self.assertIsNone(tpc[project_name].exclude_unprotected_branches)
+
+        project_name = tenant.untrusted_projects[1].canonical_name
+        self.assertIsNone(tpc[project_name].exclude_unprotected_branches)
+
+
 class TestSplitConfig(ZuulTestCase):
     tenant_config_file = 'config/split-config/main.yaml'
 
diff --git a/tests/unit/test_disk_accountant.py b/tests/unit/test_disk_accountant.py
new file mode 100644
index 0000000..7081b53
--- /dev/null
+++ b/tests/unit/test_disk_accountant.py
@@ -0,0 +1,93 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import tempfile
+import time
+
+from tests.base import BaseTestCase
+
+from zuul.executor.server import DiskAccountant
+
+
+class FakeExecutor(object):
+    def __init__(self):
+        self.stopped_jobs = set()
+        self.used = {}
+
+    def stopJobByJobDir(self, jobdir):
+        self.stopped_jobs.add(jobdir)
+
+    def usage(self, dirname, used):
+        self.used[dirname] = used
+
+
+class TestDiskAccountant(BaseTestCase):
+    def test_disk_accountant(self):
+        jobs_dir = tempfile.mkdtemp(
+            dir=os.environ.get("ZUUL_TEST_ROOT", None))
+        cache_dir = tempfile.mkdtemp()
+        executor_server = FakeExecutor()
+        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
+                            cache_dir)
+        da.start()
+
+        try:
+            jobdir = os.path.join(jobs_dir, '012345')
+            os.mkdir(jobdir)
+            testfile = os.path.join(jobdir, 'tfile')
+            with open(testfile, 'w') as tf:
+                tf.write(2 * 1024 * 1024 * '.')
+
+            # da should catch over-limit dir within 5 seconds
+            for i in range(0, 50):
+                if jobdir in executor_server.stopped_jobs:
+                    break
+                time.sleep(0.1)
+            self.assertEqual(set([jobdir]), executor_server.stopped_jobs)
+        finally:
+            da.stop()
+        self.assertFalse(da.thread.is_alive())
+
+    def test_cache_hard_links(self):
+        root_dir = tempfile.mkdtemp(
+            dir=os.environ.get("ZUUL_TEST_ROOT", None))
+        jobs_dir = os.path.join(root_dir, 'jobs')
+        os.mkdir(jobs_dir)
+        cache_dir = os.path.join(root_dir, 'cache')
+        os.mkdir(cache_dir)
+
+        executor_server = FakeExecutor()
+        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
+                            cache_dir, executor_server.usage)
+        da.start()
+        self.addCleanup(da.stop)
+
+        jobdir = os.path.join(jobs_dir, '012345')
+        os.mkdir(jobdir)
+
+        repo_dir = os.path.join(cache_dir, 'a.repo')
+        os.mkdir(repo_dir)
+        source_file = os.path.join(repo_dir, 'big_file')
+        with open(source_file, 'w') as tf:
+            tf.write(2 * 1024 * 1024 * '.')
+        dest_link = os.path.join(jobdir, 'big_file')
+        os.link(source_file, dest_link)
+
+        # da should _not_ count this file. Wait for 5s to get noticed
+        for i in range(0, 50):
+            if jobdir in executor_server.used:
+                break
+            time.sleep(0.1)
+        self.assertEqual(set(), executor_server.stopped_jobs)
+        self.assertIn(jobdir, executor_server.used)
+        self.assertTrue(executor_server.used[jobdir] <= 1)
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
index 46f3b26..3793edc 100755
--- a/tests/unit/test_executor.py
+++ b/tests/unit/test_executor.py
@@ -277,6 +277,11 @@
                                 'layouts/repo-checkout-no-timer-override.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
+        # If APScheduler is in mid-event when we remove the job, we
+        # can end up with one more event firing, so give it an extra
+        # second to settle.
+        time.sleep(1)
+        self.waitUntilSettled()
 
         self.assertEquals(1, len(self.builds), "One build is running")
 
@@ -315,6 +320,11 @@
                                 'layouts/repo-checkout-no-timer.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
+        # If APScheduler is in mid-event when we remove the job, we
+        # can end up with one more event firing, so give it an extra
+        # second to settle.
+        time.sleep(1)
+        self.waitUntilSettled()
 
         self.assertEquals(2, len(self.builds), "Two builds are running")
 
@@ -338,19 +348,31 @@
         p1 = "review.example.com/org/project1"
         p2 = "review.example.com/org/project2"
         projects = [p1, p2]
+        upstream = self.getUpstreamRepos(projects)
 
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         event = A.getRefUpdatedEvent()
         A.setMerged()
+        A_commit = str(upstream[p1].commit('master'))
+        self.log.debug("A commit: %s" % A_commit)
+
+        # Add another commit to the repo that merged right after this
+        # one to make sure that our post job runs with the one that we
+        # intended rather than simply the current repo state.
+        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
+                                           parent='refs/changes/1/1/1')
+        B.setMerged()
+        B_commit = str(upstream[p1].commit('master'))
+        self.log.debug("B commit: %s" % B_commit)
+
         self.fake_gerrit.addEvent(event)
         self.waitUntilSettled()
 
-        upstream = self.getUpstreamRepos(projects)
         states = [
-            {p1: dict(commit=str(upstream[p1].commit('master')),
-                      present=[A], branch='master'),
+            {p1: dict(commit=A_commit,
+                      present=[A], absent=[B], branch='master'),
              p2: dict(commit=str(upstream[p2].commit('master')),
-                      absent=[A], branch='master'),
+                      absent=[A, B], branch='master'),
              },
         ]
 
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index e8dff51..a088236 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -110,10 +110,8 @@
 
         build_params = self.builds[0].parameters
         self.assertEqual('refs/tags/newtag', build_params['zuul']['ref'])
-        self.assertEqual('00000000000000000000000000000000',
-                         build_params['zuul']['oldrev'])
+        self.assertFalse('oldrev' in build_params['zuul'])
         self.assertEqual(sha, build_params['zuul']['newrev'])
-
         self.executor_server.hold_jobs_in_build = False
         self.executor_server.release()
         self.waitUntilSettled()
@@ -125,16 +123,20 @@
     def test_push_event(self):
         self.executor_server.hold_jobs_in_build = True
 
-        old_sha = random_sha1()
-        new_sha = random_sha1()
-        self.fake_github.emitEvent(
-            self.fake_github.getPushEvent('org/project', 'refs/heads/master',
-                                          old_sha, new_sha))
+        A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
+        old_sha = '0' * 40
+        new_sha = A.head_sha
+        A.setMerged("merging A")
+        pevent = self.fake_github.getPushEvent(project='org/project',
+                                               ref='refs/heads/master',
+                                               old_rev=old_sha,
+                                               new_rev=new_sha)
+        self.fake_github.emitEvent(pevent)
         self.waitUntilSettled()
 
         build_params = self.builds[0].parameters
         self.assertEqual('refs/heads/master', build_params['zuul']['ref'])
-        self.assertEqual(old_sha, build_params['zuul']['oldrev'])
+        self.assertFalse('oldrev' in build_params['zuul'])
         self.assertEqual(new_sha, build_params['zuul']['newrev'])
 
         self.executor_server.hold_jobs_in_build = False
@@ -258,21 +260,27 @@
     @simple_layout('layouts/reporting-github.yaml', driver='github')
     def test_reporting(self):
         project = 'org/project'
+        github = self.fake_github.github_client
+
         # pipeline reports pull status both on start and success
         self.executor_server.hold_jobs_in_build = True
         A = self.fake_github.openFakePullRequest(project, 'master', 'A')
         self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
         self.waitUntilSettled()
+
         # We should have a status container for the head sha
-        statuses = self.fake_github.statuses[project][A.head_sha]
-        self.assertIn(A.head_sha, self.fake_github.statuses[project].keys())
+        self.assertIn(
+            A.head_sha, github.repo_from_project(project)._commits.keys())
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
+
         # We should only have one status for the head sha
         self.assertEqual(1, len(statuses))
         check_status = statuses[0]
         check_url = ('http://zuul.example.com/status/#%s,%s' %
                      (A.number, A.head_sha))
         self.assertEqual('tenant-one/check', check_status['context'])
-        self.assertEqual('Standard check', check_status['description'])
+        self.assertEqual('check status: pending',
+                         check_status['description'])
         self.assertEqual('pending', check_status['state'])
         self.assertEqual(check_url, check_status['url'])
         self.assertEqual(0, len(A.comments))
@@ -281,12 +289,14 @@
         self.executor_server.release()
         self.waitUntilSettled()
         # We should only have two statuses for the head sha
-        statuses = self.fake_github.statuses[project][A.head_sha]
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
         self.assertEqual(2, len(statuses))
         check_status = statuses[0]
         check_url = ('http://zuul.example.com/status/#%s,%s' %
                      (A.number, A.head_sha))
         self.assertEqual('tenant-one/check', check_status['context'])
+        self.assertEqual('check status: success',
+                         check_status['description'])
         self.assertEqual('success', check_status['state'])
         self.assertEqual(check_url, check_status['url'])
         self.assertEqual(1, len(A.comments))
@@ -298,7 +308,7 @@
         self.fake_github.emitEvent(
             A.getCommentAddedEvent('reporting check'))
         self.waitUntilSettled()
-        statuses = self.fake_github.statuses[project][A.head_sha]
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
         self.assertEqual(2, len(statuses))
         # comments increased by one for the start message
         self.assertEqual(2, len(A.comments))
@@ -308,10 +318,12 @@
         self.executor_server.release()
         self.waitUntilSettled()
         # pipeline reports success status
-        statuses = self.fake_github.statuses[project][A.head_sha]
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
         self.assertEqual(3, len(statuses))
         report_status = statuses[0]
         self.assertEqual('tenant-one/reporting', report_status['context'])
+        self.assertEqual('reporting status: success',
+                         report_status['description'])
         self.assertEqual('success', report_status['state'])
         self.assertEqual(2, len(A.comments))
 
@@ -330,13 +342,46 @@
                         MatchesRegex('^[a-fA-F0-9]{32}\/$'))
 
     @simple_layout('layouts/reporting-github.yaml', driver='github')
+    def test_truncated_status_description(self):
+        project = 'org/project'
+        # pipeline reports pull status both on start and success
+        self.executor_server.hold_jobs_in_build = True
+        A = self.fake_github.openFakePullRequest(project, 'master', 'A')
+        self.fake_github.emitEvent(
+            A.getCommentAddedEvent('long pipeline'))
+        self.waitUntilSettled()
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
+        self.assertEqual(1, len(statuses))
+        check_status = statuses[0]
+        # Status is truncated due to long pipeline name
+        self.assertEqual('status: pending',
+                         check_status['description'])
+
+        self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release()
+        self.waitUntilSettled()
+        # We should only have two statuses for the head sha
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
+        self.assertEqual(2, len(statuses))
+        check_status = statuses[0]
+        # Status is truncated due to long pipeline name
+        self.assertEqual('status: success',
+                         check_status['description'])
+
+    @simple_layout('layouts/reporting-github.yaml', driver='github')
     def test_push_reporting(self):
         project = 'org/project2'
         # pipeline reports pull status both on start and success
         self.executor_server.hold_jobs_in_build = True
-        pevent = self.fake_github.getPushEvent(project=project,
-                                               ref='refs/heads/master')
 
+        A = self.fake_github.openFakePullRequest(project, 'master', 'A')
+        old_sha = '0' * 40
+        new_sha = A.head_sha
+        A.setMerged("merging A")
+        pevent = self.fake_github.getPushEvent(project=project,
+                                               ref='refs/heads/master',
+                                               old_rev=old_sha,
+                                               new_rev=new_sha)
         self.fake_github.emitEvent(pevent)
         self.waitUntilSettled()
 
@@ -406,6 +451,52 @@
         self.assertEqual(len(D.comments), 1)
         self.assertEqual(D.comments[0], 'Merge failed')
 
+    @simple_layout('layouts/reporting-multiple-github.yaml', driver='github')
+    def test_reporting_multiple_github(self):
+        project = 'org/project1'
+        github = self.fake_github.github_client
+
+        # pipeline reports pull status both on start and success
+        self.executor_server.hold_jobs_in_build = True
+        A = self.fake_github.openFakePullRequest(project, 'master', 'A')
+        self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
+        # open one on B as well, which should not affect A reporting
+        B = self.fake_github.openFakePullRequest('org/project2', 'master',
+                                                 'B')
+        self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
+        self.waitUntilSettled()
+        # We should have a status container for the head sha
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
+        self.assertIn(
+            A.head_sha, github.repo_from_project(project)._commits.keys())
+        # We should only have one status for the head sha
+        self.assertEqual(1, len(statuses))
+        check_status = statuses[0]
+        check_url = ('http://zuul.example.com/status/#%s,%s' %
+                     (A.number, A.head_sha))
+        self.assertEqual('tenant-one/check', check_status['context'])
+        self.assertEqual('check status: pending', check_status['description'])
+        self.assertEqual('pending', check_status['state'])
+        self.assertEqual(check_url, check_status['url'])
+        self.assertEqual(0, len(A.comments))
+
+        self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release()
+        self.waitUntilSettled()
+        # We should only have two statuses for the head sha
+        statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
+        self.assertEqual(2, len(statuses))
+        check_status = statuses[0]
+        check_url = ('http://zuul.example.com/status/#%s,%s' %
+                     (A.number, A.head_sha))
+        self.assertEqual('tenant-one/check', check_status['context'])
+        self.assertEqual('success', check_status['state'])
+        self.assertEqual('check status: success', check_status['description'])
+        self.assertEqual(check_url, check_status['url'])
+        self.assertEqual(1, len(A.comments))
+        self.assertThat(A.comments[0],
+                        MatchesRegex('.*Build succeeded.*', re.DOTALL))
+
     @simple_layout('layouts/dependent-github.yaml', driver='github')
     def test_parallel_changes(self):
         "Test that changes are tested in parallel and merged in series"
@@ -581,7 +672,7 @@
 
     @simple_layout('layouts/basic-github.yaml', driver='github')
     def test_push_event_reconfigure(self):
-        pevent = self.fake_github.getPushEvent(project='common-config',
+        pevent = self.fake_github.getPushEvent(project='org/common-config',
                                                ref='refs/heads/master',
                                                modified_files=['zuul.yaml'])
 
@@ -608,3 +699,20 @@
             self.fake_github.emitEvent,
             ('ping', pevent),
         )
+
+
+class TestGithubUnprotectedBranches(ZuulTestCase):
+    config_file = 'zuul-github-driver.conf'
+    tenant_config_file = 'config/unprotected-branches/main.yaml'
+
+    def test_unprotected_branches(self):
+        tenant = self.sched.abide.tenants.get('tenant-one')
+
+        project1 = tenant.untrusted_projects[0]
+        project2 = tenant.untrusted_projects[1]
+
+        # project1 should have parsed master
+        self.assertIn('master', project1.unparsed_branch_config.keys())
+
+        # project2 should have no parsed branch
+        self.assertEqual(0, len(project2.unparsed_branch_config.keys()))
diff --git a/tests/unit/test_multi_driver.py b/tests/unit/test_multi_driver.py
index 864bd31..1844c33 100644
--- a/tests/unit/test_multi_driver.py
+++ b/tests/unit/test_multi_driver.py
@@ -43,3 +43,13 @@
         self.executor_server.hold_jobs_in_build = False
         self.executor_server.release()
         self.waitUntilSettled()
+
+        # Check on reporting results
+        # github should have a success status (only).
+        statuses = self.fake_github.getCommitStatuses(
+            'org/project1', B.head_sha)
+        self.assertEqual(1, len(statuses))
+        self.assertEqual('success', statuses[0]['state'])
+
+        # gerrit should have only reported twice, on start and success
+        self.assertEqual(A.reported, 2)
diff --git a/tests/unit/test_push_reqs.py b/tests/unit/test_push_reqs.py
index d3a1feb..80c3be9 100644
--- a/tests/unit/test_push_reqs.py
+++ b/tests/unit/test_push_reqs.py
@@ -25,12 +25,13 @@
     def test_push_requirements(self):
         self.executor_server.hold_jobs_in_build = True
 
-        # Create a github change, add a change and emit a push event
         A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
-        old_sha = A.head_sha
+        new_sha = A.head_sha
+        A.setMerged("merging A")
         pevent = self.fake_github.getPushEvent(project='org/project1',
                                                ref='refs/heads/master',
-                                               old_rev=old_sha)
+                                               new_rev=new_sha)
+
         self.fake_github.emitEvent(pevent)
 
         self.waitUntilSettled()
@@ -43,7 +44,7 @@
         # Make a gerrit change, and emit a ref-updated event
         B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
         self.fake_gerrit.addEvent(B.getRefUpdatedEvent())
-
+        B.setMerged()
         self.waitUntilSettled()
 
         # All but one pipeline should be skipped, increasing builds by 1
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index 5dd3f4e..93367b9 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -1103,6 +1103,12 @@
 
     def test_post(self):
         "Test that post jobs run"
+        p = "review.example.com/org/project"
+        upstream = self.getUpstreamRepos([p])
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        A.setMerged()
+        A_commit = str(upstream[p].commit('master'))
+        self.log.debug("A commit: %s" % A_commit)
 
         e = {
             "type": "ref-updated",
@@ -1111,7 +1117,7 @@
             },
             "refUpdate": {
                 "oldRev": "90f173846e3af9154517b88543ffbd1691f31366",
-                "newRev": "d479a0bfcb34da57a31adb2a595c0cf687812543",
+                "newRev": A_commit,
                 "refName": "master",
                 "project": "org/project",
             }
@@ -1156,7 +1162,7 @@
             "refUpdate": {
                 "oldRev": "90f173846e3af9154517b88543ffbd1691f31366",
                 "newRev": "0000000000000000000000000000000000000000",
-                "refName": "master",
+                "refName": "testbranch",
                 "project": "org/project",
             }
         }
@@ -1434,6 +1440,60 @@
         self.assertEqual(self.getJobFromHistory('project-test2').result,
                          'FAILURE')
 
+    @simple_layout('layouts/autohold.yaml')
+    def test_autohold(self):
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+
+        client = zuul.rpcclient.RPCClient('127.0.0.1',
+                                          self.gearman_server.port)
+        self.addCleanup(client.shutdown)
+        r = client.autohold('tenant-one', 'org/project', 'project-test2',
+                            "reason text", 1)
+        self.assertTrue(r)
+
+        self.executor_server.failJob('project-test2', A)
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+
+        self.waitUntilSettled()
+
+        self.assertEqual(A.data['status'], 'NEW')
+        self.assertEqual(A.reported, 1)
+        self.assertEqual(self.getJobFromHistory('project-test2').result,
+                         'FAILURE')
+
+        # Check nodepool for a held node
+        held_node = None
+        for node in self.fake_nodepool.getNodes():
+            if node['state'] == zuul.model.STATE_HOLD:
+                held_node = node
+                break
+        self.assertIsNotNone(held_node)
+
+        # Validate node has recorded the failed job
+        self.assertEqual(
+            held_node['hold_job'],
+            " ".join(['tenant-one',
+                      'review.example.com/org/project',
+                      'project-test2'])
+        )
+        self.assertEqual(held_node['comment'], "reason text")
+
+        # Another failed change should not hold any more nodes
+        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+        self.executor_server.failJob('project-test2', B)
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertEqual(B.data['status'], 'NEW')
+        self.assertEqual(B.reported, 1)
+        self.assertEqual(self.getJobFromHistory('project-test2').result,
+                         'FAILURE')
+
+        held_nodes = 0
+        for node in self.fake_nodepool.getNodes():
+            if node['state'] == zuul.model.STATE_HOLD:
+                held_nodes += 1
+        self.assertEqual(held_nodes, 1)
+
     @simple_layout('layouts/three-projects.yaml')
     def test_dependent_behind_dequeue(self):
         # This particular test does a large amount of merges and needs a little
@@ -1838,6 +1898,11 @@
         self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
+        # If APScheduler is in mid-event when we remove the job, we
+        # can end up with one more event firing, so give it an extra
+        # second to settle.
+        time.sleep(1)
+        self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 1, "One timer job")
 
@@ -2801,6 +2866,12 @@
         # below don't race against more jobs being queued.
         self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
         self.sched.reconfigure(self.config)
+        self.waitUntilSettled()
+        # If APScheduler is in mid-event when we remove the job, we
+        # can end up with one more event firing, so give it an extra
+        # second to settle.
+        time.sleep(1)
+        self.waitUntilSettled()
         self.executor_server.release()
         self.waitUntilSettled()
 
@@ -2848,6 +2919,11 @@
                                     'layouts/no-timer.yaml')
             self.sched.reconfigure(self.config)
             self.waitUntilSettled()
+            # If APScheduler is in mid-event when we remove the job,
+            # we can end up with one more event firing, so give it an
+            # extra second to settle.
+            time.sleep(1)
+            self.waitUntilSettled()
             self.assertEqual(len(self.builds), 1,
                              'Timer builds iteration #%d' % x)
             self.executor_server.release('.*')
@@ -2926,6 +3002,11 @@
         self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
+        # If APScheduler is in mid-event when we remove the job, we
+        # can end up with one more event firing, so give it an extra
+        # second to settle.
+        time.sleep(1)
+        self.waitUntilSettled()
         self.executor_server.release('.*')
         self.waitUntilSettled()
 
@@ -2970,6 +3051,11 @@
         self.sched.reconfigure(self.config)
         self.registerJobs()
         self.waitUntilSettled()
+        # If APScheduler is in mid-event when we remove the job, we
+        # can end up with one more event firing, so give it an extra
+        # second to settle.
+        time.sleep(1)
+        self.waitUntilSettled()
         self.worker.release('.*')
         self.waitUntilSettled()
 
@@ -3000,6 +3086,12 @@
 
     def test_client_enqueue_ref(self):
         "Test that the RPC client can enqueue a ref"
+        p = "review.example.com/org/project"
+        upstream = self.getUpstreamRepos([p])
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        A.setMerged()
+        A_commit = str(upstream[p].commit('master'))
+        self.log.debug("A commit: %s" % A_commit)
 
         client = zuul.rpcclient.RPCClient('127.0.0.1',
                                           self.gearman_server.port)
@@ -3011,7 +3103,7 @@
             trigger='gerrit',
             ref='master',
             oldrev='90f173846e3af9154517b88543ffbd1691f31366',
-            newrev='d479a0bfcb34da57a31adb2a595c0cf687812543')
+            newrev=A_commit)
         self.waitUntilSettled()
         job_names = [x.name for x in self.history]
         self.assertEqual(len(self.history), 1)
@@ -5309,6 +5401,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
                 semaphore: test-semaphore
 
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
old mode 100644
new mode 100755
index aa091e5..8555208
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -90,6 +90,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
 
             - project:
@@ -135,6 +138,77 @@
             dict(name='project-test2', result='SUCCESS', changes='1,1'),
             dict(name='project-test2', result='SUCCESS', changes='2,1')])
 
+    def test_dynamic_config_non_existing_job(self):
+        """Test that requesting a non existent job fails"""
+        in_repo_conf = textwrap.dedent(
+            """
+            - job:
+                name: project-test1
+
+            - project:
+                name: org/project
+                check:
+                  jobs:
+                    - non-existent-job
+            """)
+
+        in_repo_playbook = textwrap.dedent(
+            """
+            - hosts: all
+              tasks: []
+            """)
+
+        file_dict = {'.zuul.yaml': in_repo_conf,
+                     'playbooks/project-test2.yaml': in_repo_playbook}
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+                                           files=file_dict)
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertEqual(A.reported, 1,
+                         "A should report failure")
+        self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
+        self.assertIn('Job non-existent-job not defined', A.messages[0],
+                      "A should have failed the check pipeline")
+        self.assertHistory([])
+
+    def test_dynamic_config_non_existing_job_in_template(self):
+        """Test that requesting a non existent job fails"""
+        in_repo_conf = textwrap.dedent(
+            """
+            - job:
+                name: project-test1
+
+            - project-template:
+                name: test-template
+                check:
+                  jobs:
+                    - non-existent-job
+
+            - project:
+                name: org/project
+                templates:
+                  - test-template
+            """)
+
+        in_repo_playbook = textwrap.dedent(
+            """
+            - hosts: all
+              tasks: []
+            """)
+
+        file_dict = {'.zuul.yaml': in_repo_conf,
+                     'playbooks/project-test2.yaml': in_repo_playbook}
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+                                           files=file_dict)
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertEqual(A.reported, 1,
+                         "A should report failure")
+        self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
+        self.assertIn('Job non-existent-job not defined', A.messages[0],
+                      "A should have failed the check pipeline")
+        self.assertHistory([])
+
     def test_dynamic_config_new_patchset(self):
         self.executor_server.hold_jobs_in_build = True
 
@@ -144,6 +218,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
 
             - project:
@@ -174,6 +251,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
 
             - project:
@@ -197,9 +277,16 @@
         self.assertTrue(items[0].live)
 
         self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release('project-test1')
+        self.waitUntilSettled()
         self.executor_server.release()
         self.waitUntilSettled()
 
+        self.assertHistory([
+            dict(name='project-test2', result='ABORTED', changes='1,1'),
+            dict(name='project-test1', result='SUCCESS', changes='1,2'),
+            dict(name='project-test2', result='SUCCESS', changes='1,2')])
+
     def test_dynamic_dependent_pipeline(self):
         # Test dynamically adding a project to a
         # dependent pipeline for the first time
@@ -211,6 +298,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
 
             - project:
@@ -250,6 +340,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
 
             - project:
@@ -312,6 +405,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
 
             - project:
@@ -926,3 +1022,53 @@
         self.assertIn('- data-return-relative '
                       'http://example.com/test/log/url/docs/index.html',
                       A.messages[-1])
+
+
+class TestDiskAccounting(AnsibleZuulTestCase):
+    config_file = 'zuul-disk-accounting.conf'
+    tenant_config_file = 'config/disk-accountant/main.yaml'
+
+    def test_disk_accountant_kills_job(self):
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertHistory([
+            dict(name='dd-big-empty-file', result='ABORTED', changes='1,1')])
+
+
+class TestMaxNodesPerJob(AnsibleZuulTestCase):
+    tenant_config_file = 'config/multi-tenant/main.yaml'
+
+    def test_max_nodes_reached(self):
+        in_repo_conf = textwrap.dedent(
+            """
+            - job:
+                name: test-job
+                nodes:
+                  - name: node01
+                    label: fake
+                  - name: node02
+                    label: fake
+                  - name: node03
+                    label: fake
+                  - name: node04
+                    label: fake
+                  - name: node05
+                    label: fake
+                  - name: node06
+                    label: fake
+            """)
+        file_dict = {'.zuul.yaml': in_repo_conf}
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+                                           files=file_dict)
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertIn('The job "test-job" exceeds tenant max-nodes-per-job 5.',
+                      A.messages[0], "A should fail because of nodes limit")
+
+        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
+                                           files=file_dict)
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertNotIn("exceeds tenant max-nodes", B.messages[0],
+                         "B should not fail because of nodes limit")
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index 078e1c9..9dd724d 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -285,10 +285,16 @@
         self._log("")
 
     def v2_runner_on_skipped(self, result):
-        reason = result._result.get('skip_reason')
-        if reason:
-            # No reason means it's an item, which we'll log differently
-            self._log_message(result, status='skipping', msg=reason)
+        if result._task.loop:
+            self._items_done = False
+            self._deferred_result = dict(result._result)
+        else:
+            reason = result._result.get('skip_reason')
+            if reason:
+                # No reason means it's an item, which we'll log differently
+                self._log_message(result, status='skipping', msg=reason)
+                # Log an extra blank line to get space after each skip
+                self._log("")
 
     def v2_runner_item_on_skipped(self, result):
         reason = result._result.get('skip_reason')
@@ -297,14 +303,14 @@
         else:
             self._log_message(result, status='skipping')
 
+        if self._deferred_result:
+            self._process_deferred(result)
+
     def v2_runner_on_ok(self, result):
         if (self._play.strategy == 'free'
                 and self._last_task_banner != result._task._uuid):
             self._print_task_banner(result._task)
 
-        if result._task.action in ('include', 'include_role', 'setup'):
-            return
-
         result_dict = dict(result._result)
 
         self._clean_results(result_dict, result._task.action)
@@ -388,8 +394,6 @@
 
         if self._deferred_result:
             self._process_deferred(result)
-        # Log an extra blank line to get space after each task
-        self._log("")
 
     def v2_runner_item_on_failed(self, result):
         result_dict = dict(result._result)
@@ -434,10 +438,13 @@
         self._items_done = True
         result_dict = self._deferred_result
         self._deferred_result = None
+        status = result_dict.get('status')
 
-        self._log_message(
-            result, "All items complete",
-            status=result_dict['status'])
+        if status:
+            self._log_message(result, "All items complete", status=status)
+
+        # Log an extra blank line to get space after each task
+        self._log("")
 
     def _print_task_banner(self, task):
 
diff --git a/zuul/cmd/client.py b/zuul/cmd/client.py
index b55aed8..177283e 100755
--- a/zuul/cmd/client.py
+++ b/zuul/cmd/client.py
@@ -46,6 +46,21 @@
                                            description='valid commands',
                                            help='additional help')
 
+        cmd_autohold = subparsers.add_parser(
+            'autohold', help='hold nodes for failed job')
+        cmd_autohold.add_argument('--tenant', help='tenant name',
+                                  required=True)
+        cmd_autohold.add_argument('--project', help='project name',
+                                  required=True)
+        cmd_autohold.add_argument('--job', help='job name',
+                                  required=True)
+        cmd_autohold.add_argument('--reason', help='reason for the hold',
+                                  required=True)
+        cmd_autohold.add_argument('--count',
+                                  help='number of job runs (default: 1)',
+                                  required=False, type=int, default=1)
+        cmd_autohold.set_defaults(func=self.autohold)
+
         cmd_enqueue = subparsers.add_parser('enqueue', help='enqueue a change')
         cmd_enqueue.add_argument('--tenant', help='tenant name',
                                  required=True)
@@ -137,6 +152,16 @@
         else:
             sys.exit(1)
 
+    def autohold(self):
+        client = zuul.rpcclient.RPCClient(
+            self.server, self.port, self.ssl_key, self.ssl_cert, self.ssl_ca)
+        r = client.autohold(tenant=self.args.tenant,
+                            project=self.args.project,
+                            job=self.args.job,
+                            reason=self.args.reason,
+                            count=self.args.count)
+        return r
+
     def enqueue(self):
         client = zuul.rpcclient.RPCClient(
             self.server, self.port, self.ssl_key, self.ssl_cert, self.ssl_ca)
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 7640dfc..a09147c 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -67,6 +67,15 @@
         super(DuplicateNodeError, self).__init__(message)
 
 
+class MaxNodeError(Exception):
+    def __init__(self, job, tenant):
+        message = textwrap.dedent("""\
+        The job "{job}" exceeds tenant max-nodes-per-job {maxnodes}.""")
+        message = textwrap.fill(message.format(
+            job=job.name, maxnodes=tenant.max_nodes_per_job))
+        super(MaxNodeError, self).__init__(message)
+
+
 class DuplicateGroupError(Exception):
     def __init__(self, nodeset, group):
         message = textwrap.dedent("""\
@@ -475,6 +484,9 @@
                 for conf_node in conf_nodes:
                     node = model.Node(conf_node['name'], conf_node['label'])
                     ns.addNode(node)
+            if tenant.max_nodes_per_job != -1 and \
+               len(ns) > tenant.max_nodes_per_job:
+                raise MaxNodeError(job, tenant)
             job.nodeset = ns
 
         if 'required-projects' in conf:
@@ -632,6 +644,12 @@
                 raise Exception("Job must be a string or dictionary")
             attrs['_source_context'] = source_context
             attrs['_start_mark'] = start_mark
+
+            # validate that the job is existing
+            with configuration_exceptions('project or project-template',
+                                          attrs):
+                layout.getJob(attrs['name'])
+
             job_list.addJob(JobParser.fromYaml(tenant, layout, attrs,
                                                project_pipeline=True))
 
@@ -762,20 +780,6 @@
 
         precedence = vs.Any('normal', 'low', 'high')
 
-        approval = vs.Schema({'username': str,
-                              'email-filter': str,
-                              'email': str,
-                              'older-than': str,
-                              'newer-than': str,
-                              }, extra=vs.ALLOW_EXTRA)
-
-        require = {'approval': to_list(approval),
-                   'open': bool,
-                   'current-patchset': bool,
-                   'status': to_list(str)}
-
-        reject = {'approval': to_list(approval)}
-
         window = vs.All(int, vs.Range(min=0))
         window_floor = vs.All(int, vs.Range(min=1))
         window_type = vs.Any('linear', 'exponential')
@@ -785,8 +789,6 @@
                     vs.Required('manager'): manager,
                     'precedence': precedence,
                     'description': str,
-                    'require': require,
-                    'reject': reject,
                     'success-message': str,
                     'failure-message': str,
                     'merge-failure-message': str,
@@ -933,6 +935,7 @@
         'include': to_list(classes),
         'exclude': to_list(classes),
         'shadow': to_list(str),
+        'exclude-unprotected-branches': bool,
     }}
 
     project = vs.Any(str, project_dict)
@@ -968,7 +971,10 @@
     @staticmethod
     def getSchema(connections=None):
         tenant = {vs.Required('name'): str,
-                  'source': TenantParser.validateTenantSources(connections)}
+                  'max-nodes-per-job': int,
+                  'source': TenantParser.validateTenantSources(connections),
+                  'exclude-unprotected-branches': bool,
+                  }
         return vs.Schema(tenant)
 
     @staticmethod
@@ -976,6 +982,12 @@
                  cached):
         TenantParser.getSchema(connections)(conf)
         tenant = model.Tenant(conf['name'])
+        if conf.get('max-nodes-per-job') is not None:
+            tenant.max_nodes_per_job = conf['max-nodes-per-job']
+        if conf.get('exclude-unprotected-branches') is not None:
+            tenant.exclude_unprotected_branches = \
+                conf['exclude-unprotected-branches']
+
         tenant.unparsed_config = conf
         unparsed_config = model.UnparsedTenantConfig()
         # tpcs is TenantProjectConfigs
@@ -994,7 +1006,7 @@
             TenantParser._loadTenantInRepoLayouts(merger, connections,
                                                   tenant.config_projects,
                                                   tenant.untrusted_projects,
-                                                  cached)
+                                                  cached, tenant)
         unparsed_config.extend(tenant.config_projects_config)
         unparsed_config.extend(tenant.untrusted_projects_config)
         tenant.layout = TenantParser._parseLayout(base, tenant,
@@ -1066,6 +1078,7 @@
             project = source.getProject(conf)
             project_include = current_include
             shadow_projects = []
+            project_exclude_unprotected_branches = None
         else:
             project_name = list(conf.keys())[0]
             project = source.getProject(project_name)
@@ -1079,10 +1092,14 @@
                 as_list(conf[project_name].get('exclude', [])))
             if project_exclude:
                 project_include = frozenset(project_include - project_exclude)
+            project_exclude_unprotected_branches = conf[project_name].get(
+                'exclude-unprotected-branches', None)
 
         tenant_project_config = model.TenantProjectConfig(project)
         tenant_project_config.load_classes = frozenset(project_include)
         tenant_project_config.shadow_projects = shadow_projects
+        tenant_project_config.exclude_unprotected_branches = \
+            project_exclude_unprotected_branches
 
         return tenant_project_config
 
@@ -1149,7 +1166,7 @@
 
     @staticmethod
     def _loadTenantInRepoLayouts(merger, connections, config_projects,
-                                 untrusted_projects, cached):
+                                 untrusted_projects, cached, tenant):
         config_projects_config = model.UnparsedTenantConfig()
         untrusted_projects_config = model.UnparsedTenantConfig()
         jobs = []
@@ -1197,7 +1214,7 @@
             # branch.  Remember the branch and then implicitly add a
             # branch selector to each job there.  This makes the
             # in-repo configuration apply only to that branch.
-            for branch in project.source.getProjectBranches(project):
+            for branch in project.source.getProjectBranches(project, tenant):
                 project.unparsed_branch_config[branch] = \
                     model.UnparsedTenantConfig()
                 job = merger.getFiles(
@@ -1417,11 +1434,11 @@
         new_abide.tenants[tenant.name] = new_tenant
         return new_abide
 
-    def _loadDynamicProjectData(self, config, project, files, trusted):
+    def _loadDynamicProjectData(self, config, project, files, trusted, tenant):
         if trusted:
             branches = ['master']
         else:
-            branches = project.source.getProjectBranches(project)
+            branches = project.source.getProjectBranches(project, tenant)
 
         for branch in branches:
             fns1 = []
@@ -1473,11 +1490,12 @@
         if include_config_projects:
             config = model.UnparsedTenantConfig()
             for project in tenant.config_projects:
-                self._loadDynamicProjectData(config, project, files, True)
+                self._loadDynamicProjectData(
+                    config, project, files, True, tenant)
         else:
             config = tenant.config_projects_config.copy()
         for project in tenant.untrusted_projects:
-            self._loadDynamicProjectData(config, project, files, False)
+            self._loadDynamicProjectData(config, project, files, False, tenant)
 
         layout = model.Layout(tenant)
         # NOTE: the actual pipeline objects (complete with queues and
diff --git a/zuul/driver/__init__.py b/zuul/driver/__init__.py
index 5193fe6..6ac9197 100644
--- a/zuul/driver/__init__.py
+++ b/zuul/driver/__init__.py
@@ -272,11 +272,11 @@
         pass
 
     @abc.abstractmethod
-    def setMountsMap(self, state_dir, ro_dirs=[], rw_dirs=[]):
+    def setMountsMap(self, state_dir, ro_paths=None, rw_paths=None):
         """Add additional mount point to the execution environment.
 
         :arg str state_dir: the state directory to be read write
-        :arg list ro_dirs: read only directories paths
-        :arg list rw_dirs: read write directories paths
+        :arg list ro_paths: read only files or directories to bind mount
+        :arg list rw_paths: read write files or directories to bind mount
         """
         pass
diff --git a/zuul/driver/bubblewrap/__init__.py b/zuul/driver/bubblewrap/__init__.py
index e8209f1..5370484 100644
--- a/zuul/driver/bubblewrap/__init__.py
+++ b/zuul/driver/bubblewrap/__init__.py
@@ -83,8 +83,12 @@
     def stop(self):
         pass
 
-    def setMountsMap(self, ro_dirs=[], rw_dirs=[]):
-        self.mounts_map = {'ro': ro_dirs, 'rw': rw_dirs}
+    def setMountsMap(self, ro_paths=None, rw_paths=None):
+        if not ro_paths:
+            ro_paths = []
+        if not rw_paths:
+            rw_paths = []
+        self.mounts_map = {'ro': ro_paths, 'rw': rw_paths}
 
     def getPopen(self, **kwargs):
         # Set zuul_dir if it was not passed in
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 647085b..de72c69 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -617,7 +617,7 @@
                                    (record.get('number'),))
         return changes
 
-    def getProjectBranches(self, project: Project) -> List[str]:
+    def getProjectBranches(self, project: Project, tenant) -> List[str]:
         refs = self.getInfoRefs(project)
         heads = [str(k[len('refs/heads/'):]) for k in refs.keys()
                  if k.startswith('refs/heads/')]
diff --git a/zuul/driver/gerrit/gerritmodel.py b/zuul/driver/gerrit/gerritmodel.py
index 7c1bb5a..b96ed4c 100644
--- a/zuul/driver/gerrit/gerritmodel.py
+++ b/zuul/driver/gerrit/gerritmodel.py
@@ -71,14 +71,12 @@
             for k, v in a.items():
                 if k == 'username':
                     a['username'] = re.compile(v)
-                elif k in ['email', 'email-filter']:
+                elif k == 'email':
                     a['email'] = re.compile(v)
                 elif k == 'newer-than':
                     a[k] = time_to_seconds(v)
                 elif k == 'older-than':
                     a[k] = time_to_seconds(v)
-            if 'email-filter' in a:
-                del a['email-filter']
         return approvals
 
     def _match_approval_required_approval(self, rapproval, approval):
diff --git a/zuul/driver/gerrit/gerritsource.py b/zuul/driver/gerrit/gerritsource.py
index 4571cc1..7141080 100644
--- a/zuul/driver/gerrit/gerritsource.py
+++ b/zuul/driver/gerrit/gerritsource.py
@@ -54,8 +54,8 @@
     def getProjectOpenChanges(self, project):
         return self.connection.getProjectOpenChanges(project)
 
-    def getProjectBranches(self, project):
-        return self.connection.getProjectBranches(project)
+    def getProjectBranches(self, project, tenant):
+        return self.connection.getProjectBranches(project, tenant)
 
     def getGitUrl(self, project):
         return self.connection.getGitUrl(project)
@@ -82,7 +82,6 @@
 
 
 approval = vs.Schema({'username': str,
-                      'email-filter': str,
                       'email': str,
                       'older-than': str,
                       'newer-than': str,
diff --git a/zuul/driver/gerrit/gerrittrigger.py b/zuul/driver/gerrit/gerrittrigger.py
index 706b7df..cfedd4e 100644
--- a/zuul/driver/gerrit/gerrittrigger.py
+++ b/zuul/driver/gerrit/gerrittrigger.py
@@ -77,7 +77,6 @@
     variable_dict = v.Schema(dict)
 
     approval = v.Schema({'username': str,
-                         'email-filter': str,
                          'email': str,
                          'older-than': str,
                          'newer-than': str,
diff --git a/zuul/driver/git/gitconnection.py b/zuul/driver/git/gitconnection.py
index f4fe7e5..0624088 100644
--- a/zuul/driver/git/gitconnection.py
+++ b/zuul/driver/git/gitconnection.py
@@ -48,7 +48,7 @@
     def addProject(self, project):
         self.projects[project.name] = project
 
-    def getProjectBranches(self, project):
+    def getProjectBranches(self, project, tenant):
         # TODO(jeblair): implement; this will need to handle local or
         # remote git urls.
         raise NotImplemented()
diff --git a/zuul/driver/git/gitsource.py b/zuul/driver/git/gitsource.py
index 61a328e..8d85c08 100644
--- a/zuul/driver/git/gitsource.py
+++ b/zuul/driver/git/gitsource.py
@@ -45,8 +45,8 @@
             self.connection.addProject(p)
         return p
 
-    def getProjectBranches(self, project):
-        return self.connection.getProjectBranches(project)
+    def getProjectBranches(self, project, tenant):
+        return self.connection.getProjectBranches(project, tenant)
 
     def getGitUrl(self, project):
         return self.connection.getGitUrl(project)
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index ba063fb..616e774 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -326,30 +326,31 @@
         self._data = None
 
     def __getitem__(self, key):
-        if self._data is None:
-            self._data = self._init_data()
+        self._init_data()
         return self._data[key]
 
     def __iter__(self):
+        self._init_data()
         return iter(self._data)
 
     def __len__(self):
+        self._init_data()
         return len(self._data)
 
     def _init_data(self):
-        user = self._github.user(self._username)
-        log_rate_limit(self.log, self._github)
-        data = {
-            'username': user.login,
-            'name': user.name,
-            'email': user.email
-        }
-        return data
+        if self._data is None:
+            user = self._github.user(self._username)
+            log_rate_limit(self.log, self._github)
+            self._data = {
+                'username': user.login,
+                'name': user.name,
+                'email': user.email
+            }
 
 
 class GithubConnection(BaseConnection):
     driver_name = 'github'
-    log = logging.getLogger("connection.github")
+    log = logging.getLogger("zuul.GithubConnection")
     payload_path = 'payload'
 
     def __init__(self, driver, connection_name, connection_config):
@@ -363,6 +364,12 @@
             'canonical_hostname', self.server)
         self.source = driver.getSource(self)
 
+        # ssl verification must default to true
+        verify_ssl = self.connection_config.get('verify_ssl', 'true')
+        self.verify_ssl = True
+        if verify_ssl.lower() == 'false':
+            self.verify_ssl = False
+
         self._github = None
         self.app_id = None
         self.app_key = None
@@ -395,7 +402,11 @@
     def _createGithubClient(self):
         if self.server != 'github.com':
             url = 'https://%s/' % self.server
-            github = github3.GitHubEnterprise(url)
+            if not self.verify_ssl:
+                # disabling ssl verification is evil so emit a warning
+                self.log.warning("SSL verification disabled for "
+                                 "GitHub Enterprise")
+            github = github3.GitHubEnterprise(url, verify=self.verify_ssl)
         else:
             github = github3.GitHub()
 
@@ -687,11 +698,21 @@
     def addProject(self, project):
         self.projects[project.name] = project
 
-    def getProjectBranches(self, project):
+    def getProjectBranches(self, project, tenant):
+
+        # Evaluate if unprotected branches should be excluded or not. The first
+        # match wins. The order is project -> tenant (default is false).
+        project_config = tenant.project_configs.get(project.canonical_name)
+        if project_config.exclude_unprotected_branches is not None:
+            exclude_unprotected = project_config.exclude_unprotected_branches
+        else:
+            exclude_unprotected = tenant.exclude_unprotected_branches
+
         github = self.getGithubClient()
         owner, proj = project.name.split('/')
         repository = github.repository(owner, proj)
-        branches = [branch.name for branch in repository.branches()]
+        branches = [branch.name for branch in repository.branches(
+            protected=exclude_unprotected)]
         log_rate_limit(self.log, github)
         return branches
 
diff --git a/zuul/driver/github/githubreporter.py b/zuul/driver/github/githubreporter.py
index ea41ccd..3b8f518 100644
--- a/zuul/driver/github/githubreporter.py
+++ b/zuul/driver/github/githubreporter.py
@@ -19,6 +19,7 @@
 from zuul.reporter import BaseReporter
 from zuul.exceptions import MergeFailure
 from zuul.driver.util import scalar_or_list
+from zuul.driver.github.githubsource import GithubSource
 
 
 class GithubReporter(BaseReporter):
@@ -41,6 +42,17 @@
 
     def report(self, item):
         """Report on an event."""
+
+        # If the source is not GithubSource we cannot report anything here.
+        if not isinstance(item.change.project.source, GithubSource):
+            return
+
+        # For supporting several Github connections we also must filter by
+        # the canonical hostname.
+        if item.change.project.source.connection.canonical_hostname != \
+                self.connection.canonical_hostname:
+            return
+
         # order is important for github branch protection.
         # A status should be set before a merge attempt
         if self._commit_status is not None:
@@ -89,9 +101,15 @@
                 url_pattern = sched_config.get('webapp', 'status_url')
         url = item.formatUrlPattern(url_pattern) if url_pattern else ''
 
-        description = ''
-        if item.pipeline.description:
-            description = item.pipeline.description
+        description = '%s status: %s' % (item.pipeline.name,
+                                         self._commit_status)
+
+        if len(description) >= 140:
+            # This pipeline is named with a long name and thus this
+            # description would overflow the GitHub limit of 1024 bytes.
+            # Truncate the description. In practice, anything over 140
+            # characters seems to trip the limit.
+            description = 'status: %s' % self._commit_status
 
         self.log.debug(
             'Reporting change %s, params %s, status:\n'
diff --git a/zuul/driver/github/githubsource.py b/zuul/driver/github/githubsource.py
index 1bd280f..1e7e07a 100644
--- a/zuul/driver/github/githubsource.py
+++ b/zuul/driver/github/githubsource.py
@@ -68,8 +68,8 @@
             self.connection.addProject(p)
         return p
 
-    def getProjectBranches(self, project):
-        return self.connection.getProjectBranches(project)
+    def getProjectBranches(self, project, tenant):
+        return self.connection.getProjectBranches(project, tenant)
 
     def getProjectOpenChanges(self, project):
         """Get the open changes for a project."""
diff --git a/zuul/driver/timer/__init__.py b/zuul/driver/timer/__init__.py
index 4489808..69cd508 100644
--- a/zuul/driver/timer/__init__.py
+++ b/zuul/driver/timer/__init__.py
@@ -81,7 +81,7 @@
     def _onTrigger(self, tenant, pipeline_name, timespec):
         for project_name in tenant.layout.project_configs.keys():
             (trusted, project) = tenant.getProject(project_name)
-            for branch in project.source.getProjectBranches(project):
+            for branch in project.source.getProjectBranches(project, tenant):
                 event = TimerTriggerEvent()
                 event.type = 'timer'
                 event.timespec = timespec
diff --git a/zuul/driver/zuul/zuultrigger.py b/zuul/driver/zuul/zuultrigger.py
index 628687e..7757a31 100644
--- a/zuul/driver/zuul/zuultrigger.py
+++ b/zuul/driver/zuul/zuultrigger.py
@@ -43,20 +43,11 @@
 
 
 def getSchema():
-    approval = v.Schema({'username': str,
-                         'email-filter': str,
-                         'email': str,
-                         'older-than': str,
-                         'newer-than': str,
-                         }, extra=v.ALLOW_EXTRA)
-
     zuul_trigger = {
         v.Required('event'):
         scalar_or_list(v.Any('parent-change-enqueued',
                              'project-change-merged')),
         'pipeline': scalar_or_list(str),
-        'require-approval': scalar_or_list(approval),
-        'reject-approval': scalar_or_list(approval),
     }
 
     return zuul_trigger
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index dfd4225..85ae68c 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -152,6 +152,7 @@
         # replace the environment variables below.
         project = dict(
             name=item.change.project.name,
+            short_name=item.change.project.name.split('/')[-1],
             canonical_hostname=item.change.project.canonical_hostname,
             canonical_name=item.change.project.canonical_name)
 
@@ -172,15 +173,18 @@
             zuul_params['change'] = str(item.change.number)
         if hasattr(item.change, 'patchset'):
             zuul_params['patchset'] = str(item.change.patchset)
-        if hasattr(item.change, 'oldrev') and item.change.oldrev:
+        if (hasattr(item.change, 'oldrev') and item.change.oldrev
+            and item.change.oldrev != '0' * 40):
             zuul_params['oldrev'] = item.change.oldrev
-        if hasattr(item.change, 'newrev') and item.change.newrev:
+        if (hasattr(item.change, 'newrev') and item.change.newrev
+            and item.change.newrev != '0' * 40):
             zuul_params['newrev'] = item.change.newrev
         zuul_params['items'] = []
         for i in all_items:
             d = dict()
             d['project'] = dict(
                 name=i.change.project.name,
+                short_name=i.change.project.name.split('/')[-1],
                 canonical_hostname=i.change.project.canonical_hostname,
                 canonical_name=i.change.project.canonical_name)
             if hasattr(i.change, 'number'):
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 0ae3b4c..8d23cb7 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -55,6 +55,88 @@
     pass
 
 
+class DiskAccountant(object):
+    ''' A single thread to periodically run du and monitor a base directory
+
+    Whenever the accountant notices a dir over limit, it will call the
+    given func with an argument of the job directory. That function
+    should be used to remediate the problem, generally by killing the
+    job producing the disk bloat. The function will be called every
+    time the problem is noticed, so it should be handled synchronously
+    to avoid stacking up calls.
+    '''
+    log = logging.getLogger("zuul.ExecutorDiskAccountant")
+
+    def __init__(self, jobs_base, limit, func, cache_dir, usage_func=None):
+        '''
+        :param str jobs_base: absolute path name of dir to be monitored
+        :param int limit: maximum number of MB allowed to be in use in any one
+                          subdir
+        :param callable func: Function to call with overlimit dirs
+        :param str cache_dir: absolute path name of dir to be passed as the
+                              first argument to du. This will ensure du does
+                              not count any hardlinks to files in this
+                              directory against a single job.
+        :param callable usage_func: Optional function to call with usage
+                                    for every dir _NOT_ over limit
+        '''
+        # Don't cross the streams
+        if cache_dir == jobs_base:
+            raise Exception("Cache dir and jobs dir cannot be the same")
+        self.thread = threading.Thread(target=self._run,
+                                       name='executor-diskaccountant')
+        self.thread.daemon = True
+        self._running = False
+        self.jobs_base = jobs_base
+        self.limit = limit
+        self.func = func
+        self.cache_dir = cache_dir
+        self.usage_func = usage_func
+        self.stop_event = threading.Event()
+
+    def _run(self):
+        while self._running:
+            # Walk job base
+            before = time.time()
+            du = subprocess.Popen(
+                ['du', '-m', '--max-depth=1', self.cache_dir, self.jobs_base],
+                stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+            for line in du.stdout:
+                (size, dirname) = line.rstrip().split()
+                dirname = dirname.decode('utf8')
+                if dirname == self.jobs_base or dirname == self.cache_dir:
+                    continue
+                if os.path.dirname(dirname) == self.cache_dir:
+                    continue
+                size = int(size)
+                if size > self.limit:
+                    self.log.info(
+                        "{job} is using {size}MB (limit={limit})"
+                        .format(size=size, job=dirname, limit=self.limit))
+                    self.func(dirname)
+                elif self.usage_func:
+                    self.log.debug(
+                        "{job} is using {size}MB (limit={limit})"
+                        .format(size=size, job=dirname, limit=self.limit))
+                    self.usage_func(dirname, size)
+            du.wait()
+            after = time.time()
+            # Sleep half as long as that took, or 1s, whichever is longer
+            delay_time = max((after - before) / 2, 1.0)
+            self.stop_event.wait(delay_time)
+
+    def start(self):
+        self._running = True
+        self.thread.start()
+
+    def stop(self):
+        self._running = False
+        self.stop_event.set()
+        # We join here to avoid whitelisting the thread -- if it takes more
+        # than 5s to stop in tests, there's a problem.
+        self.thread.join(timeout=5)
+
+
 class Watchdog(object):
     def __init__(self, timeout, function, args):
         self.timeout = timeout
@@ -425,7 +507,6 @@
         # perhaps hostname+pid.
         self.hostname = socket.gethostname()
         self.log_streaming_port = log_streaming_port
-        self.zuul_url = config.get('merger', 'zuul_url')
         self.merger_lock = threading.Lock()
         self.verbose = False
         self.command_map = dict(
@@ -443,6 +524,8 @@
                                       '/var/lib/zuul/executor-git')
         self.default_username = get_default(self.config, 'executor',
                                             'default_username', 'zuul')
+        self.disk_limit_per_job = int(get_default(self.config, 'executor',
+                                                  'disk_limit_per_job', 250))
         self.merge_email = get_default(self.config, 'merger', 'git_user_email')
         self.merge_name = get_default(self.config, 'merger', 'git_user_name')
         execution_wrapper_name = get_default(self.config, 'executor',
@@ -486,6 +569,10 @@
             pass
 
         self.job_workers = {}
+        self.disk_accountant = DiskAccountant(self.jobdir_root,
+                                              self.disk_limit_per_job,
+                                              self.stopJobByJobdir,
+                                              self.merge_root)
 
     def _getMerger(self, root, logger=None):
         if root != self.merge_root:
@@ -530,6 +617,7 @@
         self.executor_thread = threading.Thread(target=self.run_executor)
         self.executor_thread.daemon = True
         self.executor_thread.start()
+        self.disk_accountant.start()
 
     def register(self):
         self.executor_worker.registerFunction("executor:execute")
@@ -537,9 +625,11 @@
                                               self.hostname)
         self.merger_worker.registerFunction("merger:merge")
         self.merger_worker.registerFunction("merger:cat")
+        self.merger_worker.registerFunction("merger:refstate")
 
     def stop(self):
         self.log.debug("Stopping")
+        self.disk_accountant.stop()
         self._running = False
         self._command_running = False
         self.command_socket.stop()
@@ -632,6 +722,9 @@
                     elif job.name == 'merger:merge':
                         self.log.debug("Got merge job: %s" % job.unique)
                         self.merge(job)
+                    elif job.name == 'merger:refstate':
+                        self.log.debug("Got refstate job: %s" % job.unique)
+                        self.refstate(job)
                     else:
                         self.log.error("Unable to handle job %s" % job.name)
                         job.sendWorkFail()
@@ -675,23 +768,30 @@
     def finishJob(self, unique):
         del(self.job_workers[unique])
 
+    def stopJobByJobdir(self, jobdir):
+        unique = os.path.basename(jobdir)
+        self.stopJobByUnique(unique)
+
     def stopJob(self, job):
         try:
             args = json.loads(job.arguments)
             self.log.debug("Stop job with arguments: %s" % (args,))
             unique = args['uuid']
-            job_worker = self.job_workers.get(unique)
-            if not job_worker:
-                self.log.debug("Unable to find worker for job %s" % (unique,))
-                return
-            try:
-                job_worker.stop()
-            except Exception:
-                self.log.exception("Exception sending stop command "
-                                   "to worker:")
+            self.stopJobByUnique(unique)
         finally:
             job.sendWorkComplete()
 
+    def stopJobByUnique(self, unique):
+        job_worker = self.job_workers.get(unique)
+        if not job_worker:
+            self.log.debug("Unable to find worker for job %s" % (unique,))
+            return
+        try:
+            job_worker.stop()
+        except Exception:
+            self.log.exception("Exception sending stop command "
+                               "to worker:")
+
     def cat(self, job):
         args = json.loads(job.arguments)
         task = self.update(args['connection'], args['project'])
@@ -701,8 +801,15 @@
                                          args['branch'], args['files'],
                                          args.get('dirs', []))
         result = dict(updated=True,
-                      files=files,
-                      zuul_url=self.zuul_url)
+                      files=files)
+        job.sendWorkComplete(json.dumps(result))
+
+    def refstate(self, job):
+        args = json.loads(job.arguments)
+        with self.merger_lock:
+            success, repo_state = self.merger.getRepoState(args['items'])
+        result = dict(updated=success,
+                      repo_state=repo_state)
         job.sendWorkComplete(json.dumps(result))
 
     def merge(self, job):
@@ -711,8 +818,7 @@
             ret = self.merger.mergeChanges(args['items'], args.get('files'),
                                            args.get('dirs', []),
                                            args.get('repo_state'))
-        result = dict(merged=(ret is not None),
-                      zuul_url=self.zuul_url)
+        result = dict(merged=(ret is not None))
         if ret is None:
             result['commit'] = result['files'] = result['repo_state'] = None
         else:
@@ -860,6 +966,10 @@
                 # a work complete result, don't run any jobs
                 return
 
+        state_items = [i for i in args['items'] if not i.get('number')]
+        if state_items:
+            merger.setRepoState(state_items, args['repo_state'])
+
         for project in args['projects']:
             repo = repos[project['canonical_name']]
             # If this project is the Zuul project and this is a ref
@@ -1256,6 +1366,7 @@
             hostname=self.executor_server.hostname,
             src_root=self.jobdir.src_root,
             log_root=self.jobdir.log_root,
+            work_root=self.jobdir.work_root,
             result_data_file=self.jobdir.result_data_file)
 
         nodes = self.getHostList(args)
@@ -1370,20 +1481,20 @@
             opt_prefix = 'trusted'
         else:
             opt_prefix = 'untrusted'
-        ro_dirs = get_default(self.executor_server.config, 'executor',
-                              '%s_ro_dirs' % opt_prefix)
-        rw_dirs = get_default(self.executor_server.config, 'executor',
-                              '%s_rw_dirs' % opt_prefix)
-        ro_dirs = ro_dirs.split(":") if ro_dirs else []
-        rw_dirs = rw_dirs.split(":") if rw_dirs else []
+        ro_paths = get_default(self.executor_server.config, 'executor',
+                               '%s_ro_paths' % opt_prefix)
+        rw_paths = get_default(self.executor_server.config, 'executor',
+                               '%s_rw_paths' % opt_prefix)
+        ro_paths = ro_paths.split(":") if ro_paths else []
+        rw_paths = rw_paths.split(":") if rw_paths else []
 
-        ro_dirs.append(self.executor_server.ansible_dir)
+        ro_paths.append(self.executor_server.ansible_dir)
 
         if self.executor_variables_file:
-            ro_dirs.append(self.executor_variables_file)
+            ro_paths.append(self.executor_variables_file)
 
-        self.executor_server.execution_wrapper.setMountsMap(ro_dirs,
-                                                            rw_dirs)
+        self.executor_server.execution_wrapper.setMountsMap(ro_paths,
+                                                            rw_paths)
 
         popen = self.executor_server.execution_wrapper.getPopen(
             work_dir=self.jobdir.root,
@@ -1427,6 +1538,7 @@
             if timeout:
                 watchdog.stop()
                 self.log.debug("Stopped watchdog")
+            self.log.debug("Stopped disk job killer")
 
         with self.proc_lock:
             self.proc = None
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 09b09d7..8282f86 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -13,6 +13,7 @@
 import logging
 
 from zuul import exceptions
+from zuul import model
 
 
 class DynamicChangeQueueContextManager(object):
@@ -483,20 +484,18 @@
     def scheduleMerge(self, item, files=None, dirs=None):
         build_set = item.current_build_set
 
-        if not hasattr(item.change, 'branch'):
-            self.log.debug("Change %s does not have an associated branch, "
-                           "not scheduling a merge job for item %s" %
-                           (item.change, item))
-            build_set.merge_state = build_set.COMPLETE
-            return True
-
         self.log.debug("Scheduling merge for item %s (files: %s, dirs: %s)" %
                        (item, files, dirs))
         build_set = item.current_build_set
         build_set.merge_state = build_set.PENDING
-        self.sched.merger.mergeChanges(build_set.merger_items,
-                                       item.current_build_set, files, dirs,
-                                       precedence=self.pipeline.precedence)
+        if isinstance(item.change, model.Change):
+            self.sched.merger.mergeChanges(build_set.merger_items,
+                                           item.current_build_set, files, dirs,
+                                           precedence=self.pipeline.precedence)
+        else:
+            self.sched.merger.getRepoState(build_set.merger_items,
+                                           item.current_build_set,
+                                           precedence=self.pipeline.precedence)
         return False
 
     def prepareItem(self, item):
@@ -675,13 +674,13 @@
         build_set = event.build_set
         item = build_set.item
         build_set.merge_state = build_set.COMPLETE
-        build_set.zuul_url = event.zuul_url
+        build_set.repo_state = event.repo_state
         if event.merged:
             build_set.commit = event.commit
             build_set.files.setFiles(event.files)
-            build_set.repo_state = event.repo_state
         elif event.updated:
-            build_set.commit = item.change.newrev
+            build_set.commit = (item.change.newrev or
+                                '0000000000000000000000000000000000000000')
         if not build_set.commit:
             self.log.info("Unable to merge change %s" % item.change)
             item.setUnableToMerge()
diff --git a/zuul/manager/independent.py b/zuul/manager/independent.py
index 06c9a01..7b0a9f5 100644
--- a/zuul/manager/independent.py
+++ b/zuul/manager/independent.py
@@ -44,6 +44,9 @@
         if hasattr(change, 'number'):
             history = history or []
             history.append(change.number)
+        else:
+            # Don't enqueue dependencies ahead of a non-change ref.
+            return True
 
         ret = self.checkForChangesNeededBy(change, change_queue)
         if ret in [True, False]:
diff --git a/zuul/merger/client.py b/zuul/merger/client.py
index e354d5d..5191a44 100644
--- a/zuul/merger/client.py
+++ b/zuul/merger/client.py
@@ -116,6 +116,11 @@
                     repo_state=repo_state)
         self.submitJob('merger:merge', data, build_set, precedence)
 
+    def getRepoState(self, items, build_set,
+                     precedence=zuul.model.PRECEDENCE_NORMAL):
+        data = dict(items=items)
+        self.submitJob('merger:refstate', data, build_set, precedence)
+
     def getFiles(self, connection_name, project_name, branch, files, dirs=[],
                  precedence=zuul.model.PRECEDENCE_HIGH):
         data = dict(connection=connection_name,
@@ -128,7 +133,6 @@
 
     def onBuildCompleted(self, job):
         data = getJobData(job)
-        zuul_url = data.get('zuul_url')
         merged = data.get('merged', False)
         updated = data.get('updated', False)
         commit = data.get('commit')
@@ -140,7 +144,7 @@
                       (job, merged, updated, commit))
         job.setComplete()
         if job.build_set:
-            self.sched.onMergeCompleted(job.build_set, zuul_url,
+            self.sched.onMergeCompleted(job.build_set,
                                         merged, updated, commit, files,
                                         repo_state)
         # The test suite expects the job to be removed from the
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index c5d1f2a..ed98696 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -20,6 +20,8 @@
 
 import zuul.model
 
+NULL_REF = '0000000000000000000000000000000000000000'
+
 
 def reset_repo_to_head(repo):
     # This lets us reset the repo even if there is a file in the root
@@ -178,8 +180,13 @@
             self.setRef(path, hexsha, repo)
             unseen.discard(path)
         for path in unseen:
-            self.log.debug("Delete reference %s", path)
-            git.refs.SymbolicReference.delete(repo, ref.path)
+            self.deleteRef(path, repo)
+
+    def deleteRef(self, path, repo=None):
+        if repo is None:
+            repo = self.createRepoObject()
+        self.log.debug("Delete reference %s", path)
+        git.refs.SymbolicReference.delete(repo, path)
 
     def checkout(self, ref):
         repo = self.createRepoObject()
@@ -369,6 +376,16 @@
                     recent[key] = ref.object
             project[ref.path] = ref.object.hexsha
 
+    def _alterRepoState(self, connection_name, project_name,
+                        repo_state, path, hexsha):
+        projects = repo_state.setdefault(connection_name, {})
+        project = projects.setdefault(project_name, {})
+        if hexsha == NULL_REF:
+            if path in project:
+                del project[path]
+        else:
+            project[path] = hexsha
+
     def _restoreRepoState(self, connection_name, project_name, repo,
                           repo_state):
         projects = repo_state.get(connection_name, {})
@@ -470,12 +487,8 @@
         if repo_state is None:
             repo_state = {}
         for item in items:
-            if item.get("number") and item.get("patchset"):
-                self.log.debug("Merging for change %s,%s." %
-                               (item["number"], item["patchset"]))
-            elif item.get("newrev") and item.get("oldrev"):
-                self.log.debug("Merging for rev %s with oldrev %s." %
-                               (item["newrev"], item["oldrev"]))
+            self.log.debug("Merging for change %s,%s" %
+                           (item["number"], item["patchset"]))
             commit = self._mergeItem(item, recent, repo_state)
             if not commit:
                 return None
@@ -492,6 +505,49 @@
             ret_recent[k] = v.hexsha
         return commit.hexsha, read_files, repo_state, ret_recent
 
+    def setRepoState(self, items, repo_state):
+        # Sets the repo state for the items
+        seen = set()
+        for item in items:
+            repo = self.getRepo(item['connection'], item['project'])
+            key = (item['connection'], item['project'], item['branch'])
+
+            if key in seen:
+                continue
+
+            repo.reset()
+            self._restoreRepoState(item['connection'], item['project'], repo,
+                                   repo_state)
+
+    def getRepoState(self, items):
+        # Gets the repo state for items.  Generally this will be
+        # called in any non-change pipeline.  We will return the repo
+        # state for each item, but manipulated with any information in
+        # the item (eg, if it creates a ref, that will be in the repo
+        # state regardless of the actual state).
+        seen = set()
+        recent = {}
+        repo_state = {}
+        for item in items:
+            repo = self.getRepo(item['connection'], item['project'])
+            key = (item['connection'], item['project'], item['branch'])
+            if key not in seen:
+                try:
+                    repo.reset()
+                except Exception:
+                    self.log.exception("Unable to reset repo %s" % repo)
+                    return (False, {})
+
+                self._saveRepoState(item['connection'], item['project'], repo,
+                                    repo_state, recent)
+
+            if item.get('newrev'):
+                # This is a ref update rather than a branch tip, so make sure
+                # our returned state includes this change.
+                self._alterRepoState(item['connection'], item['project'],
+                                     repo_state, item['ref'], item['newrev'])
+        return (True, repo_state)
+
     def getFiles(self, connection_name, project_name, branch, files, dirs=[]):
         repo = self.getRepo(connection_name, project_name)
         return repo.getFiles(files, dirs, branch=branch)
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index 555a4bc..fc599c1 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -28,7 +28,6 @@
 
     def __init__(self, config, connections={}):
         self.config = config
-        self.zuul_url = config.get('merger', 'zuul_url')
 
         merge_root = get_default(self.config, 'merger', 'git_dir',
                                  '/var/lib/zuul/merger-git')
@@ -59,6 +58,7 @@
     def register(self):
         self.worker.registerFunction("merger:merge")
         self.worker.registerFunction("merger:cat")
+        self.worker.registerFunction("merger:refstate")
 
     def stop(self):
         self.log.debug("Stopping")
@@ -81,6 +81,9 @@
                     elif job.name == 'merger:cat':
                         self.log.debug("Got cat job: %s" % job.unique)
                         self.cat(job)
+                    elif job.name == 'merger:refstate':
+                        self.log.debug("Got refstate job: %s" % job.unique)
+                        self.refstate(job)
                     else:
                         self.log.error("Unable to handle job %s" % job.name)
                         job.sendWorkFail()
@@ -97,8 +100,7 @@
         ret = self.merger.mergeChanges(
             args['items'], args.get('files'),
             args.get('dirs'), args.get('repo_state'))
-        result = dict(merged=(ret is not None),
-                      zuul_url=self.zuul_url)
+        result = dict(merged=(ret is not None))
         if ret is None:
             result['commit'] = result['files'] = result['repo_state'] = None
         else:
@@ -106,6 +108,14 @@
              recent) = ret
         job.sendWorkComplete(json.dumps(result))
 
+    def refstate(self, job):
+        args = json.loads(job.arguments)
+
+        success, repo_state = self.merger.getItemRepoState(args['items'])
+        result = dict(updated=success,
+                      repo_state=repo_state)
+        job.sendWorkComplete(json.dumps(result))
+
     def cat(self, job):
         args = json.loads(job.arguments)
         self.merger.updateRepo(args['connection'], args['project'])
@@ -113,6 +123,5 @@
                                      args['branch'], args['files'],
                                      args.get('dirs'))
         result = dict(updated=True,
-                      files=files,
-                      zuul_url=self.zuul_url)
+                      files=files)
         job.sendWorkComplete(json.dumps(result))
diff --git a/zuul/model.py b/zuul/model.py
index ed50164..26a7963 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -356,6 +356,8 @@
         self.label = label
         self.id = None
         self.lock = None
+        self.hold_job = None
+        self.comment = None
         # Attributes from Nodepool
         self._state = 'unknown'
         self.state_time = time.time()
@@ -396,6 +398,8 @@
     def toDict(self):
         d = {}
         d['state'] = self.state
+        d['hold_job'] = self.hold_job
+        d['comment'] = self.comment
         for k in self._keys:
             d[k] = getattr(self, k)
         return d
@@ -497,6 +501,9 @@
             name = ''
         return '<NodeSet %s%s%s>' % (name, self.nodes, self.groups)
 
+    def __len__(self):
+        return len(self.nodes)
+
 
 class NodeRequest(object):
     """A request for a set of nodes."""
@@ -1233,7 +1240,6 @@
         self.previous_build_set = None
         self.uuid = None
         self.commit = None
-        self.zuul_url = None
         self.dependent_items = None
         self.merger_items = None
         self.unable_to_merge = False
@@ -2085,6 +2091,10 @@
         self.load_classes = set()
         self.shadow_projects = set()
 
+        # The tenant's default setting of exclude_unprotected_branches will
+        # be overridden by this one if not None.
+        self.exclude_unprotected_branches = None
+
 
 class ProjectConfig(object):
     # Represents a project cofiguration
@@ -2444,6 +2454,8 @@
 class Tenant(object):
     def __init__(self, name):
         self.name = name
+        self.max_nodes_per_job = 5
+        self.exclude_unprotected_branches = False
         self.layout = None
         # The unparsed configuration from the main zuul config for
         # this tenant.
diff --git a/zuul/nodepool.py b/zuul/nodepool.py
index 8f6489c..0696c60 100644
--- a/zuul/nodepool.py
+++ b/zuul/nodepool.py
@@ -44,6 +44,35 @@
             except Exception:
                 self.log.exception("Error deleting node request:")
 
+    def holdNodeSet(self, nodeset, autohold_key):
+        '''
+        If requested, perform a hold on the given set of nodes.
+
+        :param NodeSet nodeset: The object containing the set of nodes to hold.
+        :param tuple autohold_key: A tuple with the tenant, project and job
+            names associated with the given NodeSet.
+        '''
+        if autohold_key not in self.sched.autohold_requests:
+            return
+
+        (hold_iterations, reason) = self.sched.autohold_requests[autohold_key]
+        nodes = nodeset.getNodes()
+
+        for node in nodes:
+            node.state = model.STATE_HOLD
+            node.hold_job = " ".join(autohold_key)
+            node.comment = reason
+            self.sched.zk.storeNode(node)
+
+        # We remove the autohold when the number of nodes in hold
+        # is equal to or greater than (run iteration count can be
+        # altered) the number of nodes used in a single job run
+        # times the number of run iterations requested.
+        nodes_in_hold = self.sched.zk.heldNodeCount(autohold_key)
+        if nodes_in_hold >= len(nodes) * hold_iterations:
+            self.log.debug("Removing autohold for %s", autohold_key)
+            del self.sched.autohold_requests[autohold_key]
+
     def useNodeSet(self, nodeset):
         self.log.info("Setting nodeset %s in use" % (nodeset,))
         for node in nodeset.getNodes():
diff --git a/zuul/rpcclient.py b/zuul/rpcclient.py
index fd3517f..1a0a084 100644
--- a/zuul/rpcclient.py
+++ b/zuul/rpcclient.py
@@ -48,6 +48,14 @@
         self.log.debug("Job complete, success: %s" % (not job.failure))
         return job
 
+    def autohold(self, tenant, project, job, reason, count):
+        data = {'tenant': tenant,
+                'project': project,
+                'job': job,
+                'reason': reason,
+                'count': count}
+        return not self.submitJob('zuul:autohold', data).failure
+
     def enqueue(self, tenant, pipeline, project, trigger, change):
         data = {'tenant': tenant,
                 'pipeline': pipeline,
diff --git a/zuul/rpclistener.py b/zuul/rpclistener.py
index 6543c91..52a7e51 100644
--- a/zuul/rpclistener.py
+++ b/zuul/rpclistener.py
@@ -49,6 +49,7 @@
         self.thread.start()
 
     def register(self):
+        self.worker.registerFunction("zuul:autohold")
         self.worker.registerFunction("zuul:enqueue")
         self.worker.registerFunction("zuul:enqueue_ref")
         self.worker.registerFunction("zuul:promote")
@@ -89,6 +90,39 @@
             except Exception:
                 self.log.exception("Exception while getting job")
 
+    def handle_autohold(self, job):
+        args = json.loads(job.arguments)
+        params = {}
+
+        tenant = self.sched.abide.tenants.get(args['tenant'])
+        if tenant:
+            params['tenant_name'] = args['tenant']
+        else:
+            error = "Invalid tenant: %s" % args['tenant']
+            job.sendWorkException(error.encode('utf8'))
+            return
+
+        (trusted, project) = tenant.getProject(args['project'])
+        if project:
+            params['project_name'] = project.canonical_name
+        else:
+            error = "Invalid project: %s" % args['project']
+            job.sendWorkException(error.encode('utf8'))
+            return
+
+        params['job_name'] = args['job']
+        params['reason'] = args['reason']
+
+        if args['count'] < 0:
+            error = "Invalid count: %d" % args['count']
+            job.sendWorkException(error.encode('utf8'))
+            return
+
+        params['count'] = args['count']
+
+        self.sched.autohold(**params)
+        job.sendWorkComplete()
+
     def _common_enqueue(self, job):
         args = json.loads(job.arguments)
         event = model.TriggerEvent()
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 2217b0b..a64d9e0 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -136,17 +136,15 @@
     """A remote merge operation has completed
 
     :arg BuildSet build_set: The build_set which is ready.
-    :arg str zuul_url: The URL of the Zuul Merger.
     :arg bool merged: Whether the merge succeeded (changes with refs).
     :arg bool updated: Whether the repo was updated (changes without refs).
     :arg str commit: The SHA of the merged commit (changes with refs).
     :arg dict repo_state: The starting repo state before the merge.
     """
 
-    def __init__(self, build_set, zuul_url, merged, updated, commit,
+    def __init__(self, build_set, merged, updated, commit,
                  files, repo_state):
         self.build_set = build_set
-        self.zuul_url = zuul_url
         self.merged = merged
         self.updated = updated
         self.commit = commit
@@ -231,6 +229,7 @@
         self.zuul_version = zuul_version.version_info.release_string()
         self.last_reconfigured = None
         self.tenant_last_reconfigured = {}
+        self.autohold_requests = {}
 
     def stop(self):
         self._stopped = True
@@ -316,11 +315,11 @@
         self.wake_event.set()
         self.log.debug("Done adding complete event for build: %s" % build)
 
-    def onMergeCompleted(self, build_set, zuul_url, merged, updated,
+    def onMergeCompleted(self, build_set, merged, updated,
                          commit, files, repo_state):
         self.log.debug("Adding merge complete event for build set: %s" %
                        build_set)
-        event = MergeCompletedEvent(build_set, zuul_url, merged,
+        event = MergeCompletedEvent(build_set, merged,
                                     updated, commit, files, repo_state)
         self.result_event_queue.put(event)
         self.wake_event.set()
@@ -349,6 +348,15 @@
         self.last_reconfigured = int(time.time())
         # TODOv3(jeblair): reconfigure time should be per-tenant
 
+    def autohold(self, tenant_name, project_name, job_name, reason, count):
+        key = (tenant_name, project_name, job_name)
+        if count == 0 and key in self.autohold_requests:
+            self.log.debug("Removing autohold for %s", key)
+            del self.autohold_requests[key]
+        else:
+            self.log.debug("Autohold requested for %s", key)
+            self.autohold_requests[key] = (count, reason)
+
     def promote(self, tenant_name, pipeline_name, change_ids):
         event = PromoteEvent(tenant_name, pipeline_name, change_ids)
         self.management_event_queue.put(event)
@@ -828,6 +836,16 @@
         # the nodes to nodepool.
         try:
             nodeset = build.build_set.getJobNodeSet(build.job.name)
+            autohold_key = (build.pipeline.layout.tenant.name,
+                            build.build_set.item.change.project.canonical_name,
+                            build.job.name)
+
+            try:
+                self.nodepool.holdNodeSet(nodeset, autohold_key)
+            except Exception:
+                self.log.exception("Unable to process autohold for %s",
+                                   autohold_key)
+
             self.nodepool.returnNodeSet(nodeset)
         except Exception:
             self.log.exception("Unable to return nodeset %s" % (nodeset,))
diff --git a/zuul/source/__init__.py b/zuul/source/__init__.py
index b37aeb4..0396aff 100644
--- a/zuul/source/__init__.py
+++ b/zuul/source/__init__.py
@@ -64,7 +64,7 @@
         """Get a project."""
 
     @abc.abstractmethod
-    def getProjectBranches(self, project):
+    def getProjectBranches(self, project, tenant):
         """Get branches for a project"""
 
     @abc.abstractmethod
diff --git a/zuul/sphinx/__init__.py b/zuul/sphinx/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/zuul/sphinx/__init__.py
diff --git a/zuul/sphinx/zuul.py b/zuul/sphinx/zuul.py
new file mode 100644
index 0000000..7946074
--- /dev/null
+++ b/zuul/sphinx/zuul.py
@@ -0,0 +1,206 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sphinx import addnodes
+from sphinx.domains import Domain
+from sphinx.roles import XRefRole
+from sphinx.directives import ObjectDescription
+from sphinx.util.nodes import make_refnode
+from docutils import nodes
+
+from typing import Dict # noqa
+
+
+class ZuulConfigObject(ObjectDescription):
+    object_names = {
+        'attr': 'attribute',
+        'var': 'variable',
+    }
+
+    def get_path(self):
+        return self.env.ref_context.get('zuul:attr_path', [])
+
+    def get_display_path(self):
+        return self.env.ref_context.get('zuul:display_attr_path', [])
+
+    @property
+    def parent_pathname(self):
+        return '.'.join(self.get_display_path())
+
+    @property
+    def full_pathname(self):
+        name = self.names[-1].lower()
+        return '.'.join(self.get_path() + [name])
+
+    def add_target_and_index(self, name, sig, signode):
+        targetname = self.objtype + '-' + self.full_pathname
+        if targetname not in self.state.document.ids:
+            signode['names'].append(targetname)
+            signode['ids'].append(targetname)
+            signode['first'] = (not self.names)
+            self.state.document.note_explicit_target(signode)
+            objects = self.env.domaindata['zuul']['objects']
+            if targetname in objects:
+                self.state_machine.reporter.warning(
+                    'duplicate object description of %s, ' % targetname +
+                    'other instance in ' +
+                    self.env.doc2path(objects[targetname][0]) +
+                    ', use :noindex: for one of them',
+                    line=self.lineno)
+            objects[targetname] = (self.env.docname, self.objtype)
+
+        objname = self.object_names.get(self.objtype, self.objtype)
+        if self.parent_pathname:
+            indextext = '%s (%s of %s)' % (name, objname,
+                                           self.parent_pathname)
+        else:
+            indextext = '%s (%s)' % (name, objname)
+        self.indexnode['entries'].append(('single', indextext,
+                                          targetname, '', None))
+
+
+class ZuulAttrDirective(ZuulConfigObject):
+    has_content = True
+
+    option_spec = {
+        'required': lambda x: x,
+        'default': lambda x: x,
+    }
+
+    def before_content(self):
+        path = self.env.ref_context.setdefault('zuul:attr_path', [])
+        path.append(self.names[-1])
+        path = self.env.ref_context.setdefault('zuul:display_attr_path', [])
+        path.append(self.names[-1])
+
+    def after_content(self):
+        path = self.env.ref_context.get('zuul:attr_path')
+        if path:
+            path.pop()
+        path = self.env.ref_context.get('zuul:display_attr_path')
+        if path:
+            path.pop()
+
+    def handle_signature(self, sig, signode):
+        path = self.get_display_path()
+        signode['is_multiline'] = True
+        line = addnodes.desc_signature_line()
+        line['add_permalink'] = True
+        for x in path:
+            line += addnodes.desc_addname(x + '.', x + '.')
+        line += addnodes.desc_name(sig, sig)
+        if 'required' in self.options:
+            line += addnodes.desc_annotation(' (required)', ' (required)')
+        signode += line
+        if 'default' in self.options:
+            line = addnodes.desc_signature_line()
+            line += addnodes.desc_type('Default: ', 'Default: ')
+            line += nodes.literal(self.options['default'],
+                                  self.options['default'])
+            signode += line
+        return sig
+
+
+class ZuulValueDirective(ZuulConfigObject):
+    has_content = True
+
+    def handle_signature(self, sig, signode):
+        signode += addnodes.desc_name(sig, sig)
+        return sig
+
+
+class ZuulVarDirective(ZuulConfigObject):
+    has_content = True
+
+    option_spec = {
+        'type': lambda x: x,
+        'hidden': lambda x: x,
+    }
+
+    type_map = {
+        'list': '[]',
+        'dict': '{}',
+    }
+
+    def get_type_str(self):
+        if 'type' in self.options:
+            return self.type_map[self.options['type']]
+        return ''
+
+    def before_content(self):
+        path = self.env.ref_context.setdefault('zuul:attr_path', [])
+        element = self.names[-1]
+        path.append(element)
+        path = self.env.ref_context.setdefault('zuul:display_attr_path', [])
+        element = self.names[-1] + self.get_type_str()
+        path.append(element)
+
+    def after_content(self):
+        path = self.env.ref_context.get('zuul:attr_path')
+        if path:
+            path.pop()
+        path = self.env.ref_context.get('zuul:display_attr_path')
+        if path:
+            path.pop()
+
+    def handle_signature(self, sig, signode):
+        if 'hidden' in self.options:
+            return sig
+        path = self.get_display_path()
+        for x in path:
+            signode += addnodes.desc_addname(x + '.', x + '.')
+        signode += addnodes.desc_name(sig, sig)
+        return sig
+
+
+class ZuulDomain(Domain):
+    name = 'zuul'
+    label = 'Zuul'
+
+    directives = {
+        'attr': ZuulAttrDirective,
+        'value': ZuulValueDirective,
+        'var': ZuulVarDirective,
+    }
+
+    roles = {
+        'attr': XRefRole(innernodeclass=nodes.inline,  # type: ignore
+                         warn_dangling=True),
+        'value': XRefRole(innernodeclass=nodes.inline,  # type: ignore
+                          warn_dangling=True),
+        'var': XRefRole(innernodeclass=nodes.inline,  # type: ignore
+                        warn_dangling=True),
+    }
+
+    initial_data = {
+        'objects': {},
+    }  # type: Dict[str, Dict]
+
+    def resolve_xref(self, env, fromdocname, builder, type, target,
+                     node, contnode):
+        objects = self.data['objects']
+        name = type + '-' + target
+        obj = objects.get(name)
+        if obj:
+            return make_refnode(builder, fromdocname, obj[0], name,
+                                contnode, name)
+
+    def clear_doc(self, docname):
+        for fullname, (fn, _l) in list(self.data['objects'].items()):
+            if fn == docname:
+                del self.data['objects'][fullname]
+
+
+def setup(app):
+    app.add_domain(ZuulDomain)
diff --git a/zuul/webapp.py b/zuul/webapp.py
index e4feaa0..b9129b8 100644
--- a/zuul/webapp.py
+++ b/zuul/webapp.py
@@ -133,11 +133,15 @@
                 return handler(path, '', request)
 
         # Now try with a tenant_name stripped
-        tenant_name = request.path.split('/')[1]
-        path = request.path.replace('/' + tenant_name, '')
+        x, tenant_name, path = request.path.split('/', 2)
+        path = '/' + path
         # Handle keys
         if path.startswith('/keys'):
-            return self._handle_keys(request, path)
+            try:
+                return self._handle_keys(request, path)
+            except Exception as e:
+                self.log.exception("Issue with _handle_keys")
+                raise
         for path_re, handler in self.routes.values():
             if path_re.match(path):
                 return handler(path, tenant_name, request)
diff --git a/zuul/zk.py b/zuul/zk.py
index 31b85ea..5ea4e56 100644
--- a/zuul/zk.py
+++ b/zuul/zk.py
@@ -15,19 +15,12 @@
 import json
 import logging
 import time
+
 from kazoo.client import KazooClient, KazooState
 from kazoo import exceptions as kze
 from kazoo.recipe.lock import Lock
 
-# States:
-# We are building this node but it is not ready for use.
-BUILDING = 'building'
-# The node is ready for use.
-READY = 'ready'
-# The node should be deleted.
-DELETING = 'deleting'
-
-STATES = set([BUILDING, READY, DELETING])
+import zuul.model
 
 
 class LockException(Exception):
@@ -246,3 +239,25 @@
             raise LockException("Node %s does not hold a lock" % (node,))
         node.lock.release()
         node.lock = None
+
+    def heldNodeCount(self, autohold_key):
+        '''
+        Count the number of nodes being held for the given tenant/project/job.
+
+        :param tuple autohold_key: A tuple of the tenant, project and job names.
+        '''
+        identifier = " ".join(autohold_key)
+        try:
+            nodes = self.client.get_children(self.NODE_ROOT)
+        except kze.NoNodeError:
+            return 0
+
+        count = 0
+        for nodeid in nodes:
+            node_path = '%s/%s' % (self.NODE_ROOT, nodeid)
+            node_data, node_stat = self.client.get(node_path)
+            node_data = self._strToDict(node_data)
+            if (node_data['state'] == zuul.model.STATE_HOLD and
+                    node_data.get('hold_job') == identifier):
+                count += 1
+        return count