Merge "Add required reason for hold" into feature/zuulv3
diff --git a/doc/source/admin/components.rst b/doc/source/admin/components.rst
index c11b5cb..e30966b 100644
--- a/doc/source/admin/components.rst
+++ b/doc/source/admin/components.rst
@@ -354,6 +354,33 @@
 
      variables=/etc/zuul/variables.yaml
 
+**disk_limit_per_job**
+  This integer is the maximum number of megabytes that any one job is
+  allowed to consume on disk while it is running. If a job's scratch
+  space exceeds this limit, the job will be aborted::
+
+      disk_limit_per_job=100
+
+**trusted_ro_paths**
+
+  List of paths, separated by ':', to bind mount read-only into
+  trusted bubblewrap contexts.
+
+**trusted_rw_paths**
+
+  List of paths, separated by ':', to bind mount read-write into
+  trusted bubblewrap contexts.
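+
+  For example, to expose a read-only scripts directory and a writable
+  log directory to trusted playbooks (the values below match the
+  updated ``zuul.conf-sample`` in this change)::
+
+      trusted_ro_paths=/opt/zuul-scripts:/var/cache
+      trusted_rw_paths=/opt/zuul-logs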
+
+**untrusted_ro_paths**
+
+  List of paths, separated by ':', to bind mount read-only into
+  untrusted bubblewrap contexts.
+
+**untrusted_rw_paths**
+
+  List of paths, separated by ':', to bind mount read-write into
+  untrusted bubblewrap contexts.
+
 merger
 """"""
 
diff --git a/doc/source/admin/drivers/gerrit.rst b/doc/source/admin/drivers/gerrit.rst
index 296e47f..454f8d0 100644
--- a/doc/source/admin/drivers/gerrit.rst
+++ b/doc/source/admin/drivers/gerrit.rst
@@ -145,9 +145,9 @@
   This may be used for any event.  It requires that a certain kind of
   approval be present for the current patchset of the change (the
   approval could be added by the event in question).  It follows the
-  same syntax as the :ref:`"approval" pipeline requirement
-  <gerrit-pipeline-require-approval>`. For each specified criteria
-  there must exist a matching approval.
+  same syntax as :attr:`pipeline.require.<gerrit
+  source>.approval`. For each specified criterion there must exist a
+  matching approval.
 
 **reject-approval**
   This takes a list of approvals in the same format as
@@ -174,13 +174,12 @@
 Requirements Configuration
 --------------------------
 
-As described in :ref:`pipeline.require <pipeline-require>` and
-:ref:`pipeline.reject <pipeline-reject>`, pipelines may specify that
-items meet certain conditions in order to be enqueued into the
-pipeline.  These conditions vary according to the source of the
-project in question.  To supply requirements for changes from a Gerrit
-source named *my-gerrit*, create a configuration such as the
-following::
+As described in :attr:`pipeline.require` and :attr:`pipeline.reject`,
+pipelines may specify that items meet certain conditions in order to
+be enqueued into the pipeline.  These conditions vary according to the
+source of the project in question.  To supply requirements for changes
+from a Gerrit source named *my-gerrit*, create a configuration such as
+the following::
 
   pipeline:
     require:
@@ -192,14 +191,12 @@
 named *my-gerrit* must have a Code Review vote of +2 in order to be
 enqueued into the pipeline.
 
-.. zuul:configobject:: pipeline.require.<source>
+.. attr:: pipeline.require.<gerrit source>
 
    The dictionary passed to the Gerrit pipeline `require` attribute
    supports the following attributes:
 
-   .. _gerrit-pipeline-require-approval:
-
-   .. zuul:attr:: approval
+   .. attr:: approval
 
       This requires that a certain kind of approval be present for the
       current patchset of the change (the approval could be added by
@@ -207,24 +204,24 @@
       which are optional and are combined together so that there must
       be an approval matching all specified requirements.
 
-      .. zuul:attr:: username
+      .. attr:: username
 
          If present, an approval from this username is required.  It is
          treated as a regular expression.
 
-      .. zuul:attr:: email
+      .. attr:: email
 
          If present, an approval with this email address is required.  It is
          treated as a regular expression.
 
-      .. zuul:attr:: older-than
+      .. attr:: older-than
 
          If present, the approval must be older than this amount of time
          to match.  Provide a time interval as a number with a suffix of
          "w" (weeks), "d" (days), "h" (hours), "m" (minutes), "s"
          (seconds).  Example ``48h`` or ``2d``.
 
-      .. zuul:attr:: newer-than
+      .. attr:: newer-than
 
          If present, the approval must be newer than this amount
          of time to match.  Same format as "older-than".
@@ -235,33 +232,33 @@
       may either be a single value or a list: ``Verified: [1, 2]``
       would match either a +1 or +2 vote.
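+
+      For example, to require a Code-Review +2 vote from a particular
+      reviewer which is at least two days old (the username is purely
+      illustrative)::
+
+        approval:
+          - username: ^alice$
+            Code-Review: 2
+            older-than: 2d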
 
-   .. zuul:attr:: open
+   .. attr:: open
 
       A boolean value (``true`` or ``false``) that indicates whether
       the change must be open or closed in order to be enqueued.
 
-   .. zuul:attr:: current-patchset
+   .. attr:: current-patchset
 
       A boolean value (``true`` or ``false``) that indicates whether the
       change must be the current patchset in order to be enqueued.
 
-   .. zuul:attr:: status
+   .. attr:: status
 
       A string value that corresponds with the status of the change
       reported by the trigger.
 
-.. zuul:configobject:: pipeline.reject.<source>
+.. attr:: pipeline.reject.<gerrit source>
 
    The `reject` attribute is the mirror of the `require` attribute.  It
    also accepts a dictionary under the connection name.  This
    dictionary supports the following attributes:
 
-   .. zuul:attr:: approval
+   .. attr:: approval
 
       This takes a list of approvals. If an approval matches the
       provided criteria the change can not be entered into the
-      pipeline. It follows the same syntax as the :ref:`approval
-      pipeline requirement above <gerrit-pipeline-require-approval>`.
+      pipeline. It follows the same syntax as
+      :attr:`pipeline.require.<gerrit source>.approval`.
 
       Example to reject a change with any negative vote::
 
diff --git a/doc/source/admin/drivers/github.rst b/doc/source/admin/drivers/github.rst
index 6619322..ed577a5 100644
--- a/doc/source/admin/drivers/github.rst
+++ b/doc/source/admin/drivers/github.rst
@@ -198,13 +198,12 @@
 Requirements Configuration
 --------------------------
 
-As described in :ref:`pipeline.require <pipeline-require>` and
-:ref:`pipeline.reject <pipeline-reject>`, pipelines may specify that
-items meet certain conditions in order to be enqueued into the
-pipeline.  These conditions vary according to the source of the
-project in question.  To supply requirements for changes from a GitHub
-source named *my-github*, create a congfiguration such as the
-following::
+As described in :attr:`pipeline.require` and :attr:`pipeline.reject`,
+pipelines may specify that items meet certain conditions in order to
+be enqueued into the pipeline.  These conditions vary according to the
+source of the project in question.  To supply requirements for changes
+from a GitHub source named *my-github*, create a configuration such
+as the following::
 
   pipeline:
     require:
@@ -216,14 +215,12 @@
 named *my-github* must have an approved code review in order to be
 enqueued into the pipeline.
 
-.. zuul:configobject:: pipeline.require.<source>
+.. attr:: pipeline.require.<github source>
 
    The dictionary passed to the GitHub pipeline `require` attribute
    supports the following attributes:
 
-   .. _github-pipeline-require-review:
-
-   .. zuul:attr:: review
+   .. attr:: review
 
       This requires that a certain kind of code review be present for
       the pull request (it could be added by the event in question).
@@ -231,46 +228,46 @@
       are combined together so that there must be a code review
       matching all specified requirements.
 
-      .. zuul:attr:: username
+      .. attr:: username
 
          If present, a code review from this username is required.  It
          is treated as a regular expression.
 
-      .. zuul:attr:: email
+      .. attr:: email
 
          If present, a code review with this email address is
          required.  It is treated as a regular expression.
 
-      .. zuul:attr:: older-than
+      .. attr:: older-than
 
          If present, the code review must be older than this amount of
          time to match.  Provide a time interval as a number with a
          suffix of "w" (weeks), "d" (days), "h" (hours), "m"
          (minutes), "s" (seconds).  Example ``48h`` or ``2d``.
 
-      .. zuul:attr:: newer-than
+      .. attr:: newer-than
 
          If present, the code review must be newer than this amount of
          time to match.  Same format as "older-than".
 
-      .. zuul:attr:: type
+      .. attr:: type
 
          If present, the code review must match this type (or types).
 
          .. TODO: what types are valid?
 
-      .. zuul:attr:: permission
+      .. attr:: permission
 
          If present, the author of the code review must have this
          permission (or permissions).  The available values are
          ``read``, ``write``, and ``admin``.
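+
+      For example, to require an approved review from a reviewer with
+      write access to the repository (an illustrative combination of
+      the attributes above)::
+
+        review:
+          - type: approved
+            permission: write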
 
-   .. zuul:attr:: open
+   .. attr:: open
 
       A boolean value (``true`` or ``false``) that indicates whether
       the change must be open or closed in order to be enqueued.
 
-   .. zuul:attr:: current-patchset
+   .. attr:: current-patchset
 
       A boolean value (``true`` or ``false``) that indicates whether
       the item must be associated with the latest commit in the pull
@@ -279,26 +276,26 @@
       .. TODO: this could probably be expanded upon -- under what
          circumstances might this happen with github
 
-   .. zuul:attr:: status
+   .. attr:: status
 
       A string value that corresponds with the status of the pull
       request.  The syntax is ``user:status:value``.
 
-   .. zuul:attr:: label
+   .. attr:: label
 
       A string value indicating that the pull request must have the
       indicated label (or labels).
 
 
-.. zuul:configobject:: pipeline.reject.<source>
+.. attr:: pipeline.reject.<github source>
 
    The `reject` attribute is the mirror of the `require` attribute.  It
    also accepts a dictionary under the connection name.  This
    dictionary supports the following attributes:
 
-   .. zuul:attr:: review
+   .. attr:: review
 
       This takes a list of code reviews.  If a code review matches the
       provided criteria the pull request can not be entered into the
-      pipeline.  It follows the same syntax as the :ref:`review
-      pipeline requirement above <github-pipeline-require-review>`.
+      pipeline.  It follows the same syntax as
+      :attr:`pipeline.require.<github source>.review`.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 80cde65..7c0d587 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -34,6 +34,8 @@
 #extensions = ['sphinx.ext.intersphinx']
 #intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
 
+primary_domain = 'zuul'
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
 
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 2ef581a..3eccbb5 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -105,42 +105,42 @@
 Here is an example *check* pipeline, which runs whenever a new
 patchset is created in Gerrit.  If the associated jobs all report
 success, the pipeline reports back to Gerrit with a *Verified* vote of
-+1, or if at least one of them fails, a -1::
++1, or if at least one of them fails, a -1:
 
-  - pipeline:
-      name: check
-      manager: independent
-      trigger:
-        my_gerrit:
-          - event: patchset-created
-      success:
-        my_gerrit:
-          Verified: 1
-      failure:
-        my_gerrit
-          Verified: -1
+.. code-block:: yaml
+
+   - pipeline:
+       name: check
+       manager: independent
+       trigger:
+         my_gerrit:
+           - event: patchset-created
+       success:
+         my_gerrit:
+           Verified: 1
+       failure:
+         my_gerrit:
+           Verified: -1
 
 .. TODO: See TODO for more annotated examples of common pipeline configurations.
 
-.. zuul:configobject:: pipeline
+.. attr:: pipeline
 
    The attributes available on a pipeline are as follows (all are
    optional unless otherwise specified):
 
-   .. zuul:attr:: name
+   .. attr:: name
       :required:
 
       This is used later in the project definition to indicate what jobs
       should be run for events in the pipeline.
 
-   .. zuul:attr:: manager
+   .. attr:: manager
       :required:
 
       There are currently two schemes for managing pipelines:
 
-      .. _independent_pipeline_manager:
-
-      .. zuul:value:: independent
+      .. value:: independent
 
          Every event in this pipeline should be treated as independent
          of other events in the pipeline.  This is appropriate when
@@ -158,9 +158,7 @@
          pipeline. In that case, the changes have already merged, so
          the results can not affect any other events in the pipeline.
 
-      .. _dependent_pipeline_manager:
-
-      .. zuul:value:: dependent
+      .. value:: dependent
 
          The dependent pipeline manager is designed for gating.  It
          ensures that every change is tested exactly as it is going to
@@ -184,7 +182,7 @@
          For more detail on the theory and operation of Zuul's
          dependent pipeline manager, see: :doc:`gating`.
 
-   .. zuul:attr:: allow-secrets
+   .. attr:: allow-secrets
 
       This is a boolean which can be used to prevent jobs which
       require secrets from running in this pipeline.  Some pipelines
@@ -196,34 +194,34 @@
 
       For more information, see :ref:`secret`.
 
-   .. zuul:attr:: description
+   .. attr:: description
 
       This field may be used to provide a textual description of the
       pipeline.  It may appear in the status page or in documentation.
 
-   .. zuul:attr:: success-message
+   .. attr:: success-message
 
       The introductory text in reports when all the voting jobs are
       successful.  Defaults to "Build successful."
 
-   .. zuul:attr:: failure-message
+   .. attr:: failure-message
 
       The introductory text in reports when at least one voting job
       fails.  Defaults to "Build failed."
 
-   .. zuul:attr:: merge-failure-message
+   .. attr:: merge-failure-message
 
       The introductory text in the message reported when a change
       fails to merge with the current state of the repository.
       Defaults to "Merge failed."
 
-   .. zuul:attr:: footer-message
+   .. attr:: footer-message
 
       Supplies additional information after test results.  Useful for
       adding information about the CI system such as debugging and
       contact details.
 
-   .. zuul:attr:: trigger
+   .. attr:: trigger
 
       At least one trigger source must be supplied for each pipeline.
       Triggers are not exclusive -- matching events may be placed in
@@ -234,9 +232,7 @@
       of the connection will dictate which options are available.  See
       :ref:`drivers`.
 
-   .. _pipeline-require:
-
-   .. zuul:attr:: require
+   .. attr:: require
 
       If this section is present, it establishes pre-requisites for
       any kind of item entering the Pipeline.  Regardless of how the
@@ -249,9 +245,7 @@
       type of the connection will dictate which options are available.
       See :ref:`drivers`.
 
-   .. _pipeline-reject:
-
-   .. zuul:attr:: reject
+   .. attr:: reject
 
       If this section is present, it establishes pre-requisites that
       can block an item from being enqueued. It can be considered a
@@ -261,7 +255,7 @@
       type of the connection will dictate which options are available.
       See :ref:`drivers`.
 
-   .. zuul:attr:: dequeue-on-new-patchset
+   .. attr:: dequeue-on-new-patchset
 
       Normally, if a new patchset is uploaded to a change that is in a
       pipeline, the existing entry in the pipeline will be removed
@@ -269,7 +263,7 @@
       merge as well.  To suppress this behavior (and allow jobs to
       continue running), set this to ``false``.  Default: ``true``.
 
-   .. zuul:attr:: ignore-dependencies
+   .. attr:: ignore-dependencies
 
       In any kind of pipeline (dependent or independent), Zuul will
       attempt to enqueue all dependencies ahead of the current change
@@ -279,7 +273,7 @@
       pipeline, set this to ``true``.  This option is ignored by
       dependent pipelines.  The default is: ``false``.
 
-   .. zuul:attr:: precedence
+   .. attr:: precedence
 
       Indicates how the build scheduler should prioritize jobs for
       different pipelines.  Each pipeline may have one precedence,
@@ -295,7 +289,7 @@
    driver which implements it.  See :ref:`drivers` for more
    information.
 
-   .. zuul:attr:: success
+   .. attr:: success
 
       Describes where Zuul should report to if all the jobs complete
       successfully.  This section is optional; if it is omitted, Zuul
@@ -305,25 +299,25 @@
       connection name. The options available depend on the driver for
       the supplied connection.
 
-   .. zuul:attr:: failure
+   .. attr:: failure
 
       These reporters describe what Zuul should do if at least one job
       fails.
 
-   .. zuul:attr:: merge-failure
+   .. attr:: merge-failure
 
       These reporters describe what Zuul should do if it is unable to
       merge in the patchset. If no merge-failure reporters are listed
       then the ``failure`` reporters will be used to notify of
       unsuccessful merges.
 
-   .. zuul:attr:: start
+   .. attr:: start
 
       These reporters describe what Zuul should do when a change is
       added to the pipeline.  This can be used, for example, to reset
       a previously reported result.
 
-   .. zuul:attr:: disabled
+   .. attr:: disabled
 
       These reporters describe what Zuul should do when a pipeline is
       disabled.  See ``disable-after-consecutive-failures``.
@@ -333,7 +327,7 @@
    due to a problem with an external dependency, or unusually high
    non-deterministic test failures).
 
-   .. zuul:attr:: disable-after-consecutive-failures
+   .. attr:: disable-after-consecutive-failures
 
       If set, a pipeline can enter a ``disabled`` state if too many
       changes in a row fail. When this value is exceeded the pipeline
@@ -342,7 +336,7 @@
       ``disabled`` reporters.  (No ``start`` reports are made when a
       pipeline is disabled).
 
-   .. zuul:attr:: window
+   .. attr:: window
 
       Dependent pipeline managers only. Zuul can rate limit dependent
       pipelines in a manner similar to TCP flow control.  Jobs are
@@ -352,13 +346,13 @@
       be a positive integer value. A value of ``0`` disables rate
       limiting on the DependentPipelineManager.  Default: ``20``.
 
-   .. zuul:attr:: window-floor
+   .. attr:: window-floor
 
       Dependent pipeline managers only. This is the minimum value for
       the window described above. Should be a positive non zero
       integer value.  Default: ``3``.
 
-   .. zuul:attr:: window-increase-type
+   .. attr:: window-increase-type
 
       Dependent pipeline managers only. This value describes how the
       window should grow when changes are successfully merged by
@@ -369,13 +363,13 @@
       previous window value and the result will become the window
       size.  Default: ``linear``.
 
-   .. zuul:attr:: window-increase-factor
+   .. attr:: window-increase-factor
 
       Dependent pipeline managers only. The value to be added or
       multiplied against the previous window value to determine the
       new window after successful change merges.  Default: ``1``.
 
-   .. zuul:attr:: window-decrease-type
+   .. attr:: window-decrease-type
 
       Dependent pipeline managers only. This value describes how the
       window should shrink when changes are not able to be merged by
@@ -386,7 +380,7 @@
       previous window value and the result will become the window
       size.  Default: ``exponential``.
 
-   .. zuul:attr:: window-decrease-factor
+   .. attr:: window-decrease-factor
 
       Dependent pipeline managers only. The value to be subtracted or
       divided against the previous window value to determine the new
@@ -451,365 +445,418 @@
 
 Further inheritance would nest even deeper.
 
-Here is an example of two job definitions::
+Here is an example of two job definitions:
 
-  - job:
-      name: base
-      pre-run: copy-git-repos
-      post-run: copy-logs
+.. code-block:: yaml
 
-  - job:
-      name: run-tests
-      parent: base
-      nodes:
-        - name: test-node
-          image: fedora
+   - job:
+       name: base
+       pre-run: copy-git-repos
+       post-run: copy-logs
 
-The following attributes are available on a job; all are optional
-unless otherwise specified:
+   - job:
+       name: run-tests
+       parent: base
+       nodes:
+         - name: test-node
+           image: fedora
 
-**name** (required)
-  The name of the job.  By default, Zuul looks for a playbook with
-  this name to use as the main playbook for the job.  This name is
-  also referenced later in a project pipeline configuration.
+.. attr:: job
 
-**parent**
-  Specifies a job to inherit from.  The parent job can be defined in
-  this or any other project.  Any attributes not specified on a job
-  will be collected from its parent.
+   The following attributes are available on a job; all are optional
+   unless otherwise specified:
 
-**description**
-  A textual description of the job.  Not currently used directly by
-  Zuul, but it is used by the zuul-sphinx extension to Sphinx to
-  auto-document Zuul jobs (in which case it is interpreted as
-  ReStructuredText.
+   .. attr:: name
+      :required:
 
-**success-message**
-  Normally when a job succeeds, the string "SUCCESS" is reported as
-  the result for the job.  If set, this option may be used to supply a
-  different string.  Default: "SUCCESS".
+      The name of the job.  By default, Zuul looks for a playbook with
+      this name to use as the main playbook for the job.  This name is
+      also referenced later in a project pipeline configuration.
 
-**failure-message**
-  Normally when a job fails, the string "FAILURE" is reported as
-  the result for the job.  If set, this option may be used to supply a
-  different string.  Default: "FAILURE".
+   .. attr:: parent
 
-**success-url**
-  When a job succeeds, this URL is reported along with the result.  If
-  this value is not supplied, Zuul uses the content of the job
-  :ref:`return value <return_values>` **zuul.log_url**.  This is
-  recommended as it allows the code which stores the URL to the job
-  artifacts to report exactly where they were stored.  To override
-  this value, or if it is not set, supply an absolute URL in this
-  field.  If a relative URL is supplied in this field, and
-  **zuul.log_url** is set, then the two will be combined to produce
-  the URL used for the report.  This can be used to specify that
-  certain jobs should "deep link" into the stored job artifacts.
-  Default: none.
+      Specifies a job to inherit from.  The parent job can be defined
+      in this or any other project.  Any attributes not specified on
+      a job will be collected from its parent.
 
-**failure-url**
-  When a job fails, this URL is reported along with the result.
-  Otherwise behaves the same as **success-url**.
+   .. attr:: description
 
-**hold-following-changes**
-  In a dependent pipeline, this option may be used to indicate that no
-  jobs should start on any items which depend on the current item
-  until this job has completed successfully.  This may be used to
-  conserve build resources, at the expense of inhibiting the
-  parallelization which speeds the processing of items in a dependent
-  pipeline.  A boolean value, default: false.
+      A textual description of the job.  Not currently used directly
+      by Zuul, but it is used by the zuul-sphinx extension to Sphinx
+      to auto-document Zuul jobs (in which case it is interpreted as
+      ReStructuredText).
 
-**voting**
-  Indicates whether the result of this job should be used in
-  determining the overall result of the item.  A boolean value,
-  default: true.
+   .. attr:: success-message
 
-**semaphore**
-  The name of a :ref:`semaphore` which should be acquired and released
-  when the job begins and ends.  If the semaphore is at maximum
-  capacity, then Zuul will wait until it can be acquired before
-  starting the job.  Default: none.
+      Normally when a job succeeds, the string "SUCCESS" is reported
+      as the result for the job.  If set, this option may be used to
+      supply a different string.  Default: "SUCCESS".
 
-**tags**
-  Metadata about this job.  Tags are units of information attached to
-  the job; they do not affect Zuul's behavior, but they can be used
-  within the job to characterize the job.  For example, a job which
-  tests a certain subsystem could be tagged with the name of that
-  subsystem, and if the job's results are reported into a database,
-  then the results of all jobs affecting that subsystem could be
-  queried.  This attribute is specified as a list of strings, and when
-  inheriting jobs or applying variants, tags accumulate in a set, so
-  the result is always a set of all the tags from all the jobs and
-  variants used in constructing the frozen job, with no duplication.
-  Default: none.
+   .. attr:: failure-message
 
-**branches**
-  A regular expression (or list of regular expressions) which describe
-  on what branches a job should run (or in the case of variants: to
-  alter the behavior of a job for a certain branch).
+      Normally when a job fails, the string "FAILURE" is reported as
+      the result for the job.  If set, this option may be used to
+      supply a different string.  Default: "FAILURE".
 
-  If there is no job definition for a given job which matches the
-  branch of an item, then that job is not run for the item.
-  Otherwise, all of the job variants which match that branch (and any
-  other selection criteria) are used when freezing the job.
+   .. attr:: success-url
 
-  This example illustrates a job called *run-tests* which uses a
-  nodeset based on the current release of an operating system to
-  perform its tests, except when testing changes to the stable/2.0
-  branch, in which case it uses an older release::
+      When a job succeeds, this URL is reported along with the result.
+      If this value is not supplied, Zuul uses the content of the job
+      :ref:`return value <return_values>` **zuul.log_url**.  This is
+      recommended as it allows the code which stores the URL to the
+      job artifacts to report exactly where they were stored.  To
+      override this value, or if it is not set, supply an absolute URL
+      in this field.  If a relative URL is supplied in this field, and
+      **zuul.log_url** is set, then the two will be combined to
+      produce the URL used for the report.  This can be used to
+      specify that certain jobs should "deep link" into the stored job
+      artifacts.  Default: none.
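+
+      For example, to deep link to a rendered documentation build
+      within the uploaded artifacts (the relative path is
+      illustrative):
+
+      .. code-block:: yaml
+
+         success-url: docs/html/index.html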
 
-    - job:
-        name: run-tests
-        nodes: current-release
+   .. attr:: failure-url
 
-    - job:
-        name: run-tests
-        branch: stable/2.0
-        nodes: old-release
+      When a job fails, this URL is reported along with the result.
+      Otherwise behaves the same as **success-url**.
 
-  In some cases, Zuul uses an implied value for the branch specifier
-  if none is supplied:
+   .. attr:: hold-following-changes
 
-  * For a job definition in a *config-project*, no implied branch
-    specifier is used.  If no branch specifier appears, the job
-    applies to all branches.
+      In a dependent pipeline, this option may be used to indicate
+      that no jobs should start on any items which depend on the
+      current item until this job has completed successfully.  This
+      may be used to conserve build resources, at the expense of
+      inhibiting the parallelization which speeds the processing of
+      items in a dependent pipeline.  A boolean value, default: false.
 
-  * In the case of an *untrusted-project*, no implied branch specifier
-    is applied to the reference definition of a job.  That is to say,
-    that if the first appearance of the job definition appears without
-    a branch specifier, then it will apply to all branches.  Note that
-    when collecting its configuration, Zuul reads the `master` branch
-    of a given project first, then other branches in alphabetical
-    order.
+   .. attr:: voting
 
-  * Any further job variants other than the reference definition in an
-    *untrusted-project* will, if they do not have a branch specifier,
-    will have an implied branch specifier for the current branch
-    applied.
+      Indicates whether the result of this job should be used in
+      determining the overall result of the item.  A boolean value,
+      default: true.
 
-  This allows for the very simple and expected workflow where if a
-  project defines a job on the master branch with no branch specifier,
-  and then creates a new branch based on master, any changes to that
-  job definition within the new branch only affect that branch.
+   .. attr:: semaphore
 
-**files**
-  This attribute indicates that the job should only run on changes
-  where the specified files are modified.  This is a regular
-  expression or list of regular expressions.  Default: none.
+      The name of a :ref:`semaphore` which should be acquired and
+      released when the job begins and ends.  If the semaphore is at
+      maximum capacity, then Zuul will wait until it can be acquired
+      before starting the job.  Default: none.
 
-**irrelevant-files**
-  This is a negative complement of `files`.  It indicates that the job
-  should run unless *all* of the files changed match this list.  In
-  other words, if the regular expression `docs/.*` is supplied, then
-  this job will not run if the only files changed are in the docs
-  directory.  A regular expression or list of regular expressions.
-  Default: none.
+   .. attr:: tags
 
-**auth**
-  Authentication information to be made available to the job.  This is
-  a dictionary with two potential keys:
+      Metadata about this job.  Tags are units of information attached
+      to the job; they do not affect Zuul's behavior, but they can be
+      used within the job to characterize the job.  For example, a job
+      which tests a certain subsystem could be tagged with the name of
+      that subsystem, and if the job's results are reported into a
+      database, then the results of all jobs affecting that subsystem
+      could be queried.  This attribute is specified as a list of
+      strings, and when inheriting jobs or applying variants, tags
+      accumulate in a set, so the result is always a set of all the
+      tags from all the jobs and variants used in constructing the
+      frozen job, with no duplication.  Default: none.
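+
+      For example, a job which tests a database subsystem might be
+      tagged as follows (the tag values are illustrative):
+
+      .. code-block:: yaml
+
+         tags:
+           - database
+           - mysql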
 
-  **inherit**
-  A boolean indicating that the authentication information referenced
-  by this job should be able to be inherited by child jobs.  Normally
-  when a job inherits from another job, the auth section is not
-  included.  This permits jobs to inherit the same basic structure and
-  playbook, but ensures that secret information is unable to be
-  exposed by a child job which may alter the job's behavior.  If it is
-  safe for the contents of the authentication section to be used by
-  child jobs, set this to ``true``.  Default: ``false``.
+   .. attr:: branches
 
-  **secrets**
-  A list of secrets which may be used by the job.  A :ref:`secret` is
-  a named collection of private information defined separately in the
-  configuration.  The secrets that appear here must be defined in the
-  same project as this job definition.
+      A regular expression (or list of regular expressions) which
+      describes on what branches a job should run (or in the case of
+      variants: to alter the behavior of a job for a certain branch).
 
-  In the future, other types of authentication information may be
-  added.
+      If there is no job definition for a given job which matches the
+      branch of an item, then that job is not run for the item.
+      Otherwise, all of the job variants which match that branch (and
+      any other selection criteria) are used when freezing the job.
 
-**nodes**
-  A list of nodes which should be supplied to the job.  This parameter
-  may be supplied either as a string, in which case it references a
-  :ref:`nodeset` definition which appears elsewhere in the
-  configuration, or a list, in which case it is interpreted in the
-  same way as a Nodeset definition (in essence, it is an anonymous
-  Node definition unique to this job).  See the :ref:`nodeset`
-  reference for the syntax to use in that case.
+      This example illustrates a job called *run-tests* which uses a
+      nodeset based on the current release of an operating system to
+      perform its tests, except when testing changes to the stable/2.0
+      branch, in which case it uses an older release:
 
-  If a job has an empty or no node definition, it will still run and
-  may be able to perform actions on the Zuul executor.
+      .. code-block:: yaml
 
-**override-branch**
-  When Zuul runs jobs for a proposed change, it normally checks out
-  the branch associated with that change on every project present in
-  the job.  If jobs are running on a ref (such as a branch tip or
-  tag), then that ref is normally checked out.  This attribute is used
-  to override that behavior and indicate that this job should,
-  regardless of the branch for the queue item, use the indicated
-  branch instead.  This can be used, for example, to run a previous
-  version of the software (from a stable maintenance branch) under
-  test even if the change being tested applies to a different branch
-  (this is only likely to be useful if there is some cross-branch
-  interaction with some component of the system being tested).  See
-  also the project-specific **override-branch** attribute under
-  **required-projects** to apply this behavior to a subset of a job's
-  projects.
+         - job:
+             name: run-tests
+             nodes: current-release
 
-**timeout**
-  The time in minutes that the job should be allowed to run before it
-  is automatically aborted and failure is reported.  If no timeout is
-  supplied, the job may run indefinitely.  Supplying a timeout is
-  highly recommended.
+         - job:
+             name: run-tests
+             branch: stable/2.0
+             nodes: old-release
 
-**attempts**
-  When Zuul encounters an error running a job's pre-run playbook, Zuul
-  will stop and restart the job.  Errors during the main or
-  post-run -playbook phase of a job are not affected by this parameter
-  (they are reported immediately).  This parameter controls the number
-  of attempts to make before an error is reported.  Default: 3.
+      In some cases, Zuul uses an implied value for the branch specifier
+      if none is supplied:
 
-**pre-run**
-  The name of a playbook or list of playbooks without file extension
-  to run before the main body of a job.  The full path to the playbook
-  in the repo where the job is defined is expected.
+      * For a job definition in a *config-project*, no implied branch
+        specifier is used.  If no branch specifier appears, the job
+        applies to all branches.
 
-  When a job inherits from a parent, the child's pre-run playbooks are
-  run after the parent's.  See :ref:`job` for more information.
+      * In the case of an *untrusted-project*, no implied branch specifier
+        is applied to the reference definition of a job.  That is to
+        say, if the first appearance of the job definition has no
+        branch specifier, then it will apply to all branches.  Note that
+        when collecting its configuration, Zuul reads the `master` branch
+        of a given project first, then other branches in alphabetical
+        order.
 
-**post-run**
-  The name of a playbook or list of playbooks without file extension
-  to run after the main body of a job.  The full path to the playbook
-  in the repo where the job is defined is expected.
+      * Any further job variants other than the reference definition in an
+        *untrusted-project* will, if they do not have a branch specifier,
+        have an implied branch specifier for the current branch
+        applied.
 
-  When a job inherits from a parent, the child's post-run playbooks
-  are run before the parent's.  See :ref:`job` for more information.
+      This allows for the very simple and expected workflow where if a
+      project defines a job on the master branch with no branch specifier,
+      and then creates a new branch based on master, any changes to that
+      job definition within the new branch only affect that branch.
 
-**run**
-  The name of the main playbook for this job.  This parameter is
-  not normally necessary, as it defaults to a playbook with the
-  same name as the job inside of the `playbooks/` directory (e.g.,
-  the `foo` job would default to `playbooks/foo`.  However, if a
-  playbook with a different name is needed, it can be specified
-  here.  The file extension is not required, but the full path
-  within the repo is.  When a child inherits from a parent, a
-  playbook with the name of the child job is implicitly searched
-  first, before falling back on the playbook used by the parent
-  job (unless the child job specifies a ``run`` attribute, in which
-  case that value is used).  Example::
+   .. attr:: files
 
-     run: playbooks/<name of the job>
+      This attribute indicates that the job should only run on changes
+      where the specified files are modified.  This is a regular
+      expression or list of regular expressions.  Default: none.
 
-**roles**
-  A list of Ansible roles to prepare for the job.  Because a job runs
-  an Ansible playbook, any roles which are used by the job must be
-  prepared and installed by Zuul before the job begins.  This value is
-  a list of dictionaries, each of which indicates one of two types of
-  roles: a Galaxy role, which is simply a role that is installed from
-  Ansible Galaxy, or a Zuul role, which is a role provided by a
-  project managed by Zuul.  Zuul roles are able to benefit from
-  speculative merging and cross-project dependencies when used by
-  playbooks in untrusted projects.  Roles are added to the Ansible
-  role path in the order they appear on the job -- roles earlier in
-  the list will take precedence over those which follow.
+   .. attr:: irrelevant-files
 
-  In the case of job inheritance or variance, the roles used for each
-  of the playbooks run by the job will be only those which were
-  defined along with that playbook.  If a child job inherits from a
-  parent which defines a pre and post playbook, then the pre and post
-  playbooks it inherits from the parent job will run only with the
-  roles that were defined on the parent.  If the child adds its own
-  pre and post playbooks, then any roles added by the child will be
-  available to the child's playbooks.  This is so that a job which
-  inherits from a parent does not inadvertantly alter the behavior of
-  the parent's playbooks by the addition of conflicting roles.  Roles
-  added by a child will appear before those it inherits from its
-  parent.
+      This is a negative complement of `files`.  It indicates that the
+      job should run unless *all* of the files changed match this
+      list.  In other words, if the regular expression `docs/.*` is
+      supplied, then this job will not run if the only files changed
+      are in the docs directory.  A regular expression or list of
+      regular expressions.  Default: none.
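+
+      For example, to skip the job when only documentation has
+      changed (using the regular expression from the description
+      above):
+
+      .. code-block:: yaml
+
+         irrelevant-files:
+           - docs/.*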
 
-  A project which supplies a role may be structured in one of two
-  configurations: a bare role (in which the role exists at the root of
-  the project), or a contained role (in which the role exists within
-  the `roles/` directory of the project, perhaps along with other
-  roles).  In the case of a contained role, the `roles/` directory of
-  the project is added to the role search path.  In the case of a bare
-  role, the project itself is added to the role search path.  In case
-  the name of the project is not the name under which the role should
-  be installed (and therefore referenced from Ansible), the `name`
-  attribute may be used to specify an alternate.
+   .. attr:: auth
 
-  A job automatically has the project in which it is defined added to
-  the roles path if that project appears to contain a role or `roles/`
-  directory.  By default, the project is added to the path under its
-  own name, however, that may be changed by explicitly listing the
-  project in the roles list in the usual way.
+      Authentication information to be made available to the job.
+      This is a dictionary with two potential keys:
 
-  .. note:: galaxy roles are not yet implemented
+      .. attr:: inherit
 
-  **galaxy**
-    The name of the role in Ansible Galaxy.  If this attribute is
-    supplied, Zuul will search Ansible Galaxy for a role by this name
-    and install it.  Mutually exclusive with ``zuul``; either
-    ``galaxy`` or ``zuul`` must be supplied.
+         A boolean indicating that the authentication information
+         referenced by this job should be able to be inherited by
+         child jobs.  Normally when a job inherits from another job,
+         the auth section is not included.  This permits jobs to
+         inherit the same basic structure and playbook, but ensures
+         that secret information is unable to be exposed by a child
+         job which may alter the job's behavior.  If it is safe for
+         the contents of the authentication section to be used by
+         child jobs, set this to ``true``.  Default: ``false``.
 
-  **zuul**
-    The name of a Zuul project which supplies the role.  Mutually
-    exclusive with ``galaxy``; either ``galaxy`` or ``zuul`` must be
-    supplied.
+      .. attr:: secrets
 
-  **name**
-    The installation name of the role.  In the case of a bare role,
-    the role will be made available under this name.  Ignored in the
-    case of a contained role.
+         A list of secrets which may be used by the job.  A
+         :ref:`secret` is a named collection of private information
+         defined separately in the configuration.  The secrets that
+         appear here must be defined in the same project as this job
+         definition.
 
-**required-projects**
-  A list of other projects which are used by this job.  Any Zuul
-  projects specified here will also be checked out by Zuul into the
-  working directory for the job.  Speculative merging and cross-repo
-  dependencies will be honored.
+         In the future, other types of authentication information may
+         be added.
 
-  The format for this attribute is either a list of strings or
-  dictionaries.  Strings are interpreted as project names,
-  dictionaries may have the following attributes:
+   .. attr:: nodes
 
-  **name**
-    The name of the required project.
+      A list of nodes which should be supplied to the job.  This
+      parameter may be supplied either as a string, in which case it
+      references a :ref:`nodeset` definition which appears elsewhere
+      in the configuration, or a list, in which case it is interpreted
+      in the same way as a Nodeset definition (in essence, it is an
+      anonymous Node definition unique to this job).  See the
+      :ref:`nodeset` reference for the syntax to use in that case.
 
-  **override-branch**
-    When Zuul runs jobs for a proposed change, it normally checks out
-    the branch associated with that change on every project present in
-    the job.  If jobs are running on a ref (such as a branch tip or
-    tag), then that ref is normally checked out.  This attribute is
-    used to override that behavior and indicate that this job should,
-    regardless of the branch for the queue item, use the indicated
-    branch instead, for only this project.  See also the
-    **override-branch** attribute of jobs to apply the same behavior
-    to all projects in a job.
+      If a job has an empty or no node definition, it will still run
+      and may be able to perform actions on the Zuul executor.
 
-**vars**
+   .. attr:: override-branch
 
-A dictionary of variables to supply to Ansible.  When inheriting from
-a job (or creating a variant of a job) vars are merged with previous
-definitions.  This means a variable definition with the same name will
-override a previously defined variable, but new variable names will be
-added to the set of defined variables.
+      When Zuul runs jobs for a proposed change, it normally checks
+      out the branch associated with that change on every project
+      present in the job.  If jobs are running on a ref (such as a
+      branch tip or tag), then that ref is normally checked out.  This
+      attribute is used to override that behavior and indicate that
+      this job should, regardless of the branch for the queue item,
+      use the indicated branch instead.  This can be used, for
+      example, to run a previous version of the software (from a
+      stable maintenance branch) under test even if the change being
+      tested applies to a different branch (this is only likely to be
+      useful if there is some cross-branch interaction with some
+      component of the system being tested).  See also the
+      project-specific **override-branch** attribute under
+      **required-projects** to apply this behavior to a subset of a
+      job's projects.
 
-**dependencies**
-  A list of other jobs upon which this job depends.  Zuul will not
-  start executing this job until all of its dependencies have
-  completed successfully, and if one or more of them fail, this job
-  will not be run.
+   .. attr:: timeout
 
-**allowed-projects**
-  A list of Zuul projects which may use this job.  By default, a job
-  may be used by any other project known to Zuul, however, some jobs
-  use resources or perform actions which are not appropriate for other
-  projects.  In these cases, a list of projects which are allowed to
-  use this job may be supplied.  If this list is not empty, then it
-  must be an exhaustive list of all projects permitted to use the job.
-  The current project (where the job is defined) is not automatically
-  included, so if it should be able to run this job, then it must be
-  explicitly listed.  Default: the empty list (all projects may use
-  the job).
+      The time in minutes that the job should be allowed to run before
+      it is automatically aborted and failure is reported.  If no
+      timeout is supplied, the job may run indefinitely.  Supplying a
+      timeout is highly recommended.
+
+   .. attr:: attempts
+
+      When Zuul encounters an error running a job's pre-run playbook,
+      Zuul will stop and restart the job.  Errors during the main or
+      post-run playbook phase of a job are not affected by this
+      parameter (they are reported immediately).  This parameter
+      controls the number of attempts to make before an error is
+      reported.  Default: 3.
+
+   .. attr:: pre-run
+
+      The name of a playbook or list of playbooks without file
+      extension to run before the main body of a job.  The full path
+      to the playbook in the repo where the job is defined is
+      expected.
+
+      When a job inherits from a parent, the child's pre-run playbooks
+      are run after the parent's.  See :ref:`job` for more
+      information.
+
+   .. attr:: post-run
+
+      The name of a playbook or list of playbooks without file
+      extension to run after the main body of a job.  The full path to
+      the playbook in the repo where the job is defined is expected.
+
+      When a job inherits from a parent, the child's post-run
+      playbooks are run before the parent's.  See :ref:`job` for more
+      information.
+
+   .. attr:: run
+
+      The name of the main playbook for this job.  This parameter is
+      not normally necessary, as it defaults to a playbook with the
+      same name as the job inside of the `playbooks/` directory (e.g.,
+      the `foo` job would default to `playbooks/foo`).  However, if a
+      playbook with a different name is needed, it can be specified
+      here.  The file extension is not required, but the full path
+      within the repo is.  When a child inherits from a parent, a
+      playbook with the name of the child job is implicitly searched
+      first, before falling back on the playbook used by the parent
+      job (unless the child job specifies a ``run`` attribute, in
+      which case that value is used).  Example:
+
+      .. code-block:: yaml
+
+         run: playbooks/<name of the job>
+
+   .. attr:: roles
+
+      A list of Ansible roles to prepare for the job.  Because a job
+      runs an Ansible playbook, any roles which are used by the job
+      must be prepared and installed by Zuul before the job begins.
+      This value is a list of dictionaries, each of which indicates
+      one of two types of roles: a Galaxy role, which is simply a role
+      that is installed from Ansible Galaxy, or a Zuul role, which is
+      a role provided by a project managed by Zuul.  Zuul roles are
+      able to benefit from speculative merging and cross-project
+      dependencies when used by playbooks in untrusted projects.
+      Roles are added to the Ansible role path in the order they
+      appear on the job -- roles earlier in the list will take
+      precedence over those which follow.
+
+      In the case of job inheritance or variance, the roles used for
+      each of the playbooks run by the job will be only those which
+      were defined along with that playbook.  If a child job inherits
+      from a parent which defines a pre and post playbook, then the
+      pre and post playbooks it inherits from the parent job will run
+      only with the roles that were defined on the parent.  If the
+      child adds its own pre and post playbooks, then any roles added
+      by the child will be available to the child's playbooks.  This
+      is so that a job which inherits from a parent does not
+      inadvertently alter the behavior of the parent's playbooks by
+      the addition of conflicting roles.  Roles added by a child will
+      appear before those it inherits from its parent.
+
+      A project which supplies a role may be structured in one of two
+      configurations: a bare role (in which the role exists at the
+      root of the project), or a contained role (in which the role
+      exists within the `roles/` directory of the project, perhaps
+      along with other roles).  In the case of a contained role, the
+      `roles/` directory of the project is added to the role search
+      path.  In the case of a bare role, the project itself is added
+      to the role search path.  In case the name of the project is not
+      the name under which the role should be installed (and therefore
+      referenced from Ansible), the `name` attribute may be used to
+      specify an alternate.
+
+      A job automatically has the project in which it is defined added
+      to the roles path if that project appears to contain a role or
+      `roles/` directory.  By default, the project is added to the
+      path under its own name; however, that may be changed by
+      explicitly listing the project in the roles list in the usual
+      way.
+
+      .. note:: Galaxy roles are not yet implemented.
+
+      .. attr:: galaxy
+
+         The name of the role in Ansible Galaxy.  If this attribute is
+         supplied, Zuul will search Ansible Galaxy for a role by this
+         name and install it.  Mutually exclusive with ``zuul``;
+         either ``galaxy`` or ``zuul`` must be supplied.
+
+      .. attr:: zuul
+
+         The name of a Zuul project which supplies the role.  Mutually
+         exclusive with ``galaxy``; either ``galaxy`` or ``zuul`` must
+         be supplied.
+
+      .. attr:: name
+
+         The installation name of the role.  In the case of a bare
+         role, the role will be made available under this name.
+         Ignored in the case of a contained role.
+
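+      For example, to make a role supplied by another Zuul project
+      available to the job's playbooks (the project name is
+      illustrative):
+
+      .. code-block:: yaml
+
+         roles:
+           - zuul: org/ansible-roles
+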
+   .. attr:: required-projects
+
+      A list of other projects which are used by this job.  Any Zuul
+      projects specified here will also be checked out by Zuul into
+      the working directory for the job.  Speculative merging and
+      cross-repo dependencies will be honored.
+
+      The format for this attribute is either a list of strings or
+      dictionaries.  Strings are interpreted as project names;
+      dictionaries, if used, may have the following attributes (an
+      example combining both forms appears below):
+
+      .. attr:: name
+         :required:
+
+         The name of the required project.
+
+      .. attr:: override-branch
+
+         When Zuul runs jobs for a proposed change, it normally checks
+         out the branch associated with that change on every project
+         present in the job.  If jobs are running on a ref (such as a
+         branch tip or tag), then that ref is normally checked out.
+         This attribute is used to override that behavior and indicate
+         that this job should, regardless of the branch for the queue
+         item, use the indicated branch instead, for only this
+         project.  See also the **override-branch** attribute of jobs
+         to apply the same behavior to all projects in a job.
+
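+      For example, a job may combine both forms (the project names
+      are illustrative):
+
+      .. code-block:: yaml
+
+         required-projects:
+           - org/common-library
+           - name: org/other-project
+             override-branch: stable/2.0
+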
+   .. attr:: vars
+
+      A dictionary of variables to supply to Ansible.  When inheriting
+      from a job (or creating a variant of a job) vars are merged with
+      previous definitions.  This means a variable definition with the
+      same name will override a previously defined variable, but new
+      variable names will be added to the set of defined variables.
+
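+      For example (the variable names and values are illustrative):
+
+      .. code-block:: yaml
+
+         vars:
+           tox_environment: py35
+           run_coverage: true
+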
+   .. attr:: dependencies
+
+      A list of other jobs upon which this job depends.  Zuul will not
+      start executing this job until all of its dependencies have
+      completed successfully, and if one or more of them fail, this
+      job will not be run.
+
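+      For example, to run a test job only after an artifact build job
+      has succeeded (the job name is illustrative):
+
+      .. code-block:: yaml
+
+         dependencies:
+           - build-release-artifacts
+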
+   .. attr:: allowed-projects
+
+      A list of Zuul projects which may use this job.  By default, a
+      job may be used by any other project known to Zuul, however,
+      some jobs use resources or perform actions which are not
+      appropriate for other projects.  In these cases, a list of
+      projects which are allowed to use this job may be supplied.  If
+      this list is not empty, then it must be an exhaustive list of
+      all projects permitted to use the job.  The current project
+      (where the job is defined) is not automatically included, so if
+      it should be able to run this job, then it must be explicitly
+      listed.  Default: the empty list (all projects may use the job).
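+
+      For example, to restrict a job to the project in which it is
+      defined (the project name is illustrative):
+
+      .. code-block:: yaml
+
+         allowed-projects:
+           - org/project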
 
 
 .. _project:
diff --git a/doc/source/user/gating.rst b/doc/source/user/gating.rst
index 3398892..c1d04a7 100644
--- a/doc/source/user/gating.rst
+++ b/doc/source/user/gating.rst
@@ -41,7 +41,7 @@
 developers to create changes at a rate faster than they can be tested
 and merged.
 
-Zuul's :ref:`dependent pipeline manager<dependent_pipeline_manager>`
+Zuul's :value:`dependent pipeline manager<pipeline.manager.dependent>`
 allows for parallel execution of test jobs for gating while ensuring
 changes are tested correctly, exactly as if they had been tested one
 at a time.  It does this by performing speculative execution of test
diff --git a/etc/zuul.conf-sample b/etc/zuul.conf-sample
index e6375a5..0ae42a2 100644
--- a/etc/zuul.conf-sample
+++ b/etc/zuul.conf-sample
@@ -27,8 +27,8 @@
 
 [executor]
 default_username=zuul
-trusted_ro_dirs=/opt/zuul-scripts:/var/cache
-trusted_rw_dirs=/opt/zuul-logs
+trusted_ro_paths=/opt/zuul-scripts:/var/cache
+trusted_rw_paths=/opt/zuul-logs
 
 [web]
 listen_address=127.0.0.1
diff --git a/tests/base.py b/tests/base.py
index 568e15f..35c8324 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -2080,7 +2080,7 @@
                         self.copyDirToRepo(project,
                                            os.path.join(git_path, reponame))
         # Make test_root persist after ansible run for .flag test
-        self.config.set('executor', 'trusted_rw_dirs', self.test_root)
+        self.config.set('executor', 'trusted_rw_paths', self.test_root)
         self.setupAllProjectKeys()
 
     def setupSimpleLayout(self):
diff --git a/tests/fixtures/config/disk-accountant/git/common-config/playbooks/dd-big-empty-file.yaml b/tests/fixtures/config/disk-accountant/git/common-config/playbooks/dd-big-empty-file.yaml
new file mode 100644
index 0000000..95ab870
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/git/common-config/playbooks/dd-big-empty-file.yaml
@@ -0,0 +1,6 @@
+- hosts: localhost
+  tasks:
+    - command: dd if=/dev/zero of=toobig bs=1M count=2
+    - wait_for:
+        delay: 10
+        path: /
diff --git a/tests/fixtures/config/disk-accountant/git/common-config/zuul.yaml b/tests/fixtures/config/disk-accountant/git/common-config/zuul.yaml
new file mode 100644
index 0000000..83a5158
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/git/common-config/zuul.yaml
@@ -0,0 +1,22 @@
+- pipeline:
+    name: check
+    manager: independent
+    allow-secrets: true
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- job:
+    name: dd-big-empty-file
+
+- project:
+    name: org/project
+    check:
+      jobs:
+        - dd-big-empty-file
diff --git a/tests/fixtures/config/disk-accountant/git/org_project/README b/tests/fixtures/config/disk-accountant/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/disk-accountant/main.yaml b/tests/fixtures/config/disk-accountant/main.yaml
new file mode 100644
index 0000000..208e274
--- /dev/null
+++ b/tests/fixtures/config/disk-accountant/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-projects:
+          - common-config
+        untrusted-projects:
+          - org/project
diff --git a/tests/fixtures/zuul-disk-accounting.conf b/tests/fixtures/zuul-disk-accounting.conf
new file mode 100644
index 0000000..b0ae48e
--- /dev/null
+++ b/tests/fixtures/zuul-disk-accounting.conf
@@ -0,0 +1,28 @@
+[gearman]
+server=127.0.0.1
+
+[scheduler]
+tenant_config=main.yaml
+
+[merger]
+git_dir=/tmp/zuul-test/merger-git
+git_user_email=zuul@example.com
+git_user_name=zuul
+zuul_url=http://zuul.example.com/p
+
+[executor]
+git_dir=/tmp/zuul-test/executor-git
+disk_limit_per_job=1
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=fake_id_rsa_path
+
+[connection smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/unit/test_disk_accountant.py b/tests/unit/test_disk_accountant.py
new file mode 100644
index 0000000..22c8f34
--- /dev/null
+++ b/tests/unit/test_disk_accountant.py
@@ -0,0 +1,89 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import tempfile
+import time
+
+from tests.base import BaseTestCase
+
+from zuul.executor.server import DiskAccountant
+
+
+class FakeExecutor(object):
+    def __init__(self):
+        self.stopped_jobs = set()
+        self.used = {}
+
+    def stopJobByJobDir(self, jobdir):
+        self.stopped_jobs.add(jobdir)
+
+    def usage(self, dirname, used):
+        self.used[dirname] = used
+
+
+class TestDiskAccountant(BaseTestCase):
+    def test_disk_accountant(self):
+        jobs_dir = tempfile.mkdtemp()
+        cache_dir = tempfile.mkdtemp()
+        executor_server = FakeExecutor()
+        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
+                            cache_dir)
+        da.start()
+
+        jobdir = os.path.join(jobs_dir, '012345')
+        os.mkdir(jobdir)
+        testfile = os.path.join(jobdir, 'tfile')
+        with open(testfile, 'w') as tf:
+            tf.write(2 * 1024 * 1024 * '.')
+
+        # da should catch over-limit dir within 5 seconds
+        for i in range(0, 50):
+            if jobdir in executor_server.stopped_jobs:
+                break
+            time.sleep(0.1)
+        self.assertEqual(set([jobdir]), executor_server.stopped_jobs)
+        da.stop()
+        self.assertFalse(da.thread.is_alive())
+
+    def test_cache_hard_links(self):
+        root_dir = tempfile.mkdtemp()
+        jobs_dir = os.path.join(root_dir, 'jobs')
+        os.mkdir(jobs_dir)
+        cache_dir = os.path.join(root_dir, 'cache')
+        os.mkdir(cache_dir)
+
+        executor_server = FakeExecutor()
+        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
+                            cache_dir, executor_server.usage)
+        da.start()
+
+        jobdir = os.path.join(jobs_dir, '012345')
+        os.mkdir(jobdir)
+
+        repo_dir = os.path.join(cache_dir, 'a.repo')
+        os.mkdir(repo_dir)
+        source_file = os.path.join(repo_dir, 'big_file')
+        with open(source_file, 'w') as tf:
+            tf.write(2 * 1024 * 1024 * '.')
+        dest_link = os.path.join(jobdir, 'big_file')
+        os.link(source_file, dest_link)
+
+        # da should _not_ count this file; wait up to 5s for the dir
+        # to be noticed at all
+        for i in range(0, 50):
+            if jobdir in executor_server.used:
+                break
+            time.sleep(0.1)
+        self.assertEqual(set(), executor_server.stopped_jobs)
+        self.assertIn(jobdir, executor_server.used)
+        self.assertEqual(1, executor_server.used[jobdir])
+        da.stop()
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index aa091e5..2d68089 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -174,6 +174,9 @@
         in_repo_conf = textwrap.dedent(
             """
             - job:
+                name: project-test1
+
+            - job:
                 name: project-test2
 
             - project:
@@ -197,9 +200,16 @@
         self.assertTrue(items[0].live)
 
         self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release('project-test1')
+        self.waitUntilSettled()
         self.executor_server.release()
         self.waitUntilSettled()
 
+        self.assertHistory([
+            dict(name='project-test2', result='ABORTED', changes='1,1'),
+            dict(name='project-test1', result='SUCCESS', changes='1,2'),
+            dict(name='project-test2', result='SUCCESS', changes='1,2')])
+
     def test_dynamic_dependent_pipeline(self):
         # Test dynamically adding a project to a
         # dependent pipeline for the first time
@@ -926,3 +936,15 @@
         self.assertIn('- data-return-relative '
                       'http://example.com/test/log/url/docs/index.html',
                       A.messages[-1])
+
+
+class TestDiskAccounting(AnsibleZuulTestCase):
+    config_file = 'zuul-disk-accounting.conf'
+    tenant_config_file = 'config/disk-accountant/main.yaml'
+
+    def test_disk_accountant_kills_job(self):
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertHistory([
+            dict(name='dd-big-empty-file', result='ABORTED', changes='1,1')])
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index 078e1c9..9dd724d 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -285,10 +285,16 @@
         self._log("")
 
     def v2_runner_on_skipped(self, result):
-        reason = result._result.get('skip_reason')
-        if reason:
-            # No reason means it's an item, which we'll log differently
-            self._log_message(result, status='skipping', msg=reason)
+        if result._task.loop:
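+            # Loop tasks are logged per item; defer the summary until
+            # the item callbacks have run.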
+            self._items_done = False
+            self._deferred_result = dict(result._result)
+        else:
+            reason = result._result.get('skip_reason')
+            if reason:
+                # No reason means it's an item, which we'll log differently
+                self._log_message(result, status='skipping', msg=reason)
+                # Log an extra blank line to get space after each skip
+                self._log("")
 
     def v2_runner_item_on_skipped(self, result):
         reason = result._result.get('skip_reason')
@@ -297,14 +303,14 @@
         else:
             self._log_message(result, status='skipping')
 
+        if self._deferred_result:
+            self._process_deferred(result)
+
     def v2_runner_on_ok(self, result):
         if (self._play.strategy == 'free'
                 and self._last_task_banner != result._task._uuid):
             self._print_task_banner(result._task)
 
-        if result._task.action in ('include', 'include_role', 'setup'):
-            return
-
         result_dict = dict(result._result)
 
         self._clean_results(result_dict, result._task.action)
@@ -388,8 +394,6 @@
 
         if self._deferred_result:
             self._process_deferred(result)
-        # Log an extra blank line to get space after each task
-        self._log("")
 
     def v2_runner_item_on_failed(self, result):
         result_dict = dict(result._result)
@@ -434,10 +438,13 @@
         self._items_done = True
         result_dict = self._deferred_result
         self._deferred_result = None
+        status = result_dict.get('status')
 
-        self._log_message(
-            result, "All items complete",
-            status=result_dict['status'])
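+        # Deferred results from skipped loop tasks carry no 'status';
+        # only log the completion message when one is present.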
+        if status:
+            self._log_message(result, "All items complete", status=status)
+
+        # Log an extra blank line to get space after each task
+        self._log("")
 
     def _print_task_banner(self, task):
 
diff --git a/zuul/driver/__init__.py b/zuul/driver/__init__.py
index 5193fe6..6ac9197 100644
--- a/zuul/driver/__init__.py
+++ b/zuul/driver/__init__.py
@@ -272,11 +272,11 @@
         pass
 
     @abc.abstractmethod
-    def setMountsMap(self, state_dir, ro_dirs=[], rw_dirs=[]):
+    def setMountsMap(self, state_dir, ro_paths=None, rw_paths=None):
         """Add additional mount point to the execution environment.
 
         :arg str state_dir: the state directory to be read write
-        :arg list ro_dirs: read only directories paths
-        :arg list rw_dirs: read write directories paths
+        :arg list ro_paths: read only files or directories to bind mount
+        :arg list rw_paths: read write files or directories to bind mount
         """
         pass
diff --git a/zuul/driver/bubblewrap/__init__.py b/zuul/driver/bubblewrap/__init__.py
index e8209f1..5370484 100644
--- a/zuul/driver/bubblewrap/__init__.py
+++ b/zuul/driver/bubblewrap/__init__.py
@@ -83,8 +83,12 @@
     def stop(self):
         pass
 
-    def setMountsMap(self, ro_dirs=[], rw_dirs=[]):
-        self.mounts_map = {'ro': ro_dirs, 'rw': rw_dirs}
+    def setMountsMap(self, ro_paths=None, rw_paths=None):
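+        # Default to None and build fresh lists so a mutable default
+        # list is not shared between calls.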
+        if not ro_paths:
+            ro_paths = []
+        if not rw_paths:
+            rw_paths = []
+        self.mounts_map = {'ro': ro_paths, 'rw': rw_paths}
 
     def getPopen(self, **kwargs):
         # Set zuul_dir if it was not passed in
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index ba063fb..b095215 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -349,7 +349,7 @@
 
 class GithubConnection(BaseConnection):
     driver_name = 'github'
-    log = logging.getLogger("connection.github")
+    log = logging.getLogger("zuul.GithubConnection")
     payload_path = 'payload'
 
     def __init__(self, driver, connection_name, connection_config):
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 0ae3b4c..21c4cf1 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -55,6 +55,88 @@
     pass
 
 
+class DiskAccountant(object):
+    ''' A single thread to periodically run du and monitor a base directory
+
+    Whenever the accountant notices a dir over limit, it will call the
+    given func with the job directory as its argument. That function
+    should be used to remediate the problem, generally by killing the
+    job producing the disk bloat. The function will be called every
+    time the problem is noticed, so it should handle the problem
+    synchronously to avoid stacking up calls.
+    '''
+    log = logging.getLogger("zuul.ExecutorDiskAccountant")
+
+    def __init__(self, jobs_base, limit, func, cache_dir, usage_func=None):
+        '''
+        :param str jobs_base: absolute path name of dir to be monitored
+        :param int limit: maximum number of MB allowed to be in use in any one
+                          subdir
+        :param callable func: Function to call with overlimit dirs
+        :param str cache_dir: absolute path name of dir to be passed as the
+                              first argument to du. This will ensure du does
+                              not count any hardlinks to files in this
+                              directory against a single job.
+        :param callable usage_func: Optional function to call with usage
+                                    for every dir _NOT_ over limit
+        '''
+        # Don't cross the streams
+        if cache_dir == jobs_base:
+            raise Exception("Cache dir and jobs dir cannot be the same")
+        self.thread = threading.Thread(target=self._run,
+                                       name='executor-diskaccountant')
+        self.thread.daemon = True
+        self._running = False
+        self.jobs_base = jobs_base
+        self.limit = limit
+        self.func = func
+        self.cache_dir = cache_dir
+        self.usage_func = usage_func
+        self.stop_event = threading.Event()
+
+    def _run(self):
+        while self._running:
+            # Walk job base
+            before = time.time()
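+            # List cache_dir first so du attributes hard-linked files
+            # to the cache rather than to the job dirs that link them.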
+            du = subprocess.Popen(
+                ['du', '-m', '--max-depth=1', self.cache_dir, self.jobs_base],
+                stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+            for line in du.stdout:
+                (size, dirname) = line.rstrip().split()
+                dirname = dirname.decode('utf8')
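+                # Only account per-job dirs: skip the totals for the
+                # roots themselves and any per-repo cache entries.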
+                if dirname == self.jobs_base or dirname == self.cache_dir:
+                    continue
+                if os.path.dirname(dirname) == self.cache_dir:
+                    continue
+                size = int(size)
+                if size > self.limit:
+                    self.log.info(
+                        "{job} is using {size}MB (limit={limit})"
+                        .format(size=size, job=dirname, limit=self.limit))
+                    self.func(dirname)
+                elif self.usage_func:
+                    self.log.debug(
+                        "{job} is using {size}MB (limit={limit})"
+                        .format(size=size, job=dirname, limit=self.limit))
+                    self.usage_func(dirname, size)
+            du.wait()
+            after = time.time()
+            # Sleep half as long as that took, or 1s, whichever is longer
+            delay_time = max((after - before) / 2, 1.0)
+            self.stop_event.wait(delay_time)
+
+    def start(self):
+        self._running = True
+        self.thread.start()
+
+    def stop(self):
+        self._running = False
+        self.stop_event.set()
+        # We join here to avoid whitelisting the thread -- if it takes more
+        # than 5s to stop in tests, there's a problem.
+        self.thread.join(timeout=5)
+
+
 class Watchdog(object):
     def __init__(self, timeout, function, args):
         self.timeout = timeout
@@ -443,6 +525,8 @@
                                       '/var/lib/zuul/executor-git')
         self.default_username = get_default(self.config, 'executor',
                                             'default_username', 'zuul')
+        self.disk_limit_per_job = int(get_default(self.config, 'executor',
+                                                  'disk_limit_per_job', 250))
         self.merge_email = get_default(self.config, 'merger', 'git_user_email')
         self.merge_name = get_default(self.config, 'merger', 'git_user_name')
         execution_wrapper_name = get_default(self.config, 'executor',
@@ -486,6 +570,10 @@
             pass
 
         self.job_workers = {}
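+        # The merger cache serves as the accountant's cache_dir so that
+        # files hard-linked from cached repos are not charged to jobs.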
+        self.disk_accountant = DiskAccountant(self.jobdir_root,
+                                              self.disk_limit_per_job,
+                                              self.stopJobByJobdir,
+                                              self.merge_root)
 
     def _getMerger(self, root, logger=None):
         if root != self.merge_root:
@@ -530,6 +618,7 @@
         self.executor_thread = threading.Thread(target=self.run_executor)
         self.executor_thread.daemon = True
         self.executor_thread.start()
+        self.disk_accountant.start()
 
     def register(self):
         self.executor_worker.registerFunction("executor:execute")
@@ -540,6 +629,7 @@
 
     def stop(self):
         self.log.debug("Stopping")
+        self.disk_accountant.stop()
         self._running = False
         self._command_running = False
         self.command_socket.stop()
@@ -675,23 +765,30 @@
     def finishJob(self, unique):
         del(self.job_workers[unique])
 
+    def stopJobByJobdir(self, jobdir):
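+        # Job dirs are named after the build's unique id, so the
+        # basename maps a flagged dir back to its job worker.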
+        unique = os.path.basename(jobdir)
+        self.stopJobByUnique(unique)
+
     def stopJob(self, job):
         try:
             args = json.loads(job.arguments)
             self.log.debug("Stop job with arguments: %s" % (args,))
             unique = args['uuid']
-            job_worker = self.job_workers.get(unique)
-            if not job_worker:
-                self.log.debug("Unable to find worker for job %s" % (unique,))
-                return
-            try:
-                job_worker.stop()
-            except Exception:
-                self.log.exception("Exception sending stop command "
-                                   "to worker:")
+            self.stopJobByUnique(unique)
         finally:
             job.sendWorkComplete()
 
+    def stopJobByUnique(self, unique):
+        job_worker = self.job_workers.get(unique)
+        if not job_worker:
+            self.log.debug("Unable to find worker for job %s" % (unique,))
+            return
+        try:
+            job_worker.stop()
+        except Exception:
+            self.log.exception("Exception sending stop command "
+                               "to worker:")
+
     def cat(self, job):
         args = json.loads(job.arguments)
         task = self.update(args['connection'], args['project'])
@@ -1370,20 +1467,20 @@
             opt_prefix = 'trusted'
         else:
             opt_prefix = 'untrusted'
-        ro_dirs = get_default(self.executor_server.config, 'executor',
-                              '%s_ro_dirs' % opt_prefix)
-        rw_dirs = get_default(self.executor_server.config, 'executor',
-                              '%s_rw_dirs' % opt_prefix)
-        ro_dirs = ro_dirs.split(":") if ro_dirs else []
-        rw_dirs = rw_dirs.split(":") if rw_dirs else []
+        ro_paths = get_default(self.executor_server.config, 'executor',
+                               '%s_ro_paths' % opt_prefix)
+        rw_paths = get_default(self.executor_server.config, 'executor',
+                               '%s_rw_paths' % opt_prefix)
+        ro_paths = ro_paths.split(":") if ro_paths else []
+        rw_paths = rw_paths.split(":") if rw_paths else []
 
-        ro_dirs.append(self.executor_server.ansible_dir)
+        ro_paths.append(self.executor_server.ansible_dir)
 
         if self.executor_variables_file:
-            ro_dirs.append(self.executor_variables_file)
+            ro_paths.append(self.executor_variables_file)
 
-        self.executor_server.execution_wrapper.setMountsMap(ro_dirs,
-                                                            rw_dirs)
+        self.executor_server.execution_wrapper.setMountsMap(ro_paths,
+                                                            rw_paths)
 
         popen = self.executor_server.execution_wrapper.getPopen(
             work_dir=self.jobdir.root,
@@ -1427,6 +1524,7 @@
             if timeout:
                 watchdog.stop()
                 self.log.debug("Stopped watchdog")
+            self.log.debug("Stopped disk job killer")
 
         with self.proc_lock:
             self.proc = None
diff --git a/zuul/sphinx/zuul.py b/zuul/sphinx/zuul.py
index a4fb127..7798720 100644
--- a/zuul/sphinx/zuul.py
+++ b/zuul/sphinx/zuul.py
@@ -14,21 +14,22 @@
 
 from sphinx import addnodes
 from sphinx.domains import Domain
+from sphinx.roles import XRefRole
 from sphinx.directives import ObjectDescription
+from sphinx.util.nodes import make_refnode
+from docutils import nodes
+
+from typing import Dict # noqa
 
 
 class ZuulConfigObject(ObjectDescription):
     object_names = {
         'attr': 'attribute',
-        'configobject': 'configuration object',
     }
 
     def get_path(self):
-        obj = self.env.ref_context.get('zuul:configobject')
         attr_path = self.env.ref_context.get('zuul:attr_path', [])
         path = []
-        if obj:
-            path.append(obj)
         if attr_path:
             path.extend(attr_path)
         return path
@@ -49,6 +50,15 @@
             signode['ids'].append(targetname)
             signode['first'] = (not self.names)
             self.state.document.note_explicit_target(signode)
+            objects = self.env.domaindata['zuul']['objects']
+            if targetname in objects:
+                self.state_machine.reporter.warning(
+                    'duplicate object description of %s, ' % targetname +
+                    'other instance in ' +
+                    self.env.doc2path(objects[targetname][0]) +
+                    ', use :noindex: for one of them',
+                    line=self.lineno)
+            objects[targetname] = (self.env.docname, self.objtype)
 
         objname = self.object_names.get(self.objtype, self.objtype)
         if self.parent_pathname:
@@ -60,17 +70,6 @@
                                           targetname, '', None))
 
 
-class ZuulConfigobjectDirective(ZuulConfigObject):
-    has_content = True
-
-    def before_content(self):
-        self.env.ref_context['zuul:configobject'] = self.names[-1]
-
-    def handle_signature(self, sig, signode):
-        signode += addnodes.desc_name(sig, sig)
-        return sig
-
-
 class ZuulAttrDirective(ZuulConfigObject):
     has_content = True
 
@@ -110,11 +109,35 @@
     label = 'Zuul'
 
     directives = {
-        'configobject': ZuulConfigobjectDirective,
         'attr': ZuulAttrDirective,
         'value': ZuulValueDirective,
     }
 
+    roles = {
+        'attr': XRefRole(innernodeclass=nodes.inline,  # type: ignore
+                         warn_dangling=True),
+        'value': XRefRole(innernodeclass=nodes.inline,  # type: ignore
+                          warn_dangling=True),
+    }
+
+    initial_data = {
+        'objects': {},
+    }  # type: Dict[str, Dict]
+
+    def resolve_xref(self, env, fromdocname, builder, type, target,
+                     node, contnode):
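+        # Targets are registered under '<objtype>-<path>' keys, so
+        # rebuild the same composite key to find the defining document.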
+        objects = self.data['objects']
+        name = type + '-' + target
+        obj = objects.get(name)
+        if obj:
+            return make_refnode(builder, fromdocname, obj[0], name,
+                                contnode, name)
+
+    def clear_doc(self, docname):
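+        # Drop entries defined in a document being re-read so stale
+        # cross-reference targets do not persist across builds.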
+        for fullname, (fn, _l) in list(self.data['objects'].items()):
+            if fn == docname:
+                del self.data['objects'][fullname]
+
 
 def setup(app):
     app.add_domain(ZuulDomain)