Merge "Return RETRY_LIMIT on completion of last build" into feature/zuulv3
diff --git a/doc/source/client.rst b/doc/source/admin/client.rst
similarity index 100%
rename from doc/source/client.rst
rename to doc/source/admin/client.rst
diff --git a/doc/source/admin/components.rst b/doc/source/admin/components.rst
new file mode 100644
index 0000000..a24b833
--- /dev/null
+++ b/doc/source/admin/components.rst
@@ -0,0 +1,325 @@
+:title: Components
+
+.. _components:
+
+Components
+==========
+
+Zuul is a distributed system consisting of several components, each of
+which is described below. All Zuul processes read the
+**/etc/zuul/zuul.conf** file (an alternate location may be supplied on
+the command line) which uses an INI file syntax. Each component may
+have its own configuration file, though you may find it simpler to use
+the same file for all components.
+
+A minimal Zuul system may consist of a *scheduler* and *executor* both
+running on the same host. Larger installations should consider
+running multiple executors, each on a dedicated host, and running
+mergers on dedicated hosts as well.
+
+Common
+------
+
+The following applies to all Zuul components.
+
+Configuration
+~~~~~~~~~~~~~
+
+The following sections of **zuul.conf** are used by all Zuul components:
+
+gearman
+"""""""
+
+Client connection information for gearman.
+
+**server**
+ Hostname or IP address of the Gearman server.
+ ``server=gearman.example.com`` (required)
+
+**port**
+ Port on which the Gearman server is listening.
+ ``port=4730`` (optional)
+
+**ssl_ca**
+ Optional: An openssl file containing a set of concatenated
+ “certification authority” certificates in PEM format.
+
+**ssl_cert**
+ Optional: An openssl file containing the client public certificate in
+ PEM format.
+
+**ssl_key**
+ Optional: An openssl file containing the client private key in PEM format.
+
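+For reference, a client-side ``[gearman]`` section assembled from the
+options above might look like the following sketch (the SSL file paths
+are examples only)::
+
+  [gearman]
+  server=gearman.example.com
+  port=4730
+  ssl_ca=/etc/zuul/ssl/ca.pem
+  ssl_cert=/etc/zuul/ssl/client.pem
+  ssl_key=/etc/zuul/ssl/client.key
+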
+zookeeper
+"""""""""
+
+.. NOTE: this is a white lie at this point, since only the scheduler
+ uses this, however, we expect other components to use it later, so
+ it's reasonable for admins to plan for this now.
+
+**hosts**
+ A list of zookeeper hosts for Zuul to use when communicating with
+ Nodepool. ``hosts=zk1.example.com,zk2.example.com,zk3.example.com``
+
+
+Scheduler
+---------
+
+The scheduler is the primary component of Zuul. The scheduler is not
+a scalable component; one, and only one, scheduler must be running at
+all times for Zuul to be operational. It receives events from any
+connections to remote systems which have been configured, enqueues
+items into pipelines, distributes jobs to executors, and reports
+results.
+
+Configuration
+~~~~~~~~~~~~~
+
+The following sections of **zuul.conf** are used by the scheduler:
+
+gearman_server
+""""""""""""""
+
+The builtin gearman server. Zuul can fork a gearman process from itself rather
+than connecting to an external one.
+
+**start**
+ Whether to start the internal Gearman server (default: False).
+ ``start=true``
+
+**listen_address**
+ IP address or domain name on which to listen (default: all addresses).
+ ``listen_address=127.0.0.1``
+
+**log_config**
+ Path to log config file for internal Gearman server.
+ ``log_config=/etc/zuul/gearman-logging.yaml``
+
+**ssl_ca**
+ Optional: An openssl file containing a set of concatenated “certification authority” certificates
+ in PEM format.
+
+**ssl_cert**
+ Optional: An openssl file containing the server public certificate in PEM format.
+
+**ssl_key**
+ Optional: An openssl file containing the server private key in PEM format.
+
+webapp
+""""""
+
+**listen_address**
+ IP address or domain name on which to listen (default: 0.0.0.0).
+ ``listen_address=127.0.0.1``
+
+**port**
+ Port on which the webapp is listening (default: 8001).
+ ``port=8008``
+
+**status_expiry**
+ Zuul will cache the status.json file for this many seconds. This is an
+ optional value and ``1`` is used by default.
+ ``status_expiry=1``
+
+**status_url**
+ URL that will be posted in Zuul comments made to changes when
+ starting jobs for a change. Used by zuul-scheduler only.
+ ``status_url=https://zuul.example.com/status``
+
+scheduler
+"""""""""
+
+**tenant_config**
+ Path to tenant config file.
+ ``tenant_config=/etc/zuul/tenant.yaml``
+
+**log_config**
+ Path to log config file.
+ ``log_config=/etc/zuul/scheduler-logging.yaml``
+
+**pidfile**
+ Path to PID lock file.
+ ``pidfile=/var/run/zuul/scheduler.pid``
+
+**state_dir**
+ Path to directory that Zuul should save state to.
+ ``state_dir=/var/lib/zuul``
+
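+Putting the sections above together, a scheduler configuration might
+look like the following sketch (all values are taken from the examples
+above)::
+
+  [gearman_server]
+  start=true
+  listen_address=127.0.0.1
+  log_config=/etc/zuul/gearman-logging.yaml
+
+  [webapp]
+  listen_address=127.0.0.1
+  port=8008
+  status_url=https://zuul.example.com/status
+
+  [scheduler]
+  tenant_config=/etc/zuul/tenant.yaml
+  log_config=/etc/zuul/scheduler-logging.yaml
+  pidfile=/var/run/zuul/scheduler.pid
+  state_dir=/var/lib/zuul
+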
+Operation
+~~~~~~~~~
+
+To start the scheduler, run ``zuul-scheduler``. To stop it, kill the
+PID which was saved in the pidfile specified in the configuration.
+
+Most of Zuul's configuration is automatically updated as changes to
+the repositories which contain it are merged. However, Zuul must be
+explicitly notified of changes to the tenant config file, since it is
+not read from a git repository. To do so, send the scheduler PID
+(saved in the pidfile specified in the configuration) a SIGHUP signal.
+
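+For example, assuming the pidfile location shown above, the tenant
+configuration can be reloaded with::
+
+  kill -HUP $(cat /var/run/zuul/scheduler.pid)
+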
+Merger
+------
+
+Mergers are an optional Zuul service; they are not required for Zuul
+to operate, but some high volume sites may benefit from running them.
+Zuul performs quite a lot of git operations in the course of its work.
+Each change that is to be tested must be speculatively merged with the
+current state of its target branch to ensure that it can merge, and to
+ensure that the tests that Zuul performs accurately represent the
+outcome of merging the change. Because Zuul's configuration is stored
+in the git repos it interacts with, and is dynamically evaluated, Zuul
+often needs to perform a speculative merge in order to determine
+whether it needs to perform any further actions.
+
+All of these git operations add up, and while Zuul executors can also
+perform them, large numbers may impact their ability to run jobs.
+Therefore, administrators may wish to run standalone mergers in order
+to reduce the load on executors.
+
+Configuration
+~~~~~~~~~~~~~
+
+The following section of **zuul.conf** is used by the merger:
+
+merger
+""""""
+
+**git_dir**
+ Directory that Zuul should clone local git repositories to.
+ ``git_dir=/var/lib/zuul/git``
+
+**git_user_email**
+ Optional: Value to pass to `git config user.email`.
+ ``git_user_email=zuul@example.com``
+
+**git_user_name**
+ Optional: Value to pass to `git config user.name`.
+ ``git_user_name=zuul``
+
+**log_config**
+ Path to log config file for the merger process.
+ ``log_config=/etc/zuul/logging.yaml``
+
+**pidfile**
+ Path to PID lock file for the merger process.
+ ``pidfile=/var/run/zuul-merger/merger.pid``
+
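+A complete ``[merger]`` section assembled from the values above might
+look like::
+
+  [merger]
+  git_dir=/var/lib/zuul/git
+  git_user_email=zuul@example.com
+  git_user_name=zuul
+  log_config=/etc/zuul/logging.yaml
+  pidfile=/var/run/zuul-merger/merger.pid
+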
+Operation
+~~~~~~~~~
+
+To start the merger, run ``zuul-merger``. To stop it, kill the
+PID which was saved in the pidfile specified in the configuration.
+
+Executor
+--------
+
+Executors are responsible for running jobs. At the start of each job,
+an executor prepares an environment in which to run Ansible which
+contains all of the git repositories specified by the job with all
+dependent changes merged into their appropriate branches. The branch
+corresponding to the proposed change will be checked out (in all
+projects, if it exists). Any roles specified by the job will also be
+present (also with dependent changes merged, if appropriate) and added
+to the Ansible role path. The executor also prepares an Ansible
+inventory file with all of the nodes requested by the job.
+
+The executor also contains a merger. This is used by the executor to
+prepare the git repositories used by jobs, but is also available to
+perform any tasks normally performed by standalone mergers. Because
+the executor performs both roles, small Zuul installations may not
+need to run standalone mergers.
+
+Trusted and Untrusted Playbooks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The executor runs playbooks in one of two execution contexts depending
+on whether the project containing the playbook is a *config project*
+or an *untrusted project*. If the playbook is in a *config project*,
+the executor runs the playbook in the *trusted* execution context,
+otherwise, it is run in the *untrusted* execution context.
+
+Both execution contexts use `bubblewrap`_ to create a namespace to
+ensure that playbook executions are isolated and are unable to access
+files outside of a restricted environment. The administrator may
+configure additional local directories on the executor to be made
+available to the restricted environment.
+
+The *trusted* execution context has access to all Ansible features,
+including the ability to load custom Ansible modules. Needless to
+say, extra scrutiny should be given to code that runs in a trusted
+context as it could be used to compromise other jobs running on the
+executor, or the executor itself, especially if the administrator has
+granted additional access through bubblewrap, or a method of escaping
+the restricted environment created by bubblewrap is found.
+
+Playbooks run in the *untrusted* execution context are not permitted
+to load additional Ansible modules or access files outside of the
+restricted environment prepared for them by the executor. In addition
+to the bubblewrap environment applied to both execution contexts, in
+the *untrusted* context some standard Ansible modules are replaced
+with versions which prohibit some actions, including attempts to
+access files outside of the restricted execution context. These
+redundant protections are made as part of a defense-in-depth strategy.
+
+.. _bubblewrap: https://github.com/projectatomic/bubblewrap
+
+Configuration
+~~~~~~~~~~~~~
+
+The following sections of **zuul.conf** are used by the executor:
+
+executor
+""""""""
+
+**finger_port**
+ Port to use for finger log streamer.
+ ``finger_port=79``
+
+**git_dir**
+ Directory that Zuul should clone local git repositories to.
+ ``git_dir=/var/lib/zuul/git``
+
+**log_config**
+ Path to log config file for the executor process.
+ ``log_config=/etc/zuul/logging.yaml``
+
+**private_key_file**
+ SSH private key file to be used when logging into worker nodes.
+ ``private_key_file=~/.ssh/id_rsa``
+
+**user**
+ User ID for the zuul-executor process. In normal operation as a daemon,
+ the executor should be started as the ``root`` user, but it will drop
+ privileges to this user during startup.
+ ``user=zuul``
+
+merger
+""""""
+
+**git_user_email**
+ Optional: Value to pass to `git config user.email`.
+ ``git_user_email=zuul@example.com``
+
+**git_user_name**
+ Optional: Value to pass to `git config user.name`.
+ ``git_user_name=zuul``
+
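+Putting the above together, an executor host's zuul.conf might include
+sections such as the following sketch (paths are examples)::
+
+  [executor]
+  finger_port=79
+  git_dir=/var/lib/zuul/git
+  log_config=/etc/zuul/logging.yaml
+  private_key_file=~/.ssh/id_rsa
+  user=zuul
+
+  [merger]
+  git_user_email=zuul@example.com
+  git_user_name=zuul
+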
+Operation
+~~~~~~~~~
+
+To start the executor, run ``zuul-executor``.
+
+There are several commands which can be run to control the executor's
+behavior once it is running.
+
+To stop the executor immediately, aborting all jobs (they may be
+relaunched according to their retry policy), run ``zuul-executor
+stop``.
+
+To request that the executor stop executing new jobs and exit when all
+currently running jobs have completed, run ``zuul-executor graceful``.
+
+To enable or disable running Ansible in verbose mode (with the '-vvv'
+argument to ansible-playbook) run ``zuul-executor verbose`` and
+``zuul-executor unverbose``.
diff --git a/doc/source/admin/connections.rst b/doc/source/admin/connections.rst
new file mode 100644
index 0000000..5b40e5b
--- /dev/null
+++ b/doc/source/admin/connections.rst
@@ -0,0 +1,59 @@
+:title: Connection Configuration
+
+.. _connection-config:
+
+Connection Configuration
+========================
+
+Most of Zuul's configuration is contained in the git repositories upon
+which Zuul operates, however, some configuration outside of git
+repositories is still required to bootstrap the system. This includes
+information on connections between Zuul and other systems, as well as
+identifying the projects Zuul uses.
+
+.. _connections:
+
+Connections
+-----------
+
+In order to interact with external systems, Zuul must have a
+*connection* to that system configured. Zuul includes a number of
+drivers, each of which implements the functionality necessary to
+connect to a system. Each connection in Zuul is associated with a
+driver.
+
+To configure a connection in Zuul, select a unique name for the
+connection and add a section to **zuul.conf** with the form
+"[connection NAME]". For example, a connection to a gerrit server may
+appear as::
+
+ [connection mygerritserver]
+ driver=gerrit
+ server=review.example.com
+
+.. _drivers:
+
+Drivers
+-------
+
+Drivers may support any of the following functions:
+
+* Sources -- hosts git repositories for projects. Zuul can clone git
+ repos for projects and fetch refs.
+* Triggers -- emits events to which Zuul may respond. Triggers are
+ configured in pipelines to cause changes or other refs to be
+ enqueued.
+* Reporters -- outputs information when a pipeline is finished
+ processing an item.
+
+Zuul includes the following drivers:
+
+.. toctree::
+ :maxdepth: 2
+
+ drivers/gerrit
+ drivers/github
+ drivers/smtp
+ drivers/sql
+ drivers/timer
+ drivers/zuul
diff --git a/doc/source/admin/drivers/gerrit.rst b/doc/source/admin/drivers/gerrit.rst
new file mode 100644
index 0000000..29e136b
--- /dev/null
+++ b/doc/source/admin/drivers/gerrit.rst
@@ -0,0 +1,172 @@
+:title: Gerrit Driver
+
+Gerrit
+======
+
+`Gerrit`_ is a code review system. The Gerrit driver supports
+sources, triggers, and reporters.
+
+.. _Gerrit: https://www.gerritcodereview.com/
+
+Zuul will need access to a Gerrit user.
+
+Create an SSH keypair for Zuul to use if there isn't one already, and
+create a Gerrit user with that key::
+
+ cat ~/id_rsa.pub | ssh -p29418 review.example.com gerrit create-account --ssh-key - --full-name Zuul zuul
+
+Give that user whatever permissions will be needed on the projects you
+want Zuul to report on. For instance, you may want to grant
+``Verified +/-1`` and ``Submit`` to the user. Additional categories
+or values may be added to Gerrit. Zuul is very flexible and can take
+advantage of those.
+
+Connection Configuration
+------------------------
+
+The supported options in zuul.conf connections are:
+
+**driver=gerrit**
+
+**server**
+ FQDN of Gerrit server.
+ ``server=review.example.com``
+
+**canonical_hostname**
+ The canonical hostname associated with the git repos on the Gerrit
+ server. Defaults to the value of **server**. This is used to
+ identify projects from this connection by name and in preparing
+ repos on the filesystem for use by jobs. Note that Zuul will still
+ only communicate with the Gerrit server identified by **server**;
+ this option is useful if users customarily use a different hostname
+ to clone or pull git repos so that when Zuul places them in the
+ job's working directory, they appear under this directory name.
+ ``canonical_hostname=git.example.com``
+
+**port**
+ Optional: Gerrit server port.
+ ``port=29418``
+
+**baseurl**
+ Optional: path to Gerrit web interface. Defaults to ``https://<value
+ of server>/``. ``baseurl=https://review.example.com/review_site/``
+
+**user**
+ User name to use when logging into above server via ssh.
+ ``user=zuul``
+
+**sshkey**
+ Path to SSH key to use when logging into above server.
+ ``sshkey=/home/zuul/.ssh/id_rsa``
+
+**keepalive**
+ Optional: Keepalive timeout, 0 means no keepalive.
+ ``keepalive=60``
+
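+Assembling the options above, a Gerrit connection in **zuul.conf**
+might look like the following sketch (values are examples)::
+
+  [connection mygerritserver]
+  driver=gerrit
+  server=review.example.com
+  port=29418
+  baseurl=https://review.example.com/
+  user=zuul
+  sshkey=/home/zuul/.ssh/id_rsa
+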
+Trigger Configuration
+---------------------
+
+Zuul works with standard versions of Gerrit by invoking the ``gerrit
+stream-events`` command over an SSH connection. It also reports back
+to Gerrit using SSH.
+
+If using Gerrit 2.7 or later, make sure the user is a member of a group
+that is granted the ``Stream Events`` permission, otherwise it will not
+be able to invoke the ``gerrit stream-events`` command over SSH.
+
+The supported pipeline trigger options are:
+
+**event**
+ The event name from gerrit. Examples: ``patchset-created``,
+ ``comment-added``, ``ref-updated``. This field is treated as a
+ regular expression.
+
+**branch**
+ The branch associated with the event. Example: ``master``. This
+ field is treated as a regular expression, and multiple branches may
+ be listed.
+
+**ref**
+ On ref-updated events, the branch parameter is not used, instead the
+ ref is provided. Currently Gerrit has the somewhat idiosyncratic
+ behavior of specifying bare refs for branch names (e.g.,
+ ``master``), but full ref names for other kinds of refs (e.g.,
+ ``refs/tags/foo``). Zuul matches what you put here exactly against
+ what Gerrit provides. This field is treated as a regular
+ expression, and multiple refs may be listed.
+
+**ignore-deletes**
+ When a branch is deleted, a ref-updated event is emitted with a
+ newrev of all zeros. The ``ignore-deletes`` field is a boolean value
+ that determines whether such deletion events can trigger the
+ pipeline. The default is True, meaning deletion events will not
+ trigger the pipeline.
+
+**approval**
+ This is only used for ``comment-added`` events. It only matches if
+ the event has a matching approval associated with it. Example:
+ ``code-review: 2`` matches a ``+2`` vote on the code review
+ category. Multiple approvals may be listed.
+
+**email**
+ This is used for any event. It takes a regex applied to the
+ performer's email address, i.e. the Gerrit account email address. If
+ you want to specify several email filters, you must use a YAML list.
+ Make sure to use non-greedy matchers and to escape dots! Example:
+ ``email: ^.*?@example\.org$``.
+
+**email_filter** (deprecated)
+ A deprecated alternate spelling of *email*. Only one of *email* or
+ *email_filter* should be used.
+
+**username**
+ This is used for any event. It takes a regex applied to the
+ performer's username, i.e. the Gerrit account name. If you want to
+ specify several username filters, you must use a YAML list. Make
+ sure to use non-greedy matchers and to escape dots! Example:
+ ``username: ^jenkins$``.
+
+**username_filter** (deprecated)
+ A deprecated alternate spelling of *username*. Only one of
+ *username* or *username_filter* should be used.
+
+**comment**
+ This is only used for ``comment-added`` events. It accepts a list
+ of regexes that are searched for in the comment string. If any of
+ these regexes matches a portion of the comment string the trigger is
+ matched. ``comment: retrigger`` will match when comments containing
+ 'retrigger' somewhere in the comment text are added to a change.
+
+**comment_filter** (deprecated)
+ A deprecated alternate spelling of *comment*. Only one of *comment*
+ or *comment_filter* should be used.
+
+**require-approval**
+ This may be used for any event. It requires that a certain kind of
+ approval be present for the current patchset of the change (the
+ approval could be added by the event in question). It follows the
+ same syntax as the :ref:`"approval" pipeline requirement
+ <pipeline-require-approval>`. For each specified criteria there must
+ exist a matching approval.
+
+**reject-approval**
+ This takes a list of approvals in the same format as
+ *require-approval* but will fail to enter the pipeline if there is a
+ matching approval.
+
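+As an illustrative sketch, a pipeline using a connection named
+``mygerritserver`` might configure its trigger as follows (the
+pipeline name and comment regex are examples)::
+
+  - pipeline:
+      name: check
+      trigger:
+        mygerritserver:
+          - event: patchset-created
+          - event: comment-added
+            comment: '(?i)^\s*recheck\s*$'
+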
+Reporter Configuration
+----------------------
+
+Zuul works with standard versions of Gerrit by invoking the
+``gerrit`` command over an SSH connection. It reports back to
+Gerrit using SSH.
+
+The dictionary passed to the Gerrit reporter is used for ``gerrit
+review`` arguments, with the boolean value of ``true`` simply
+indicating that the argument should be present without following it
+with a value. For example, ``verified: 1`` becomes ``gerrit review
+--verified 1`` and ``submit: true`` becomes ``gerrit review
+--submit``.
+
+A :ref:`connection<connections>` that uses the gerrit driver must be
+supplied to the reporter.
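+
+For example, a pipeline reporting a Verified vote through a connection
+named ``mygerritserver`` might look like the following sketch (not a
+complete pipeline definition)::
+
+  - pipeline:
+      name: check
+      success:
+        mygerritserver:
+          verified: 1
+      failure:
+        mygerritserver:
+          verified: -1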
diff --git a/doc/source/admin/drivers/github.rst b/doc/source/admin/drivers/github.rst
new file mode 100644
index 0000000..9740292
--- /dev/null
+++ b/doc/source/admin/drivers/github.rst
@@ -0,0 +1,176 @@
+:title: GitHub Driver
+
+GitHub
+======
+
+The GitHub driver supports sources, triggers, and reporters. It can
+interact with the public GitHub service as well as site-local
+installations of GitHub enterprise.
+
+.. TODO: make this section more user friendly
+
+Configure GitHub `webhook events
+<https://developer.github.com/webhooks/creating/>`_.
+
+Set *Payload URL* to
+``http://<zuul-hostname>/connection/<connection-name>/payload``.
+
+Set *Content Type* to ``application/json``.
+
+Select *Events* you are interested in. See below for the supported events.
+
+Connection Configuration
+------------------------
+
+The supported options in zuul.conf connections are:
+
+**driver=github**
+
+**api_token**
+ API token for accessing GitHub.
+ See `Creating an access token for command-line use
+ <https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_.
+
+**webhook_token**
+ Optional: Token for validating the webhook event payloads.
+ If not specified, payloads are not validated.
+ See `Securing your webhooks
+ <https://developer.github.com/webhooks/securing/>`_.
+
+**sshkey**
+ Path to SSH key to use when cloning github repositories.
+ ``sshkey=/home/zuul/.ssh/id_rsa``
+
+**server**
+ Optional: Hostname of the github install (such as a GitHub Enterprise)
+ If not specified, defaults to ``github.com``
+ ``server=github.myenterprise.com``
+
+**canonical_hostname**
+ The canonical hostname associated with the git repos on the GitHub
+ server. Defaults to the value of **server**. This is used to
+ identify projects from this connection by name and in preparing
+ repos on the filesystem for use by jobs. Note that Zuul will still
+ only communicate with the GitHub server identified by **server**;
+ this option is useful if users customarily use a different hostname
+ to clone or pull git repos so that when Zuul places them in the
+ job's working directory, they appear under this directory name.
+ ``canonical_hostname=git.example.com``
+
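+A GitHub connection in **zuul.conf** might therefore look like the
+following sketch (the token values are placeholders)::
+
+  [connection mygithub]
+  driver=github
+  api_token=abcd1234exampletoken
+  webhook_token=examplewebhooksecret
+  sshkey=/home/zuul/.ssh/id_rsa
+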
+Trigger Configuration
+---------------------
+GitHub webhook events can be configured as triggers.
+
+A connection name with the github driver can take multiple events with the
+following options.
+
+**event**
+ The event from github. Supported events are ``pull_request``,
+ ``pull_request_review``, and ``push``.
+
+ A ``pull_request`` event will have associated action(s) to trigger
+ from. The supported actions are:
+
+ *opened* - pull request opened
+
+ *changed* - pull request synchronized
+
+ *closed* - pull request closed
+
+ *reopened* - pull request reopened
+
+ *comment* - comment added on pull request
+
+ *labeled* - label added on pull request
+
+ *unlabeled* - label removed from pull request
+
+ *status* - status set on commit
+
+ A ``pull_request_review`` event will
+ have associated action(s) to trigger from. The supported actions are:
+
+ *submitted* - pull request review added
+
+ *dismissed* - pull request review removed
+
+**branch**
+ The branch associated with the event. Example: ``master``. This
+ field is treated as a regular expression, and multiple branches may
+ be listed. Used for ``pull_request`` and ``pull_request_review``
+ events.
+
+**comment**
+ This is only used for ``pull_request`` ``comment`` actions. It
+ accepts a list of regexes that are searched for in the comment
+ string. If any of these regexes matches a portion of the comment
+ string the trigger is matched. ``comment: retrigger`` will match
+ when comments containing 'retrigger' somewhere in the comment text
+ are added to a pull request.
+
+**label**
+ This is only used for ``labeled`` and ``unlabeled`` ``pull_request``
+ actions. It accepts a list of strings each of which matches the
+ label name in the event literally. ``label: recheck`` will match a
+ ``labeled`` action when a pull request is labeled with a ``recheck``
+ label. ``label: 'do not test'`` will match an ``unlabeled`` action
+ when a label with name ``do not test`` is removed from the pull
+ request.
+
+**state**
+ This is only used for ``pull_request_review`` events. It accepts a
+ list of strings each of which is matched to the review state, which
+ can be one of ``approved``, ``comment``, or ``request_changes``.
+
+**status**
+ This is used for ``pull_request`` and ``status`` actions. It accepts
+ a list of strings each of which matches the user setting the status,
+ the status context, and the status itself in the format of
+ ``user:context:status``. For example,
+ ``zuul_github_ci_bot:check_pipeline:success``.
+
+**ref**
+ This is only used for ``push`` events. This field is treated as a
+ regular expression and multiple refs may be listed. GitHub always
+ sends full ref name, eg. ``refs/tags/bar`` and this string is
+ matched against the regexp.
+
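+As a sketch, a pipeline using a connection named ``mygithub`` might
+configure its trigger as follows (this assumes the action list is
+supplied via an ``action`` attribute; the pipeline name and comment
+regex are examples)::
+
+  - pipeline:
+      name: check
+      trigger:
+        mygithub:
+          - event: pull_request
+            action:
+              - opened
+              - changed
+              - reopened
+          - event: pull_request
+            action: comment
+            comment: '(?i)^\s*recheck\s*$'
+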
+Reporter Configuration
+----------------------
+Zuul reports back to GitHub via the GitHub API. Available reports include a PR
+comment containing the build results, a commit status on start, success and
+failure, an issue label addition/removal on the PR, and a merge of the PR
+itself. Status name, description, and context are taken from the pipeline.
+
+A :ref:`connection<connections>` that uses the github driver must be
+supplied to the reporter. It has the following options:
+
+**status**
+ String value (``pending``, ``success``, ``failure``) that the
+ reporter should set as the commit status on github. ``status:
+ 'success'``
+
+**status-url**
+ String value for a link url to set in the github status. Defaults to
+ the zuul server status_url, or the empty string if that is unset.
+
+**comment**
+ Boolean value (``true`` or ``false``) that determines if the
+ reporter should add a comment to the pipeline status to the github
+ pull request. Defaults to ``true``. Only used for Pull Request based
+ events. ``comment: false``
+
+**merge**
+ Boolean value (``true`` or ``false``) that determines if the
+ reporter should merge the pull request. Defaults to ``false``. Only
+ used for Pull Request based events. ``merge: true``
+
+**label**
+ List of strings each representing an exact label name which should
+ be added to the pull request by reporter. Only used for Pull Request
+ based events. ``label: 'test successful'``
+
+**unlabel**
+ List of strings each representing an exact label name which should
+ be removed from the pull request by reporter. Only used for Pull
+ Request based events. ``unlabel: 'test failed'``
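+
+As an illustrative sketch, a pipeline reporting through a connection
+named ``mygithub`` might combine these options as follows::
+
+  - pipeline:
+      name: check
+      start:
+        mygithub:
+          status: pending
+          comment: false
+      success:
+        mygithub:
+          status: success
+          merge: true
+          label: 'test successful'
+      failure:
+        mygithub:
+          status: failure
+          unlabel: 'test successful'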
diff --git a/doc/source/admin/drivers/smtp.rst b/doc/source/admin/drivers/smtp.rst
new file mode 100644
index 0000000..6f24355
--- /dev/null
+++ b/doc/source/admin/drivers/smtp.rst
@@ -0,0 +1,53 @@
+:title: SMTP Driver
+
+SMTP
+====
+
+The SMTP driver supports reporters only. It is used to send email
+when items report.
+
+Connection Configuration
+------------------------
+
+**driver=smtp**
+
+**server**
+ SMTP server hostname or address to use.
+ ``server=localhost``
+
+**port**
+ Optional: SMTP server port.
+ ``port=25``
+
+**default_from**
+ Who the email should appear to be sent from when emailing the report.
+ This can be overridden by individual pipelines.
+ ``default_from=zuul@example.com``
+
+**default_to**
+ Who the report should be emailed to by default.
+ This can be overridden by individual pipelines.
+ ``default_to=you@example.com``
+
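+Putting these options together, an SMTP connection (here named
+``outgoing_smtp`` to match the reporter example below) might be
+configured as::
+
+  [connection outgoing_smtp]
+  driver=smtp
+  server=localhost
+  port=25
+  default_from=zuul@example.com
+  default_to=you@example.com
+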
+Reporter Configuration
+----------------------
+
+A simple email reporter is also available.
+
+A :ref:`connection<connections>` that uses the smtp driver must be supplied to the
+reporter. The connection also may specify a default *To* or *From*
+address.
+
+Each pipeline can overwrite the ``subject`` or the ``to`` or ``from`` address by
+providing alternatives as arguments to the reporter. For example, ::
+
+ - pipeline:
+ name: post-merge
+ success:
+ outgoing_smtp:
+ to: you@example.com
+ failure:
+ internal_smtp:
+ to: you@example.com
+ from: alternative@example.com
+ subject: Change {change} failed
diff --git a/doc/source/admin/drivers/sql.rst b/doc/source/admin/drivers/sql.rst
new file mode 100644
index 0000000..b890f08
--- /dev/null
+++ b/doc/source/admin/drivers/sql.rst
@@ -0,0 +1,44 @@
+:title: SQL Driver
+
+SQL
+===
+
+The SQL driver supports reporters only. Only one connection per
+database is permitted. The connection options are:
+
+**driver=sql**
+
+**dburi**
+ Database connection information in the form of a URI understood by
+ sqlalchemy, e.g. http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#database-urls
+ ``dburi=mysql://user:pass@localhost/db``
+
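+For example, a connection named ``mydb_conn`` (matching the reporter
+example below) might be configured as::
+
+  [connection mydb_conn]
+  driver=sql
+  dburi=mysql://user:pass@localhost/db
+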
+Reporter Configuration
+----------------------
+
+This reporter is used to store results in a database.
+
+A :ref:`connection<connections>` that uses the sql driver must be
+supplied to the reporter.
+
+zuul.conf contains the database connection and credentials. To store different
+reports in different databases you'll need to create a new connection per
+database.
+
+The SQL reporter does nothing on "start" or "merge-failure"; it only
+acts on "success" or "failure" reporting stages.
+
+**score**
+ A score to store for the result of the build. eg: -1 might indicate a failed
+ build.
+
+For example ::
+
+ - pipeline:
+ name: post-merge
+ success:
+ mydb_conn:
+ score: 1
+ failure:
+ mydb_conn:
+ score: -1
diff --git a/doc/source/admin/drivers/timer.rst b/doc/source/admin/drivers/timer.rst
new file mode 100644
index 0000000..c70df5c
--- /dev/null
+++ b/doc/source/admin/drivers/timer.rst
@@ -0,0 +1,24 @@
+:title: Timer Driver
+
+Timer
+=====
+
+The timer driver supports triggers only. It is used for configuring
+pipelines so that jobs run at scheduled times. No connection
+configuration is required.
+
+Trigger Configuration
+---------------------
+
+Timers don't require a special connection or driver. Instead they can
+simply be used by listing **timer** as the trigger.
+
+This trigger will run based on a cron-style time specification.
+It will enqueue an event into its pipeline for every project
+defined in the configuration. Any job associated with the
+pipeline will run in response to that event.
+
+**time**
+ The time specification in cron syntax. Only the 5 part syntax is
+ supported, not the symbolic names. Example: ``0 0 * * *`` runs at
+ midnight.
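+
+For example, a sketch of a pipeline that enqueues its projects every
+night at midnight (the pipeline name is an example)::
+
+  - pipeline:
+      name: periodic
+      trigger:
+        timer:
+          - time: '0 0 * * *'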
diff --git a/doc/source/admin/drivers/zuul.rst b/doc/source/admin/drivers/zuul.rst
new file mode 100644
index 0000000..a23c875
--- /dev/null
+++ b/doc/source/admin/drivers/zuul.rst
@@ -0,0 +1,40 @@
+:title: Zuul Driver
+
+Zuul
+====
+
+The Zuul driver supports triggers only. It is used for triggering
+pipelines based on internal Zuul events.
+
+Trigger Configuration
+---------------------
+
+Zuul events don't require a special connection or driver. Instead they
+can simply be used by listing **zuul** as the trigger.
+
+**event**
+ The event name. Currently supported:
+
+ *project-change-merged* when Zuul merges a change to a project, it
+ generates this event for every open change in the project.
+
+ *parent-change-enqueued* when Zuul enqueues a change into any
+ pipeline, it generates this event for every child of that
+ change.
+
+**pipeline**
+ Only available for ``parent-change-enqueued`` events. This is the
+ name of the pipeline in which the parent change was enqueued.
+
+**require-approval**
+ This may be used for any event. It requires that a certain kind of
+ approval be present for the current patchset of the change (the
+ approval could be added by the event in question). It follows the
+ same syntax as the :ref:`"approval" pipeline requirement
+ <pipeline-require-approval>`. For each specified criteria there must
+ exist a matching approval.
+
+**reject-approval**
+ This takes a list of approvals in the same format as
+ *require-approval* but will fail to enter the pipeline if there is a
+ matching approval.
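+
+As an illustrative sketch, a pipeline might use this trigger as
+follows (the pipeline names are examples)::
+
+  - pipeline:
+      name: check
+      trigger:
+        zuul:
+          - event: parent-change-enqueued
+            pipeline: gate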
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
new file mode 100644
index 0000000..a2a2ee7
--- /dev/null
+++ b/doc/source/admin/index.rst
@@ -0,0 +1,20 @@
+Administrator's Guide
+=====================
+
+This guide is intended for administrators of Zuul systems. It covers
+installation, operation, and the portion of Zuul configuration that
+occurs outside of the projects upon which Zuul operates. Advanced
+users may be interested in some of the concepts described here, as
+well as understanding what features the underlying configuration
+provides to in-project configuration.
+
+.. toctree::
+ :maxdepth: 2
+
+ quick-start
+ installation
+ components
+ connections
+ tenants
+ monitoring
+ client
diff --git a/doc/source/admin/installation.rst b/doc/source/admin/installation.rst
new file mode 100644
index 0000000..bc61f7e
--- /dev/null
+++ b/doc/source/admin/installation.rst
@@ -0,0 +1,69 @@
+Installation
+============
+
+Install Zuul
+------------
+
+To install a Zuul release from PyPI, run::
+
+ pip install zuul
+
+Or from a git checkout, run::
+
+ pip install .
+
+That will also install Zuul's python dependencies. To minimize
+interaction with other python packages installed on a system, you may
+wish to install Zuul within a Python virtualenv.
+
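+For example, a minimal virtualenv-based installation might look like
+this (the target path is only an example)::
+
+  virtualenv /opt/zuul-venv
+  /opt/zuul-venv/bin/pip install zuul
+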
+Zuul has several system-level dependencies as well. You can find a
+list of operating system packages in `bindep.txt` in Zuul's source
+directory.
+
+External Dependencies
+---------------------
+
+Zuul interacts with several other systems described below.
+
+Gearman
+~~~~~~~
+
+Gearman is a job distribution system that Zuul uses to communicate
+with its distributed components. The Zuul scheduler distributes work
+to Zuul mergers and executors using Gearman.  You may supply your own
+gearman server, but the Zuul scheduler includes a built-in server
+which is recommended. Ensure that all Zuul hosts can communicate with
+the gearman server.
+
+Zuul distributes secrets to executors via gearman, so be sure to
+secure it with TLS and certificate authentication. Obtain (or
+generate) a certificate for both the server and the clients (they may
+use the same certificate or have individual certificates). They must
+be signed by a CA, but it can be your own CA.
+
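+As a rough sketch (exact certificate management is up to you), a local
+CA and a signed server certificate could be generated with openssl as
+follows; the file names and subjects are examples only::
+
+  # Create a local certificate authority
+  openssl req -new -x509 -nodes -days 3650 -subj "/CN=zuul-gearman-ca" \
+      -keyout ca.key -out ca.pem
+  # Create a key and signing request for the gearman server
+  openssl req -new -nodes -subj "/CN=gearman.example.com" \
+      -keyout server.key -out server.csr
+  # Sign the server certificate with the local CA
+  openssl x509 -req -days 3650 -in server.csr -CA ca.pem -CAkey ca.key \
+      -CAcreateserial -out server.pem
+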
+Nodepool
+~~~~~~~~
+
+In order to run all but the simplest jobs, Zuul uses a companion
+program, Nodepool, to supply the nodes (whether dynamic cloud
+instances or static hardware) used by jobs. Before starting Zuul,
+ensure you have Nodepool installed and any images you require built.
+Zuul only makes one requirement of these nodes: that it be able to log
+in given a username and ssh private key.
+
+.. TODO: SpamapS any zookeeper config recommendations?
+
+Nodepool uses Zookeeper to communicate internally among its
+components, and also to communicate with Zuul. You can run a simple
+single-node Zookeeper instance, or a multi-node cluster. Ensure that
+the host running the Zuul scheduler has access to the cluster.
+
+Ansible
+~~~~~~~
+
+Zuul uses Ansible to run jobs. Each version of Zuul is designed to
+work with a specific, contemporary version of Ansible. Zuul specifies
+that version of Ansible in its python package metadata, and normally
+the correct version will be installed automatically with Zuul.
+Because of the close integration of Zuul and Ansible, attempting to
+use other versions of Ansible with Zuul is not recommended.
diff --git a/doc/source/statsd.rst b/doc/source/admin/monitoring.rst
similarity index 76%
rename from doc/source/statsd.rst
rename to doc/source/admin/monitoring.rst
index fb6989e..2a6c959 100644
--- a/doc/source/statsd.rst
+++ b/doc/source/admin/monitoring.rst
@@ -1,15 +1,17 @@
-:title: Statsd reporting
+:title: Monitoring
+
+Monitoring
+==========
Statsd reporting
-================
+----------------
Zuul comes with support for the statsd protocol, when enabled and configured
(see below), the Zuul scheduler will emit raw metrics to a statsd receiver
-which let you in turn generate nice graphics. An example is OpenStack Zuul
-status page: http://status.openstack.org/zuul/
+which let you in turn generate nice graphics.
Configuration
--------------
+~~~~~~~~~~~~~
Statsd support uses the statsd python module. Note that Zuul will start without
the statsd python module, so an existing Zuul installation may be missing it.
@@ -27,17 +29,16 @@
STATSD_PORT=8125
Metrics
--------
+~~~~~~~
The metrics are emitted by the Zuul scheduler (`zuul/scheduler.py`):
**gerrit.event.<type> (counters)**
- Gerrit emits different kind of message over its `stream-events` interface. As
- a convenience, Zuul emits metrics to statsd which save you from having to use
- a different daemon to measure Gerrit events.
- The Gerrit events have different types defined by Gerrit itself, Zuul will
- relay any type of event reusing the name defined by Gerrit. Some of the
- events emitted are:
+ Gerrit emits different kinds of messages over its `stream-events`
+ interface. Zuul will report counters for each type of event it
+ receives from Gerrit.
+
+ Some of the events emitted are:
* patchset-created
* draft-published
@@ -52,18 +53,6 @@
Refer to your Gerrit installation documentation for an exhaustive list of
Gerrit event types.
-**zuul.node_type.**
- Holds metrics specifc to build nodes per label. The hierarchy is:
-
- #. **<build node label>** each of the labels associated to a build in
- Jenkins. It contains:
-
- #. **job.<jobname>** subtree detailing per job statistics:
-
- #. **wait_time** counter and timer of the wait time, with the
- difference of the job start time and the execute time, in
- milliseconds.
-
**zuul.pipeline.**
Holds metrics specific to jobs. The hierarchy is:
diff --git a/doc/source/admin/quick-start.rst b/doc/source/admin/quick-start.rst
new file mode 100644
index 0000000..9993775
--- /dev/null
+++ b/doc/source/admin/quick-start.rst
@@ -0,0 +1,120 @@
+Quick Start Guide
+=================
+
+This provides a very simple overview of Zuul. It is recommended to
+read the following sections for more details.
+
+Install Zuul
+------------
+
+You can get zuul from pypi via::
+
+ pip install zuul
+
+Zuul Components
+---------------
+
+Zuul provides the following components:
+
+ - **zuul-scheduler**: The main Zuul process. Handles receiving
+ events, executing jobs, collecting results and posting reports.
+ Coordinates the work of the other components.
+
+ - **zuul-merger**: Scale-out component that performs git merge
+ operations. Zuul performs a large number of git operations in
+ the course of its work. Adding merger processes can help speed
+ Zuul's processing. This component is optional (zero or more of
+ these can be run).
+
+ - **zuul-executor**: Scale-out component for executing jobs. At
+ least one of these is required. Depending on system
+ configuration, you can expect a single executor to handle up to
+ about 100 simultaneous jobs. Can handle the functions of a
+ merger if dedicated mergers are not provided. One or more of
+ these must be run.
+
+ - **gearman**: Optional built-in gearman daemon provided by
+ zuul-scheduler.
+
+External components:
+
+ - **gearman**: A gearman daemon if the built-in daemon is not used.
+
+ - **zookeeper**: A zookeeper cluster (or single host) for
+ communicating with Nodepool.
+
+ - **nodepool**: Provides nodes for Zuul to use when executing jobs.
+
+
+Zuul Setup
+----------
+
+At minimum you need to provide **zuul.conf** and **main.yaml** placed
+in **/etc/zuul/**. The following example uses the builtin gearman
+service in Zuul, and a connection to Gerrit.
+
+**zuul.conf**::
+
+ [scheduler]
+ tenant_config=/etc/zuul/main.yaml
+
+ [gearman_server]
+ start=true
+
+ [gearman]
+ server=127.0.0.1
+
+ [connection gerrit]
+ driver=gerrit
+ server=git.example.com
+ port=29418
+ baseurl=https://git.example.com/gerrit/
+ user=zuul
+ sshkey=/home/zuul/.ssh/id_rsa
+
+See :ref:`components` and :ref:`connections` for more details.
+
+The following tells Zuul to read its configuration from and operate on
+the *example-project* project:
+
+**main.yaml**::
+
+ - tenant:
+ name: example-tenant
+ source:
+ gerrit:
+ untrusted-projects:
+ - example-project
+
+Starting Zuul
+-------------
+
+You can run any zuul process with the **-d** option to prevent it from
+daemonizing. It's a good idea at first to confirm there are no issues
+with your configuration.
+
+To start, simply run::
+
+ zuul-scheduler
+
+Once running, you should have two zuul-scheduler processes if you are
+using the built-in gearman server, or one process otherwise.
+
+Before Zuul can run any jobs, it needs to load its configuration, most
+of which is in the git repositories that Zuul operates on. Start an
+executor to allow zuul to do that::
+
+ zuul-executor
+
+Zuul should now be able to read its configuration from the configured
+repo and process any jobs defined therein.
+
+Troubleshooting
+---------------
+
+You can use telnet to connect to gearman to check which Zuul
+components are online::
+
+ telnet <gearman_ip> 4730
+
+Useful commands are **workers** and **status** which you can run by just
+typing those commands once connected to gearman.
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
new file mode 100644
index 0000000..8872397
--- /dev/null
+++ b/doc/source/admin/tenants.rst
@@ -0,0 +1,95 @@
+:title: Tenant Configuration
+
+.. _tenant-config:
+
+Tenant Configuration
+====================
+
+After *zuul.conf* is configured, Zuul component servers will be able
+to start, but a tenant configuration is required in order for Zuul to
+perform any actions. The tenant configuration file specifies upon
+which projects Zuul should operate. These repositories are
+grouped into tenants. The configuration of each tenant is separate
+from the rest (no pipelines, jobs, etc are shared between them).
+
+A project may appear in more than one tenant; this may be useful if
+you wish to use common job definitions across multiple tenants.
+
+The tenant configuration file is specified by the *tenant_config*
+setting in the *scheduler* section of *zuul.conf*. It is a YAML file
+which, like other Zuul configuration files, is a list of configuration
+objects, though only one type of object is supported, *tenant*.
+
+Tenant
+------
+
+A tenant is a collection of projects which share a Zuul
+configuration. An example tenant definition is::
+
+ - tenant:
+ name: my-tenant
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ - shared-jobs:
+ include: jobs
+ untrusted-projects:
+ - project1
+ - project2
+
+The following attributes are supported:
+
+**name** (required)
+ The name of the tenant. This may appear in URLs, paths, and
+ monitoring fields, and so should be restricted to URL friendly
+ characters (ASCII letters, numbers, hyphen and underscore) and you
+ should avoid changing it unless necessary.
+
+**source** (required)
+ A dictionary of sources to consult for projects. A tenant may
+ contain projects from multiple sources; each of those sources must
+ be listed here, along with the projects it supports. The name of a
+ :ref:`connection<connections>` is used as the dictionary key
+ (e.g. `gerrit` in the example above), and the value is a further
+ dictionary containing the keys below.
+
+ **config-projects**
+ A list of projects to be treated as config projects in this
+ tenant. The jobs in a config project are trusted, which means
+ they run with extra privileges, do not have their configuration
+ dynamically loaded for proposed changes, and zuul.yaml files are
+ only searched for in the master branch.
+
+ **untrusted-projects**
+ A list of projects to be treated as untrusted in this tenant. An
+ untrusted project is the typical project operated on by Zuul.
+ Their jobs run in a more restrictive environment, they may not
+ define pipelines, their configuration is dynamically evaluated in
+ response to proposed changes, and Zuul will read configuration files
+ in all of their branches.
+
+ Each of the projects listed may be either a simple string value, or
+ it may be a dictionary with the following keys:
+
+ **include**
+ Normally Zuul will load all of the configuration classes
+ appropriate for the type of project (config or untrusted) in
+ question. However, if you only want to load some items, the
+ *include* attribute can be used to specify that *only* the
+ specified classes should be loaded. Supplied as a string, or a
+ list of strings.
+
+ **exclude**
+ A list of configuration classes that should not be loaded.
+
+ The order of the projects listed in a tenant is important. A job
+ which is defined in one project may not be redefined in another
+ project; therefore, once a job appears in one project, a project
+ listed later will be unable to define a job with that name.
+ Further, some aspects of project configuration (such as the merge
+ mode) may only be set on the first appearance of a project
+ definition.
+
+ Zuul loads the configuration from all *config-projects* in the order
+ listed, followed by all *untrusted-projects* in order.
diff --git a/doc/source/cloner.rst b/doc/source/cloner.rst
deleted file mode 100644
index 70577cc..0000000
--- a/doc/source/cloner.rst
+++ /dev/null
@@ -1,110 +0,0 @@
-:title: Zuul Cloner
-
-Zuul Cloner
-===========
-
-Zuul includes a simple command line client that may be used to clone
-repositories with Zuul references applied.
-
-Configuration
--------------
-
-Clone map
-'''''''''
-
-By default, Zuul cloner will clone the project under ``basepath`` which
-would create sub directories whenever a project name contains slashes. Since
-you might want to finely tweak the final destination, a clone map lets you
-change the destination on a per project basis. The configuration is done using
-a YAML file passed with ``-m``.
-
-With a project hierarchy such as::
-
- project
- thirdparty/plugins/plugin1
-
-You might want to get ``project`` straight in the base path, the clone map would be::
-
- clonemap:
- - name: 'project'
- dest: '.'
-
-Then to strip out ``thirdparty`` such that the plugins land under the
-``/plugins`` directory of the basepath, you can use regex and capturing
-groups::
-
- clonemap:
- - name: 'project'
- dest: '.'
- - name: 'thirdparty/(plugins/.*)'
- dest: '\1'
-
-The resulting workspace will contains::
-
- project -> ./
- thirdparty/plugins/plugin1 -> ./plugins/plugin1
-
-
-Zuul parameters
-'''''''''''''''
-
-The Zuul cloner reuses Zuul parameters such as ZUUL_BRANCH, ZUUL_REF or
-ZUUL_PROJECT. It will attempt to load them from the environment variables or
-you can pass them as parameters (in which case it will override the
-environment variable if it is set). The matching command line parameters use
-the ``zuul`` prefix hence ZUUL_REF can be passed to the cloner using
-``--zuul-ref``.
-
-Usage
------
-The general options that apply are:
-
-.. program-output:: zuul-cloner --help
-
-
-Ref lookup order
-''''''''''''''''
-
-The Zuul cloner will attempt to lookup references in this order:
-
- 1) Zuul reference for the indicated branch
- 2) Zuul reference for the master branch
- 3) The tip of the indicated branch
- 4) The tip of the master branch
-
-The "indicated branch" is one of the following:
-
- A) The project-specific override branch (from project_branches arg)
- B) The user specified branch (from the branch arg)
- C) ZUUL_BRANCH (from the zuul_branch arg)
-
-Clone order
------------
-
-When cloning repositories, the destination folder should not exist or
-``git clone`` will complain. This happens whenever cloning a sub project
-before its parent project. For example::
-
- zuul-cloner project/plugins/plugin1 project
-
-Will create the directory ``project`` when cloning the plugin. The
-cloner processes the clones in the order supplied, so you should swap the
-projects::
-
- zuul-cloner project project/plugins/plugin1
-
-Cached repositories
--------------------
-
-The ``--cache-dir`` option can be used to reduce network traffic by
-cloning from a local repository which may not be up to date.
-
-If the ``--cache-dir`` option is supplied, zuul-cloner will start by
-cloning any projects it processes from those found in that directory.
-The URL of origin remote of the resulting clone will be reset to use
-the ``git_base_url`` and then the remote will be updated so that the
-repository has all the information in the upstream repository.
-
-The default for ``--cache-dir`` is taken from the environment variable
-``ZUUL_CACHE_DIR``. A value provided explicitly on the command line
-overrides the environment variable setting.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index f8ae368..71c7697 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -28,7 +28,8 @@
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.blockdiag',
- 'sphinxcontrib.programoutput'
+ 'sphinxcontrib.programoutput',
+ 'oslosphinx'
]
#extensions = ['sphinx.ext.intersphinx']
#intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
diff --git a/doc/source/connections.rst b/doc/source/connections.rst
deleted file mode 100644
index 120d529..0000000
--- a/doc/source/connections.rst
+++ /dev/null
@@ -1,128 +0,0 @@
-:title: Connections
-
-.. _connections:
-
-Connections
-===========
-
-zuul coordinates talking to multiple different systems via the concept
-of connections. A connection is listed in the :ref:`zuulconf` file and is
-then referred to from the :ref:`layoutyaml`. This makes it possible to
-receive events from gerrit via one connection and post results from another
-connection that may report back as a different user.
-
-Gerrit
-------
-
-Create a connection with gerrit.
-
-**driver=gerrit**
-
-**server**
- FQDN of Gerrit server.
- ``server=review.example.com``
-
-**canonical_hostname**
- The canonical hostname associated with the git repos on the Gerrit
- server. Defaults to the value of **server**. This is used to
- identify repos from this connection by name and in preparing repos
- on the filesystem for use by jobs.
- ``canonical_hostname=git.example.com``
-
-**port**
- Optional: Gerrit server port.
- ``port=29418``
-
-**baseurl**
- Optional: path to Gerrit web interface. Defaults to ``https://<value
- of server>/``. ``baseurl=https://review.example.com/review_site/``
-
-**user**
- User name to use when logging into above server via ssh.
- ``user=zuul``
-
-**sshkey**
- Path to SSH key to use when logging into above server.
- ``sshkey=/home/zuul/.ssh/id_rsa``
-
-**keepalive**
- Optional: Keepalive timeout, 0 means no keepalive.
- ``keepalive=60``
-
-Gerrit Configuration
-~~~~~~~~~~~~~~~~~~~~
-
-Zuul will need access to a Gerrit user.
-
-Create an SSH keypair for Zuul to use if there isn't one already, and
-create a Gerrit user with that key::
-
- cat ~/id_rsa.pub | ssh -p29418 gerrit.example.com gerrit create-account --ssh-key - --full-name Jenkins jenkins
-
-Give that user whatever permissions will be needed on the projects you
-want Zuul to gate. For instance, you may want to grant ``Verified
-+/-1`` and ``Submit`` to the user. Additional categories or values may
-be added to Gerrit. Zuul is very flexible and can take advantage of
-those.
-
-GitHub
-------
-
-Create a connection with GitHub.
-
-**driver=github**
-
-**api_token**
- API token for accessing GitHub.
- See `Creating an access token for command-line use
- <https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_.
-
-**webhook_token**
- Optional: Token for validating the webhook event payloads.
- If not specified, payloads are not validated.
- See `Securing your webhooks
- <https://developer.github.com/webhooks/securing/>`_.
-
-**sshkey**
- Path to SSH key to use when cloning github repositories.
- ``sshkey=/home/zuul/.ssh/id_rsa``
-
-**git_host**
- Optional: Hostname of the github install (such as a GitHub Enterprise)
- If not specified, defaults to ``github.com``
- ``git_host=github.myenterprise.com``
-
-SMTP
-----
-
-**driver=smtp**
-
-**server**
- SMTP server hostname or address to use.
- ``server=localhost``
-
-**port**
- Optional: SMTP server port.
- ``port=25``
-
-**default_from**
- Who the email should appear to be sent from when emailing the report.
- This can be overridden by individual pipelines.
- ``default_from=zuul@example.com``
-
-**default_to**
- Who the report should be emailed to by default.
- This can be overridden by individual pipelines.
- ``default_to=you@example.com``
-
-SQL
-----
-
- Only one connection per a database is permitted.
-
- **driver=sql**
-
- **dburi**
- Database connection information in the form of a URI understood by
- sqlalchemy. eg http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#database-urls
- ``dburi=mysql://user:pass@localhost/db``
diff --git a/doc/source/executors.rst b/doc/source/executors.rst
deleted file mode 100644
index 5f85f58..0000000
--- a/doc/source/executors.rst
+++ /dev/null
@@ -1,332 +0,0 @@
-:title: Executors
-
-.. _Gearman: http://gearman.org/
-
-.. _`Gearman Plugin`:
- https://wiki.jenkins-ci.org/display/JENKINS/Gearman+Plugin
-
-.. _`Turbo-Hipster`:
- https://git.openstack.org/cgit/openstack/turbo-hipster/
-
-.. _`Turbo-Hipster Documentation`:
- http://turbo-hipster.rtfd.org/
-
-.. _executors:
-
-Executors
-=========
-
-Zuul has a modular architecture for executing jobs. Currently, the
-only supported module interfaces with Gearman_. This design allows
-any system to run jobs for Zuul simply by interfacing with a Gearman
-server. The recommended way of integrating a new job-runner with Zuul
-is via this method.
-
-If Gearman is unsuitable, Zuul may be extended with a new executor
-module. Zuul makes very few assumptions about the interface to a
-executor -- if it can trigger jobs, cancel them, and receive success
-or failure reports, it should be able to be used with Zuul. Patches
-to this effect are welcome.
-
-Zuul Parameters
----------------
-
-Zuul will pass some parameters with every job it executes. These are
-for workers to be able to get the repositories into the state they are
-intended to be tested in. Builds can be triggered either by an action
-on a change or by a reference update. Both events share a common set
-of parameters and more specific parameters as follows:
-
-Common parameters
-~~~~~~~~~~~~~~~~~
-
-**ZUUL_UUID**
- Zuul provided key to link builds with Gerrit events.
-**ZUUL_REF**
- Zuul provided ref that includes commit(s) to build.
-**ZUUL_COMMIT**
- The commit SHA1 at the head of ZUUL_REF.
-**ZUUL_PROJECT**
- The project that triggered this build.
-**ZUUL_PIPELINE**
- The Zuul pipeline that is building this job.
-**ZUUL_URL**
- The URL for the zuul server as configured in zuul.conf.
- A test runner may use this URL as the basis for fetching
- git commits.
-**BASE_LOG_PATH**
- zuul suggests a path to store and address logs. This is deterministic
- and hence useful for where you want workers to upload to a specific
- destination or need them to have a specific final URL you can link to
- in advanced. For changes it is:
- "last-two-digits-of-change/change-number/patchset-number".
- For reference updates it is: "first-two-digits-of-newrev/newrev"
-**LOG_PATH**
- zuul also suggests a unique path for logs to the worker. This is
- "BASE_LOG_PATH/pipeline-name/job-name/uuid"
-**ZUUL_VOTING**
- Whether Zuul considers this job voting or not. Note that if Zuul is
- reconfigured during the run, the voting status of a job may change
- and this value will be out of date. Values are '1' if voting, '0'
- otherwise.
-
-Change related parameters
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following additional parameters will only be provided for builds
-associated with changes (i.e., in response to patchset-created or
-comment-added events):
-
-**ZUUL_BRANCH**
- The target branch for the change that triggered this build.
-**ZUUL_CHANGE**
- The Gerrit change ID for the change that triggered this build.
-**ZUUL_CHANGES**
- A caret character separated list of the changes upon which this build
- is dependent upon in the form of a colon character separated list
- consisting of project name, target branch, and revision ref.
-**ZUUL_CHANGE_IDS**
- All of the Gerrit change IDs that are included in this build (useful
- when the DependentPipelineManager combines changes for testing).
-**ZUUL_PATCHSET**
- The Gerrit patchset number for the change that triggered this build.
-
-Reference updated parameters
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following additional parameters will only be provided for
-post-merge (ref-updated) builds:
-
-**ZUUL_OLDREV**
- The SHA1 of the old revision at this ref (recall the ref name is
- in ZUUL_REF).
-**ZUUL_NEWREV**
- The SHA1 of the new revision at this ref (recall the ref name is
- in ZUUL_REF).
-**ZUUL_SHORT_OLDREV**
- The shortened (7 character) SHA1 of the old revision.
-**ZUUL_SHORT_NEWREV**
- The shortened (7 character) SHA1 of the new revision.
-
-Unset revisions default to 00000000000000000000000000000000.
-
-Examples:
-
-When a reference is created::
-
- ZUUL_OLDREV=00000000000000000000000000000000
- ZUUL_NEWREV=123456789abcdef123456789abcdef12
- ZUUL_SHORT_OLDREV=0000000
- ZUUL_SHORT_NEWREV=1234567
-
-When a reference is deleted::
-
- ZUUL_OLDREV=123456789abcdef123456789abcdef12
- ZUUL_NEWREV=00000000000000000000000000000000
- ZUUL_SHORT_OLDREV=1234567
- ZUUL_SHORT_NEWREV=0000000
-
-And finally a reference being altered::
-
- ZUUL_OLDREV=123456789abcdef123456789abcdef12
- ZUUL_NEWREV=abcdef123456789abcdef123456789ab
- ZUUL_SHORT_OLDREV=1234567
- ZUUL_SHORT_NEWREV=abcdef1
-
-Your jobs can check whether the parameters are ``000000`` to act
-differently on each kind of event.
-
-Gearman
--------
-
-Gearman_ is a general-purpose protocol for distributing jobs to any
-number of workers. Zuul works with Gearman by sending specific
-information with job requests to Gearman, and expects certain
-information to be returned on completion. This protocol is described
-in `Zuul-Gearman Protocol`_.
-
-In order for Zuul to run any jobs, you will need a running Gearman
-server. Zuul includes a Gearman server, and it is recommended that it
-be used as it supports the following features needed by Zuul:
-
-* Canceling jobs in the queue (admin protocol command "cancel job").
-* Strict FIFO queue operation (gearmand's round-robin mode may be
- sufficient, but is untested).
-
-To enable the built-in server, see the ``gearman_server`` section of
-``zuul.conf``. Be sure that the host allows connections from Zuul and
-any workers (e.g., Jenkins masters) on TCP port 4730, and nowhere else
-(as the Gearman protocol does not include any provision for
-authentication).
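-
-As a minimal sketch, enabling the built-in server and pointing Zuul at
-it might look like this in ``zuul.conf``::
-
- [gearman_server]
- start=true
-
- [gearman]
- server=127.0.0.1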
-
-Gearman Jenkins Plugin
-~~~~~~~~~~~~~~~~~~~~~~
-
-The `Gearman Jenkins Plugin`_ makes it easy to use Jenkins with Zuul
-by providing an interface between Jenkins and Gearman. In this
-configuration, Zuul asks Gearman to run jobs, and Gearman can then
-distribute those jobs to any number of Jenkins systems (including
-multiple Jenkins masters).
-
-The `Gearman Plugin`_ can be installed in Jenkins in order to
-facilitate Jenkins running jobs for Zuul. Install the plugin and
-configure it with the hostname or IP address of your Gearman server
-and the port on which it is listening (4730 by default). It will
-automatically register all known Jenkins jobs as functions that Zuul
-can invoke via Gearman.
-
-Any number of masters can be configured in this way, and Gearman will
-distribute jobs to all of them as appropriate.
-
-No special Jenkins job configuration is needed to support triggering
-by Zuul.
-
-The Gearman Plugin will ensure the `Zuul Parameters`_ are supplied as
-Jenkins build parameters, so they will be available for use in the job
-configuration as well as to the running job as environment variables.
-
-Jenkins git plugin configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In order to test the correct build, configure the Jenkins Git SCM
-plugin as follows::
-
- Source Code Management:
- Git
- Repositories:
- Repository URL: <your Gerrit or Zuul repository URL>
- Advanced:
- Refspec: ${ZUUL_REF}
- Branches to build:
- Branch Specifier: ${ZUUL_COMMIT}
- Advanced:
- Clean after checkout: True
-
-That should be sufficient for a job that only builds a single project.
-If you have multiple interrelated projects (i.e., they share a Zuul
-Change Queue) that are built together, you may be able to configure
-the Git plugin to prepare them, or you may choose to use a shell script
-instead. As an example, the OpenStack project uses the following
-script to prepare the workspace for its integration testing:
-
- https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh
-
-Turbo Hipster Worker
-~~~~~~~~~~~~~~~~~~~~
-
-As an alternative to Jenkins, `Turbo-Hipster`_ is a small python
-project designed specifically as a zuul job worker which can be
-registered with gearman as a job runner. Please see the
-`Turbo-Hipster Documentation`_ for details on how to set it up.
-
-Zuul-Gearman Protocol
-~~~~~~~~~~~~~~~~~~~~~
-
-This section is only relevant if you intend to implement a new kind of
-worker that runs jobs for Zuul via Gearman. If you just want to use
-Jenkins, see `Gearman Jenkins Plugin`_ instead.
-
-The Zuul protocol as used with Gearman is as follows:
-
-Starting Builds
-^^^^^^^^^^^^^^^
-
-To start a build, Zuul invokes a Gearman function with the following
-format:
-
- build:FUNCTION_NAME
-
-where **FUNCTION_NAME** is the name of the job that should be run. If
-the job should run on a specific node (or class of node), Zuul will
-instead invoke:
-
- build:FUNCTION_NAME:NODE_NAME
-
-where **NODE_NAME** is the name or class of node on which the job
-should be run.
-
-Zuul sends the ZUUL_* parameters described in `Zuul Parameters`_
-encoded in JSON format as the argument included with the
-SUBMIT_JOB_UNIQ request to Gearman. A unique ID (equal to the
-ZUUL_UUID parameter) is also supplied to Gearman, and is accessible as
-an added Gearman parameter with GRAB_JOB_UNIQ.
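-
-For illustration only (the exact set of parameters depends on the
-event; all values here are hypothetical), the JSON argument for a
-change-triggered build might look like::
-
- {
- "ZUUL_UUID": "0123456789abcdef0123456789abcdef",
- "ZUUL_PIPELINE": "gate",
- "ZUUL_PROJECT": "example/project",
- "ZUUL_REF": "refs/zuul/master/Z0123456789abcdef",
- "ZUUL_COMMIT": "1234567890123456789012345678901234567890",
- "ZUUL_BRANCH": "master",
- "ZUUL_CHANGE": "123456",
- "ZUUL_PATCHSET": "3"
- }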
-
-When a Gearman worker starts running a job for Zuul, it should
-immediately send a WORK_DATA packet with the following information
-encoded in JSON format:
-
-**name**
- The name of the job.
-
-**number**
- The build number (unique to this job).
-
-**manager**
- A unique identifier associated with the Gearman worker that can
- abort this build. See `Stopping Builds`_ for more information.
-
-**url** (optional)
- The URL with the status or results of the build. Will be used in
- the status page and the final report.
-
-To help with debugging builds a worker may send back some optional
-metadata:
-
-**worker_name** (optional)
- The name of the worker.
-
-**worker_hostname** (optional)
- The hostname of the worker.
-
-**worker_ips** (optional)
- A list of IPs for the worker.
-
-**worker_fqdn** (optional)
- The FQDN of the worker.
-
-**worker_program** (optional)
- The program name of the worker. For example Jenkins or turbo-hipster.
-
-**worker_version** (optional)
- The version of the software running the job.
-
-**worker_extra** (optional)
- A dictionary of any extra metadata you may want to pass along.
-
-It should then immediately send a WORK_STATUS packet with a value of 0
-percent complete. It may then optionally send subsequent WORK_STATUS
-packets with updated completion values.
-
-When the build is complete, it should send a final WORK_DATA packet
-with the following in JSON format:
-
-**result**
- Either the string 'SUCCESS' if the job succeeded, or any other value
- that describes the result if the job failed.
-
-Finally, it should send either a WORK_FAIL or WORK_COMPLETE packet as
-appropriate. A WORK_EXCEPTION packet will be interpreted as a
-WORK_FAIL, but the exception will be logged in Zuul's error log.
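-
-As a sketch (hypothetical values), the WORK_DATA payload sent when a
-build starts might be::
-
- {"name": "run-tests", "number": 42, "manager": "example-manager",
- "url": "https://ci.example.com/run-tests/42/"}
-
-and the final WORK_DATA payload on completion might be::
-
- {"result": "SUCCESS"}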
-
-Stopping Builds
-^^^^^^^^^^^^^^^
-
-If Zuul needs to abort a build already in progress, it will invoke the
-following function through Gearman:
-
- stop:MANAGER_NAME
-
-Where **MANAGER_NAME** is the name of the manager worker supplied in
-the initial WORK_DATA packet when the job started. This is used to
-direct the stop: function invocation to the correct Gearman worker
-that is capable of stopping that particular job. The argument to the
-function should be the following encoded in JSON format:
-
-**name**
- The job name of the build to stop.
-
-**number**
- The build number of the build to stop.
-
-The original job is expected to complete with a WORK_DATA and
-WORK_FAIL packet as described in `Starting Builds`_.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index fb30b92..24ab31b 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,29 +1,24 @@
Zuul - A Project Gating System
==============================
-Zuul is a program that is used to gate the source code repository of a
-project so that changes are only merged if they pass tests.
+Zuul is a program that drives continuous integration, delivery, and
+deployment systems with a focus on project gating and interrelated
+projects.
-The main component of Zuul is the scheduler. It receives events
-related to proposed changes, triggers tests based on those events, and
-reports back.
+Zuul's documentation is organized in three guides based on audience.
+If Zuul is being used to gate or drive automation around your project,
+read the :doc:`user/index` to find out how to configure Zuul. If you
+are installing or operating a Zuul system, you will also find the
+:doc:`admin/index` useful. If you want help make Zuul itself better,
+take a look at the :doc:`developer/index`.
Contents:
.. toctree::
:maxdepth: 2
- quick-start
- gating
- connections
- triggers
- reporters
- zuul
- merger
- cloner
- executors
- statsd
- client
+ user/index
+ admin/index
developer/index
Indices and tables
diff --git a/doc/source/merger.rst b/doc/source/merger.rst
deleted file mode 100644
index d8de702..0000000
--- a/doc/source/merger.rst
+++ /dev/null
@@ -1,74 +0,0 @@
-:title: Merger
-
-Merger
-======
-
-The Zuul Merger is a separate component which communicates with the
-main Zuul server via Gearman. Its purpose is to speculatively merge
-the changes for Zuul in preparation for testing. The resulting git
-commits also must be served to the test workers, and the server(s)
-running the Zuul Merger are expected to do this as well. Because both
-of these tasks are resource intensive, any number of Zuul Mergers can
-be run in parallel on distinct hosts.
-
-Configuration
-~~~~~~~~~~~~~
-
-The Zuul Merger can read the same zuul.conf file as the main Zuul
-server and requires the ``gearman``, ``gerrit``, ``merger``, and
-``zuul`` sections (indicated fields only). Be sure the zuul_url is
-set appropriately on each host that runs a zuul-merger.
-
-Zuul References
-~~~~~~~~~~~~~~~
-
-As the DependentPipelineManager may combine several changes together
-for testing when performing speculative execution, determining exactly
-how the workspace should be set up when running a Job can be complex.
-To alleviate this problem, Zuul performs merges itself, merging or
-cherry-picking changes as required and identifies the result with a
-Git reference of the form ``refs/zuul/<branch>/Z<random sha1>``.
-Preparing the workspace is then a simple matter of fetching that ref
-and checking it out. The parameters that provide this information are
-described in :ref:`executors`.
-
-These references need to be made available via a Git repository that
-is available to workers (such as Jenkins). This is accomplished by
-serving Zuul's Git repositories directly.
-
-Serving Zuul Git Repos
-~~~~~~~~~~~~~~~~~~~~~~
-
-Zuul maintains its own copies of any needed Git repositories in the
-directory specified by ``git_dir`` in the ``merger`` section of
-zuul.conf (by default, /var/lib/zuul/git). To directly serve Zuul's
-Git repositories in order to provide Zuul refs for workers, you can
-configure Apache to do so using the following directives::
-
- SetEnv GIT_PROJECT_ROOT /var/lib/zuul/git
- SetEnv GIT_HTTP_EXPORT_ALL
-
- AliasMatch ^/p/(.*/objects/[0-9a-f]{2}/[0-9a-f]{38})$ /var/lib/zuul/git/$1
- AliasMatch ^/p/(.*/objects/pack/pack-[0-9a-f]{40}.(pack|idx))$ /var/lib/zuul/git/$1
- ScriptAlias /p/ /usr/lib/git-core/git-http-backend/
-
-Note that Zuul's Git repositories are not bare, which means they have
-a working tree, and are not suitable for public consumption (for
-instance, a clone will produce a repository in an unpredictable state
-depending on what the state of Zuul's repository is when the clone
-happens). They are, however, suitable for automated systems that
-respond to Zuul triggers.
-
-Clearing old references
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The references created under refs/zuul are not garbage collected.
-Since git fetch sends them all to Gerrit when syncing the repositories,
-the time spent on merges will slowly grow over time and eventually
-become noticeable.
-
-To clean them up you can use the ``tools/zuul-clear-refs.py`` script on
-each repository. It will delete Zuul references that point to commits
-whose commit date is older than a given number of days (default
-360)::
-
- ./tools/zuul-clear-refs.py /path/to/zuul/git/repo
diff --git a/doc/source/quick-start.rst b/doc/source/quick-start.rst
deleted file mode 100644
index 6d16c92..0000000
--- a/doc/source/quick-start.rst
+++ /dev/null
@@ -1,162 +0,0 @@
-Quick Start Guide
-=================
-
-System Requirements
--------------------
-
-For most deployments zuul only needs 1-2GB. OpenStack uses a 30GB setup.
-
-Install Zuul
-------------
-
-You can get zuul from pypi via::
-
- pip install zuul
-
-Zuul Components
----------------
-
-Zuul provides the following components:
-
- - **zuul-server**: scheduler daemon which communicates with Gerrit and
- Gearman. Handles receiving events, executing jobs, collecting results
- and posting reports.
- - **zuul-merger**: speculative-merger which communicates with Gearman.
- Prepares Git repositories for jobs to test against. This additionally
- requires a web server hosting the Git repositories which can be cloned
- by the jobs.
- - **zuul-cloner**: client side script used to setup job workspace. It is
- used to clone the repositories prepared by the zuul-merger described
- previously.
- - **gearmand**: optional builtin gearman daemon provided by zuul-server
-
-External components:
-
- - Jenkins Gearman plugin: Used by Jenkins to connect to Gearman
-
-Zuul Communication
-------------------
-
-All the Zuul components communicate with each other using Gearman, in
-addition to the following communication channels:
-
-zuul-server:
-
- - Gerrit
- - Gearman Daemon
-
-zuul-merger:
-
- - Gerrit
- - Gearman Daemon
-
-zuul-cloner:
-
- - http hosted zuul-merger git repos
-
-Jenkins:
-
- - Gearman Daemon via Jenkins Gearman Plugin
-
-Zuul Setup
-----------
-
-At minimum we need to provide **zuul.conf** and **layout.yaml**, placed
-in the /etc/zuul/ directory. You will also need a zuul user in Gerrit
-and an SSH key for that user. The following example uses the builtin
-gearmand service in zuul.
-
-**zuul.conf**::
-
- [zuul]
- layout_config=/etc/zuul/layout.yaml
-
- [merger]
- git_dir=/git
- zuul_url=http://zuul.example.com/p
-
- [gearman_server]
- start=true
-
- [gearman]
- server=127.0.0.1
-
- [connection gerrit]
- driver=gerrit
- server=git.example.com
- port=29418
- baseurl=https://git.example.com/gerrit/
- user=zuul
- sshkey=/home/zuul/.ssh/id_rsa
-
-See :doc:`zuul` for more details.
-
-The following sets up a basic timer triggered job using zuul.
-
-**layout.yaml**::
-
- pipelines:
- - name: periodic
- source: gerrit
- manager: IndependentPipelineManager
- trigger:
- timer:
- - time: '0 * * * *'
-
- projects:
- - name: aproject
- periodic:
- - aproject-periodic-build
-
-Starting Zuul
--------------
-
-You can run zuul-server with the **-d** option to keep it from
-daemonizing. It's a good idea at first to confirm there are no issues
-with your configuration.
-
-Simply run::
-
- zuul-server
-
-Once run you should have 2 zuul-server processes::
-
- zuul 12102 1 0 Jan21 ? 00:15:45 /home/zuul/zuulvenv/bin/python /home/zuul/zuulvenv/bin/zuul-server -d
- zuul 12107 12102 0 Jan21 ? 00:00:01 /home/zuul/zuulvenv/bin/python /home/zuul/zuulvenv/bin/zuul-server -d
-
-Note: In this example zuul was installed in a virtualenv.
-
-The second zuul-server process is gearmand, which runs if you are using
-the builtin gearmand server; otherwise there will only be one process.
-
-Zuul won't actually process your job queue, however, unless you also
-have a zuul-merger process running.
-
-Simply run::
-
- zuul-merger
-
-Zuul should now be able to process your periodic job as configured above once
-the Jenkins side of things is configured.
-
-Jenkins Setup
--------------
-
-Install the Jenkins Gearman Plugin via the Jenkins plugin management
-interface. Then navigate to **Manage > Configuration > Gearman** and
-set up the hostname/IP and port of the Gearman server for Jenkins to
-connect to.
-
-At this point gearman should be running your Jenkins jobs.
-
-Troubleshooting
----------------
-
-To check Gearman function registration (jobs), you can use telnet to
-connect to gearman and confirm that Jenkins is registering your
-configured jobs::
-
- telnet <gearman_ip> 4730
-
-Useful commands are **workers** and **status** which you can run by just
-typing those commands once connected to gearman. Every job in your Jenkins
-master must appear when you run **workers** for Zuul to be able to run jobs
-against your Jenkins instance.
diff --git a/doc/source/reporters.rst b/doc/source/reporters.rst
deleted file mode 100644
index ae6ab1c..0000000
--- a/doc/source/reporters.rst
+++ /dev/null
@@ -1,143 +0,0 @@
-:title: Reporters
-
-Reporters
-=========
-
-Zuul can communicate results and progress back to configurable
-protocols. For example, after succeeding in a build a pipeline can be
-configured to post a positive review back to Gerrit.
-
-A report can be handled at three stages: start, success, or failure.
-Each stage can have multiple reports. For example, you can set
-Verified on Gerrit and send an email.
-
-Gerrit
-------
-
-Zuul works with standard versions of Gerrit by invoking the
-``gerrit`` command over an SSH connection. It reports back to
-Gerrit using SSH.
-
-The dictionary passed to the Gerrit reporter is used for ``gerrit
-review`` arguments, with the boolean value of ``true`` simply
-indicating that the argument should be present without following it
-with a value. For example, ``verified: 1`` becomes ``gerrit review
---verified 1`` and ``submit: true`` becomes ``gerrit review
---submit``.
-
-A :ref:`connection` that uses the gerrit driver must be supplied to the
-trigger.
-
-GitHub
-------
-
-Zuul reports back to GitHub via the GitHub API. Available reports
-include a PR comment containing the build results, a commit status on
-start, success and failure, an issue label addition/removal on the PR,
-and a merge of the PR itself. Status name, description, and context
-are taken from the pipeline.
-
-A :ref:`connection` that uses the github driver must be supplied to the
-reporter. It has the following options:
-
- **status**
- String value (``pending``, ``success``, ``failure``) that the reporter should
- set as the commit status on github.
- ``status: 'success'``
-
- **status-url**
- String value for a link url to set in the github status. Defaults to the zuul
- server status_url, or the empty string if that is unset.
-
- **comment**
- Boolean value (``true`` or ``false``) that determines if the reporter should
- add a comment with the pipeline status to the github pull request. Defaults
- to ``true``. Only used for Pull Request based events.
- ``comment: false``
-
- **merge**
- Boolean value (``true`` or ``false``) that determines if the reporter should
- merge the pull request. Defaults to ``false``. Only used for Pull Request based
- events.
- ``merge: true``
-
- **label**
- List of strings each representing an exact label name which should be added
- to the pull request by the reporter. Only used for Pull Request based events.
- ``label: 'test successful'``
-
- **unlabel**
- List of strings each representing an exact label name which should be removed
- from the pull request by the reporter. Only used for Pull Request based events.
- ``unlabel: 'test failed'``
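-
-As a sketch (assuming a connection named *my_github*; trigger options
-are covered in the triggers documentation), a pipeline using the
-GitHub reporter might look like::
-
- pipelines:
- - name: check
- manager: IndependentPipelineManager
- source: my_github
- trigger:
- my_github:
- - event: pull_request
- start:
- my_github:
- status: 'pending'
- success:
- my_github:
- status: 'success'
- failure:
- my_github:
- status: 'failure'
- comment: true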
-
-SMTP
-----
-
-A simple email reporter is also available.
-
-A :ref:`connection` that uses the smtp driver must be supplied to the
-reporter.
-
-SMTP Configuration
-~~~~~~~~~~~~~~~~~~
-
-zuul.conf contains the SMTP server and default to/from as described
-in :ref:`zuulconf`.
-
-Each pipeline can overwrite the ``subject`` or the ``to`` or ``from`` address by
-providing alternatives as arguments to the reporter. For example, ::
-
- pipelines:
- - name: post-merge
- manager: IndependentPipelineManager
- source: my_gerrit
- trigger:
- my_gerrit:
- - event: change-merged
- success:
- outgoing_smtp:
- to: you@example.com
- failure:
- internal_smtp:
- to: you@example.com
- from: alternative@example.com
- subject: Change {change} failed
-
-SQL
----
-
-This reporter is used to store results in a database.
-
-A :ref:`connection` that uses the sql driver must be supplied to the
-reporter.
-
-SQL Configuration
-~~~~~~~~~~~~~~~~~
-
-zuul.conf contains the database connection and credentials. To store different
-reports in different databases you'll need to create a new connection per
-database.
-
-The sql reporter is used to store the results from individual builds rather
-than the change. As such the sql reporter does nothing on "start" or
-"merge-failure".
-
-**score**
- A score to store for the result of the build. eg: -1 might indicate a failed
- build similar to the vote posted back via the gerrit reporter.
-
-For example ::
-
- pipelines:
- - name: post-merge
- manager: IndependentPipelineManager
- source: my_gerrit
- trigger:
- my_gerrit:
- - event: change-merged
- success:
- mydb_conn:
- score: 1
- failure:
- mydb_conn:
- score: -1
diff --git a/doc/source/triggers.rst b/doc/source/triggers.rst
deleted file mode 100644
index 41a56a0..0000000
--- a/doc/source/triggers.rst
+++ /dev/null
@@ -1,247 +0,0 @@
-:title: Triggers
-
-Triggers
-========
-
-The process of merging a change starts with proposing a change to be
-merged. Zuul supports Gerrit and GitHub as triggering systems.
-Zuul's design is modular, so alternate triggering and reporting
-systems can be supported.
-
-Gerrit
-------
-
-Zuul works with standard versions of Gerrit by invoking the ``gerrit
-stream-events`` command over an SSH connection. It also reports back
-to Gerrit using SSH.
-
-If using Gerrit 2.7 or later, make sure the user is a member of a group
-that is granted the ``Stream Events`` permission, otherwise it will not
-be able to invoke the ``gerrit stream-events`` command over SSH.
-
-A connection name with the gerrit driver can take multiple events with
-the following options.
-
- **event**
- The event name from gerrit. Examples: ``patchset-created``,
- ``comment-added``, ``ref-updated``. This field is treated as a
- regular expression.
-
- **branch**
- The branch associated with the event. Example: ``master``. This
- field is treated as a regular expression, and multiple branches may
- be listed.
-
- **ref**
- On ref-updated events, the branch parameter is not used; instead the
- ref is provided. Currently Gerrit has the somewhat idiosyncratic
- behavior of specifying bare refs for branch names (e.g., ``master``),
- but full ref names for other kinds of refs (e.g., ``refs/tags/foo``).
- Zuul matches what you put here exactly against what Gerrit
- provides. This field is treated as a regular expression, and
- multiple refs may be listed.
-
- **ignore-deletes**
- When a branch is deleted, a ref-updated event is emitted with a newrev
- of all zeros specified. The ``ignore-deletes`` field is a boolean value
- that describes whether or not these newrevs trigger ref-updated events.
- The default is True, which will not trigger ref-updated events.
-
- **approval**
- This is only used for ``comment-added`` events. It only matches if
- the event has a matching approval associated with it. Example:
- ``code-review: 2`` matches a ``+2`` vote on the code review category.
- Multiple approvals may be listed.
-
- **email**
- This is used for any event. It takes a regex applied to the performer's
- email, i.e. the Gerrit account email address. If you want to specify
- several email filters, you must use a YAML list. Make sure to use
- non-greedy matchers and to escape dots!
- Example: ``email: ^.*?@example\.org$``.
-
- **email_filter** (deprecated)
- A deprecated alternate spelling of *email*. Only one of *email* or
- *email_filter* should be used.
-
- **username**
- This is used for any event. It takes a regex applied to the performer's
- username, i.e. the Gerrit account name. If you want to specify several
- username filters, you must use a YAML list. Make sure to use non-greedy
- matchers and to escape dots!
- Example: ``username: ^jenkins$``.
-
- **username_filter** (deprecated)
- A deprecated alternate spelling of *username*. Only one of *username* or
- *username_filter* should be used.
-
- **comment**
- This is only used for ``comment-added`` events. It accepts a list of
- regexes that are searched for in the comment string. If any of these
- regexes matches a portion of the comment string the trigger is
- matched. ``comment: retrigger`` will match when comments
- containing 'retrigger' somewhere in the comment text are added to a
- change.
-
- **comment_filter** (deprecated)
- A deprecated alternate spelling of *comment*. Only one of *comment* or
- *comment_filter* should be used.
-
- *require-approval*
- This may be used for any event. It requires that a certain kind
- of approval be present for the current patchset of the change (the
- approval could be added by the event in question). It follows the
- same syntax as the :ref:`"approval" pipeline requirement
- <pipeline-require-approval>`. For each specified criteria there must
- exist a matching approval.
-
- *reject-approval*
- This takes a list of approvals in the same format as
- *require-approval* but will fail to enter the pipeline if there is
- a matching approval.
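-
-As a sketch (assuming a connection named *my_gerrit*), a pipeline
-triggering on new patchsets and on "recheck" comments might look
-like::
-
- pipelines:
- - name: check
- manager: IndependentPipelineManager
- source: my_gerrit
- trigger:
- my_gerrit:
- - event: patchset-created
- - event: comment-added
- comment: recheck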
-
-GitHub
-------
-
-Github webhook events can be configured as triggers.
-
-A connection name with the github driver can take multiple events with the
-following options.
-
- **event**
- The event from github. Supported events are ``pull_request``,
- ``pull_request_review``, and ``push``.
-
- A ``pull_request`` event will
- have associated action(s) to trigger from. The supported actions are:
-
- *opened* - pull request opened
-
- *changed* - pull request synchronized
-
- *closed* - pull request closed
-
- *reopened* - pull request reopened
-
- *comment* - comment added on pull request
-
- *labeled* - label added on pull request
-
- *unlabeled* - label removed from pull request
-
- *review* - review added on pull request
-
- *push* - head reference updated (pushed to branch)
-
- *status* - status set on commit
-
- A ``pull_request_review`` event will
- have associated action(s) to trigger from. The supported actions are:
-
- *submitted* - pull request review added
-
- *dismissed* - pull request review removed
-
- **branch**
- The branch associated with the event. Example: ``master``. This
- field is treated as a regular expression, and multiple branches may
- be listed. Used for ``pull_request`` and ``pull_request_review`` events.
-
- **comment**
- This is only used for ``pull_request`` ``comment`` actions. It accepts a
- list of regexes that are searched for in the comment string. If any of these
- regexes matches a portion of the comment string the trigger is matched.
- ``comment: retrigger`` will match when comments containing 'retrigger'
- somewhere in the comment text are added to a pull request.
-
- **label**
- This is only used for ``labeled`` and ``unlabeled`` ``pull_request`` actions.
- It accepts a list of strings each of which matches the label name in the
- event literally. ``label: recheck`` will match a ``labeled`` action when
- pull request is labeled with a ``recheck`` label. ``label: 'do not test'``
- will match an ``unlabeled`` action when a label with name ``do not test`` is
- removed from the pull request.
-
- **state**
- This is only used for ``pull_request_review`` events. It accepts a list of
- strings each of which is matched to the review state, which can be one of
- ``approved``, ``comment``, or ``request_changes``.
-
- **status**
- This is only used for ``status`` actions. It accepts a list of strings each of
- which matches the user setting the status, the status context, and the status
- itself in the format of ``user:context:status``. For example,
- ``zuul_github_ci_bot:check_pipeline:success``.
-
- **ref**
- This is only used for ``push`` events. This field is treated as a regular
- expression and multiple refs may be listed. GitHub always sends the full
- ref name, e.g. ``refs/tags/bar``, and this string is matched against the regexp.
-
-GitHub Configuration
-~~~~~~~~~~~~~~~~~~~~
-
-Configure GitHub `webhook events
-<https://developer.github.com/webhooks/creating/>`_.
-
-Set *Payload URL* to
-``http://<zuul-hostname>/connection/<connection-name>/payload``.
-
-Set *Content Type* to ``application/json``.
-
-Select *Events* you are interested in. See above for the supported events.
-
-Timer
------
-
-A simple timer trigger is available as well. It supports triggering
-jobs in a pipeline based on cron-style time instructions.
-
-Timers don't require a special connection or driver. Instead they can
-be used by listing **timer** as the trigger.
-
-This trigger will run based on a cron-style time specification.
-It will enqueue an event into its pipeline for every project
-defined in the configuration. Any job associated with the
-pipeline will run in response to that event.
-
- **time**
- The time specification in cron syntax. Only the 5 part syntax is
- supported, not the symbolic names. Example: ``0 0 * * *`` runs
- at midnight.
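-
-As a sketch (assuming a connection named *my_gerrit* as the pipeline
-source), a pipeline run nightly at midnight might be configured as::
-
- pipelines:
- - name: nightly
- manager: IndependentPipelineManager
- source: my_gerrit
- trigger:
- timer:
- - time: '0 0 * * *'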
-
-Zuul
-----
-
-The Zuul trigger generates events based on internal actions in Zuul.
-Multiple events may be listed.
-
-Zuul events don't require a special connection or driver. Instead they
-can be used by listing **zuul** as the trigger.
-
- **event**
- The event name. Currently supported:
-
- *project-change-merged* when Zuul merges a change to a project,
- it generates this event for every open change in the project.
-
- *parent-change-enqueued* when Zuul enqueues a change into any
- pipeline, it generates this event for every child of that
- change.
-
- **pipeline**
- Only available for ``parent-change-enqueued`` events. This is the
- name of the pipeline in which the parent change was enqueued.
-
- *require-approval*
- This may be used for any event. It requires that a certain kind
- of approval be present for the current patchset of the change (the
- approval could be added by the event in question). It follows the
- same syntax as the :ref:`"approval" pipeline requirement
- <pipeline-require-approval>`. For each specified criteria there must
- exist a matching approval.
-
- *reject-approval*
- This takes a list of approvals in the same format as
- *require-approval* but will fail to enter the pipeline if there is
- a matching approval.
diff --git a/doc/source/user/concepts.rst b/doc/source/user/concepts.rst
new file mode 100644
index 0000000..6197396
--- /dev/null
+++ b/doc/source/user/concepts.rst
@@ -0,0 +1,85 @@
+:title: Zuul Concepts
+
+Zuul Concepts
+=============
+
+Zuul's configuration is organized around the concept of a *pipeline*.
+In Zuul, a pipeline encompasses a workflow process which can be
+applied to one or more projects. For instance, a "check" pipeline
+might describe the actions which should cause newly proposed changes
+to projects to be tested. A "gate" pipeline might implement
+:ref:`project_gating` to automate merging changes to projects only if
+their tests pass. A "post" pipeline might update published
+documentation for a project when changes land.
+
+The names "check", "gate", and "post" are arbitrary -- these are not
+concepts built into Zuul, but rather they are just a few common
+examples of workflow processes that can be described to Zuul and
+implemented as pipelines.
+
+Once a pipeline has been defined, any number of projects may be
+associated with it, each one specifying what jobs should be run for
+that project in a given pipeline.
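+
+As a rough sketch (the exact syntax is described in the project
+configuration documentation; the project, pipeline, and job names here
+are hypothetical), such an association might look like::
+
+ - project:
+ name: example/project
+ check:
+ jobs:
+ - example-unit-tests
+ gate:
+ jobs:
+ - example-unit-tests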
+
+Pipelines have associated *triggers* which are descriptions of events
+which should cause something to be enqueued into a pipeline. For
+example, when a patchset is uploaded to Gerrit, a Gerrit
+*patchset-created* event is emitted. A pipeline configured to trigger
+on *patchset-created* events would then enqueue the associated change
+when Zuul receives that event. If there are jobs associated with that
+project and pipeline, they will be run. In addition to Gerrit, other
+triggers are available, including GitHub, timer, and zuul. See
+:ref:`drivers` for a full list of available triggers.
+
+Once all of the jobs for an item in a pipeline have been run, the
+pipeline's *reporters* are responsible for reporting the results of
+all of the jobs. Continuing the example from earlier, if a pipeline
+were configured with a Gerrit reporter, it would leave a review
+comment on the change and set any approval votes that are configured.
+There are several reporting phases available, each of which may be
+configured with any number of reporters. See :ref:`drivers` for a
+full list of available reporters.
+
+The items enqueued into a pipeline are each associated with a git ref.
+That ref may point to a proposed change, or it may be the tip of a
+branch or a tag. The triggering event determines the ref, and whether
+it represents a proposed or merged commit. Zuul prepares the ref for
+an item before running jobs. In the case of a proposed change, that
+means speculatively merging the change into its target branch. This
+means that any jobs that operate on the change will run with the git
+repo in the state it will be in after the change merges (which may be
+substantially different than the git repo state of the change itself
+since the repo may have merged other changes since the change was
+originally authored). Items in a pipeline may depend on other items,
+and if they do, all of their dependent changes will be included in the
+git repo state that Zuul prepares. For more detail on this process,
+see :ref:`project_gating` and :ref:`dependencies`.
+
+The configuration for nearly everything described above is held in
+files inside of the git repos upon which Zuul operates. Zuul's
+configuration is global, but distributed. Jobs which are defined in
+one project might be used in another project while pipelines are
+available to all projects. When Zuul starts, it reads its
+configuration from all of the projects it knows about, and when
+changes to its configuration are proposed, those changes may take
+effect temporarily as part of the proposed change, or immediately
+after the change merges, depending on the type of project in which the
+change appears.
+
+Jobs specify the type and quantity of nodes which they require.
+Before executing each job, Zuul will contact its companion program,
+Nodepool, to supply them. Nodepool may be configured to supply static
+nodes or contact cloud providers to create or delete nodes as
+necessary. The types of nodes available to Zuul are determined by the
+Nodepool administrator.
+
+The executable contents of jobs themselves are Ansible playbooks.
+Ansible's support for orchestrating tasks on remote nodes is
+particularly suited to Zuul's support for multi-node testing. Ansible
+is also easy to use for simple tasks (such as executing a shell
+script) or sophisticated deployment scenarios. When Zuul runs
+Ansible, it attempts to do so in a manner most similar to the way that
+Ansible might be used to orchestrate remote systems. Ansible itself
+is run on the executor and acts remotely upon the test nodes supplied
+to a job. This facilitates continuous delivery by making it possible
+to use the same Ansible playbooks in testing and production.
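+
+As a minimal illustration (the script name is hypothetical), a job's
+main playbook might look like::
+
+ - hosts: all
+ tasks:
+ - name: Run the project's test script
+ shell: ./run-tests.sh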
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
new file mode 100644
index 0000000..0b2b5d4
--- /dev/null
+++ b/doc/source/user/config.rst
@@ -0,0 +1,963 @@
+:title: Project Configuration
+
+.. _project-config:
+
+Project Configuration
+=====================
+
+The following sections describe the main part of Zuul's configuration.
+All of what follows is found within files inside of the repositories
+that Zuul manages.
+
+Security Contexts
+-----------------
+
+When a system administrator configures Zuul to operate on a project,
+they specify one of two security contexts for that project. A
+*config-project* is one which is primarily tasked with holding
+configuration information and job content for Zuul. Jobs which are
+defined in a *config-project* are run with elevated privileges, and
+all Zuul configuration items are available for use. It is expected
+that changes to *config-projects* will undergo careful scrutiny before
+being merged.
+
+An *untrusted-project* is a project whose primary focus is not to
+operate Zuul, but rather it is one of the projects being tested or
+deployed. The Zuul configuration language available to these projects
+is somewhat restricted (as detailed in individual sections below), and
+jobs defined in these projects run in a restricted execution
+environment since they may be operating on changes which have not yet
+undergone review.
+
+Configuration Loading
+---------------------
+
+When Zuul starts, it examines all of the git repositories which are
+specified by the system administrator in :ref:`tenant-config` and searches
+for files in the root of each repository. In the case of a
+*config-project*, Zuul looks for a file named `zuul.yaml`. In the
+case of an *untrusted-project*, Zuul looks first for `zuul.yaml` and
+if that is not found, `.zuul.yaml` (with a leading dot). In the case
+of an *untrusted-project*, the configuration from every branch is
+included, however, in the case of a *config-project*, only the
+`master` branch is examined.
+
+When a change is proposed to one of these files in an
+*untrusted-project*, the configuration proposed in the change is
+merged into the running configuration so that any changes to Zuul's
+configuration are self-testing as part of that change. If there is a
+configuration error, no jobs will be run and the error will be
+reported by any applicable pipelines. In the case of a change to a
+*config-project*, the new configuration is parsed and examined for
+errors, but the new configuration is not used in testing the change.
+This is because configuration in *config-projects* is able to access
+elevated privileges and should always be reviewed before being merged.
+
+As soon as a change containing a Zuul configuration change merges to
+any Zuul-managed repository, the new configuration takes effect
+immediately.
+
+Configuration Items
+-------------------
+
+The `zuul.yaml` and `.zuul.yaml` configuration files are
+YAML-formatted and are structured as a series of items, each of which
+is described below.
+
+.. _pipeline:
+
+Pipeline
+~~~~~~~~
+
+A pipeline describes a workflow operation in Zuul. It associates jobs
+for a given project with triggering and reporting events.
+
+Its flexible configuration allows for characterizing any number of
+workflows, and by specifying each as a named configuration, makes it
+easy to apply similar workflow operations to projects or groups of
+projects.
+
+By way of example, one of the primary uses of Zuul is to perform
+project gating. To do so, one can create a *gate* pipeline which
+tells Zuul that when a certain event (such as approval by a code
+reviewer) occurs, the corresponding change or pull request should be
+enqueued into the pipeline. When that happens, the jobs which have
+been configured to run for that project in the *gate* pipeline are
+run, and when they complete, the pipeline reports the results to the
+user.
+
+Pipeline configuration items may only appear in *config-projects*.
+
+Generally, a Zuul administrator would define a small number of
+pipelines which represent the workflow processes used in their
+environment. Each project can then be added to the available
+pipelines as appropriate.
+
+Here is an example *check* pipeline, which runs whenever a new
+patchset is created in Gerrit. If the associated jobs all report
+success, the pipeline reports back to Gerrit with a *Verified* vote of
++1, or if at least one of them fails, a -1::
+
+ - pipeline:
+ name: check
+ manager: independent
+ trigger:
+ my_gerrit:
+ - event: patchset-created
+ success:
+ my_gerrit:
+ verified: 1
+ failure:
+ my_gerrit:
+ verified: -1
+
+.. TODO: See TODO for more annotated examples of common pipeline configurations.
+
+The attributes available on a pipeline are as follows (all are
+optional unless otherwise specified):
+
+**name** (required)
+ This is used later in the project definition to indicate what jobs
+ should be run for events in the pipeline.
+
+**manager** (required)
+ There are currently two schemes for managing pipelines:
+
+ .. _independent_pipeline_manager:
+
+ *independent*
+ Every event in this pipeline should be treated as independent of
+ other events in the pipeline. This is appropriate when the order of
+ events in the pipeline doesn't matter because the results of the
+ actions this pipeline performs can not affect other events in the
+ pipeline. For example, when a change is first uploaded for review,
+ you may want to run tests on that change to provide early feedback
+ to reviewers. At the end of the tests, the change is not going to
+ be merged, so it is safe to run these tests in parallel without
+ regard to any other changes in the pipeline. They are independent.
+
+ Another type of pipeline that is independent is a post-merge
+ pipeline. In that case, the changes have already merged, so the
+ results can not affect any other events in the pipeline.
+
+ .. _dependent_pipeline_manager:
+
+ *dependent*
+ The dependent pipeline manager is designed for gating. It ensures
+ that every change is tested exactly as it is going to be merged
+ into the repository. An ideal gating system would test one change
+ at a time, applied to the tip of the repository, and only if that
+ change passed tests would it be merged. Then the next change in
+ line would be tested the same way. In order to achieve parallel
+ testing of changes, the dependent pipeline manager performs
+ speculative execution on changes. It orders changes based on
+ their entry into the pipeline. It begins testing all changes in
+ parallel, assuming that each change ahead in the pipeline will pass
+ its tests. If they all succeed, all the changes can be tested and
+ merged in parallel. If a change near the front of the pipeline
+ fails its tests, each change behind it ignores whatever tests have
+ been completed and is tested again without the change in front.
+ This way gate tests may run in parallel but still be tested
+ correctly, exactly as they will appear in the repository when
+ merged.
+
+ For more detail on the theory and operation of Zuul's dependent
+ pipeline manager, see: :doc:`gating`.
+
+**allow-secrets**
+ This is a boolean which can be used to prevent jobs which require
+ secrets from running in this pipeline. Some pipelines run on
+ proposed changes and therefore execute code which has not yet been
+ reviewed. In such a case, allowing a job to use a secret could
+ result in that secret being exposed. The default is False, meaning
+ that in order to run jobs with secrets, this must be explicitly
+ enabled on each Pipeline where that is safe.
+
+ For more information, see :ref:`secret`.
+
+**description**
+ This field may be used to provide a textual description of the
+ pipeline. It may appear in the status page or in documentation.
+
+**success-message**
+ The introductory text in reports when all the voting jobs are
+ successful. Defaults to "Build successful."
+
+**failure-message**
+ The introductory text in reports when at least one voting job fails.
+ Defaults to "Build failed."
+
+**merge-failure-message**
+ The introductory text in the message reported when a change fails to
+ merge with the current state of the repository. Defaults to "Merge
+ failed."
+
+**footer-message**
+ Supplies additional information after test results. Useful for
+ adding information about the CI system such as debugging and contact
+ details.
+
+**trigger**
+ At least one trigger source must be supplied for each pipeline.
+ Triggers are not exclusive -- matching events may be placed in
+ multiple pipelines, and they will behave independently in each of
+ the pipelines they match.
+
+ Triggers are loaded from their connection name. The driver type of
+ the connection will dictate which options are available.
+ See :ref:`drivers`.
+
+**require**
+ If this section is present, it establishes prerequisites for any
+ kind of item entering the Pipeline. Regardless of how the item is
+ to be enqueued (via any trigger or automatic dependency resolution),
+ the conditions specified here must be met or the item will not be
+ enqueued.
+
+.. _pipeline-require-approval:
+
+ **approval**
+ This requires that a certain kind of approval be present for the
+ current patchset of the change (the approval could be added by the
+ event in question). It takes several sub-parameters, all of which
+ are optional and are combined together so that there must be an
+ approval matching all specified requirements.
+
+ *username*
+ If present, an approval from this username is required. It is
+ treated as a regular expression.
+
+ *email*
+ If present, an approval with this email address is required. It
+ is treated as a regular expression.
+
+ *email-filter* (deprecated)
+ A deprecated alternate spelling of *email*. Only one of *email* or
+ *email-filter* should be used.
+
+ *older-than*
+ If present, the approval must be older than this amount of time
+ to match. Provide a time interval as a number with a suffix of
+ "w" (weeks), "d" (days), "h" (hours), "m" (minutes), "s"
+ (seconds). Example ``48h`` or ``2d``.
+
+ *newer-than*
+ If present, the approval must be newer than this amount of time
+ to match. Same format as "older-than".
+
+ Any other field is interpreted as a review category and value
+ pair. For example ``verified: 1`` would require that the approval
+ be for a +1 vote in the "Verified" column. The value may either
+ be a single value or a list: ``verified: [1, 2]`` would match
+ either a +1 or +2 vote.
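+
+ As a sketch, requiring a +2 vote in the "Verified" column from a
+ (hypothetical) user named jenkins might look like::
+
+ require:
+ approval:
+ - username: jenkins
+ verified: 2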
+
+ **open**
+ A boolean value (``true`` or ``false``) that indicates whether the change
+ must be open or closed in order to be enqueued.
+
+ **current-patchset**
+ A boolean value (``true`` or ``false``) that indicates whether the change
+ must be the current patchset in order to be enqueued.
+
+ **status**
+ A string value that corresponds with the status of the change
+ reported by the trigger.
+
+**reject**
+ If this section is present, it establishes pre-requisites that can
+ block an item from being enqueued. It can be considered a negative
+ version of **require**.
+
+ **approval**
+ This takes a list of approvals. If an approval matches the provided
+ criteria the change can not be entered into the pipeline. It follows
+ the same syntax as the :ref:`"require approval" pipeline above
+ <pipeline-require-approval>`.
+
+ Example to reject a change with any negative vote::
+
+ reject:
+ approval:
+ - code-review: [-1, -2]
+
+**dequeue-on-new-patchset**
+ Normally, if a new patchset is uploaded to a change that is in a
+ pipeline, the existing entry in the pipeline will be removed (with
+ jobs canceled and any dependent changes that can no longer merge as
+ well). To suppress this behavior (and allow jobs to continue
+ running), set this to ``false``. Default: ``true``.
+
+**ignore-dependencies**
+ In any kind of pipeline (dependent or independent), Zuul will
+ attempt to enqueue all dependencies ahead of the current change so
+ that they are tested together (independent pipelines report the
+ results of each change regardless of the results of changes ahead).
+ To ignore dependencies completely in an independent pipeline, set
+ this to ``true``. This option is ignored by dependent pipelines.
+ The default is: ``false``.
+
+**precedence**
+ Indicates how the build scheduler should prioritize jobs for
+ different pipelines. Each pipeline may have one precedence, jobs
+ for pipelines with a higher precedence will be run before ones with
+ lower. The value should be one of ``high``, ``normal``, or ``low``.
+ Default: ``normal``.
+
+The following options configure *reporters*. Reporters are
+complementary to triggers; where a trigger is an event on a connection
+which causes Zuul to enqueue an item, a reporter is the action
+performed on a connection when an item is dequeued after its jobs
+complete. The actual syntax for a reporter is defined by the driver
+which implements it. See :ref:`drivers` for more information.
+
+**success**
+ Describes where Zuul should report to if all the jobs complete
+ successfully. This section is optional; if it is omitted, Zuul will
+ run jobs and do nothing on success -- it will not report at all. If
+ the section is present, the listed reporters will be asked to report
+ on the jobs. The reporters are listed by their connection name. The
+ options available depend on the driver for the supplied connection.
+
+**failure**
+ These reporters describe what Zuul should do if at least one job
+ fails.
+
+**merge-failure**
+ These reporters describe what Zuul should do if it is unable to
+ merge in the patchset. If no merge-failure reporters are listed then
+ the ``failure`` reporters will be used to notify of unsuccessful
+ merges.
+
+**start**
+ These reporters describe what Zuul should do when a change is added
+ to the pipeline. This can be used, for example, to reset a
+ previously reported result.
+
+**disabled**
+ These reporters describe what Zuul should do when a pipeline is
+ disabled. See ``disable-after-consecutive-failures``.
+
+The following options can be used to alter Zuul's behavior to mitigate
+situations in which jobs are failing frequently (perhaps due to a
+problem with an external dependency, or unusually high
+non-deterministic test failures).
+
+**disable-after-consecutive-failures**
+ If set, a pipeline can enter a ``disabled`` state if too many changes
+ in a row fail. When this value is exceeded the pipeline will stop
+ reporting to any of the ``success``, ``failure`` or ``merge-failure``
+ reporters and instead only report to the ``disabled`` reporters.
+ (No ``start`` reports are made when a pipeline is disabled).
+
+**window**
+ Dependent pipeline managers only. Zuul can rate limit dependent
+ pipelines in a manner similar to TCP flow control. Jobs are only
+ started for items in the queue if they are within the actionable
+ window for the pipeline. The initial length of this window is
+ configurable with this value. The value given should be a positive
+ integer value. A value of ``0`` disables rate limiting on the
+ DependentPipelineManager. Default: ``20``.
+
+**window-floor**
+ Dependent pipeline managers only. This is the minimum value for the
+ window described above. Should be a positive non zero integer value.
+ Default: ``3``.
+
+**window-increase-type**
+ Dependent pipeline managers only. This value describes how the window
+ should grow when changes are successfully merged by zuul. A value of
+ ``linear`` indicates that ``window-increase-factor`` should be added
+ to the previous window value. A value of ``exponential`` indicates
+ that ``window-increase-factor`` should be multiplied against the
+ previous window value and the result will become the window size.
+ Default: ``linear``.
+
+**window-increase-factor**
+ Dependent pipeline managers only. The value to be added or multiplied
+ against the previous window value to determine the new window after
+ successful change merges.
+ Default: ``1``.
+
+**window-decrease-type**
+ Dependent pipeline managers only. This value describes how the window
+ should shrink when changes are not able to be merged by Zuul. A value
+ of ``linear`` indicates that ``window-decrease-factor`` should be
+ subtracted from the previous window value. A value of ``exponential``
+ indicates that ``window-decrease-factor`` should be divided against
+ the previous window value and the result will become the window size.
+ Default: ``exponential``.
+
+**window-decrease-factor**
+ Dependent pipeline managers only. The value to be subtracted or divided
+ against the previous window value to determine the new window after
+ unsuccessful change merges.
+ Default: ``2``.
+
+
+.. _job:
+
+Job
+~~~
+
+A job is a unit of work performed by Zuul on an item enqueued into a
+pipeline. Items may run any number of jobs (which may depend on each
+other). Each job is an invocation of an Ansible playbook with a
+specific inventory of hosts. The actual tasks that are run by the job
+appear in the playbook for that job while the attributes that appear in the
+Zuul configuration specify information about when, where, and how the
+job should be run.
+
+Jobs in Zuul support inheritance. Any job may specify a single parent
+job, and any attributes not set on the child job are collected from
+the parent job. In this way, a configuration structure may be built
+starting with very basic jobs which describe characteristics that all
+jobs on the system should have, progressing through stages of
+specialization before arriving at a particular job. A job may inherit
+from any other job in any project (however, if the other job is marked
+ as `final`, some attributes may not be overridden).
+
+Jobs also support a concept called variance. The first time a job
+definition appears is called the reference definition of the job.
+Subsequent job definitions with the same name are called variants.
+These may have different selection criteria which indicate to Zuul
+that, for instance, the job should behave differently on a different
+git branch. Unlike inheritance, all job variants must be defined in
+the same project.
+
+When Zuul decides to run a job, it performs a process known as
+freezing the job. Because any number of job variants may be
+applicable, Zuul collects all of the matching variants and applies
+them in the order they appeared in the configuration. The resulting
+frozen job is built from attributes gathered from all of the
+matching variants. In this way, exactly what is run is dependent on
+the pipeline, project, branch, and content of the item.
+
+In addition to the job's main playbook, each job may specify one or
+more pre- and post-playbooks. These are run, in order, before and
+after (respectively) the main playbook. They may be used to set up
+and tear down resources needed by the main playbook. When combined
+with inheritance, they provide powerful tools for job construction. A
+job only has a single main playbook, and when inheriting from a
+parent, the child's main playbook overrides (or replaces) the
+parent's. However, the pre- and post-playbooks are appended and
+prepended in a nesting fashion. So if a parent job and child job both
+specified pre and post playbooks, the sequence of playbooks run would
+be:
+
+* parent pre-run playbook
+* child pre-run playbook
+* child playbook
+* child post-run playbook
+* parent post-run playbook
+
+Further inheritance would nest even deeper.
+
+Here is an example of two job definitions::
+
+ - job:
+ name: base
+ pre-run: copy-git-repos
+ post-run: copy-logs
+
+ - job:
+ name: run-tests
+ parent: base
+ nodes:
+ - name: test-node
+ image: fedora
+
+The following attributes are available on a job; all are optional
+unless otherwise specified:
+
+**name** (required)
+ The name of the job. By default, Zuul looks for a playbook with
+ this name to use as the main playbook for the job. This name is
+ also referenced later in a project pipeline configuration.
+
+**parent**
+ Specifies a job to inherit from. The parent job can be defined in
+ this or any other project. Any attributes not specified on a job
+ will be collected from its parent.
+
+**description**
+ A textual description of the job. Not currently used directly by
+ Zuul, but it is used by the zuul-sphinx extension to Sphinx to
+ auto-document Zuul jobs (in which case it is interpreted as
+ ReStructuredText).
+
+**success-message**
+ Normally when a job succeeds, the string "SUCCESS" is reported as
+ the result for the job. If set, this option may be used to supply a
+ different string. Default: "SUCCESS".
+
+**failure-message**
+ Normally when a job fails, the string "FAILURE" is reported as
+ the result for the job. If set, this option may be used to supply a
+ different string. Default: "FAILURE".
+
+**success-url**
+ When a job succeeds, this URL is reported along with the result.
+ Default: none.
+
+**failure-url**
+ When a job fails, this URL is reported along with the result.
+ Default: none.
+
+**hold-following-changes**
+ In a dependent pipeline, this option may be used to indicate that no
+ jobs should start on any items which depend on the current item
+ until this job has completed successfully. This may be used to
+ conserve build resources, at the expense of inhibiting the
+ parallelization which speeds the processing of items in a dependent
+ pipeline. A boolean value, default: false.
+
+**voting**
+ Indicates whether the result of this job should be used in
+ determining the overall result of the item. A boolean value,
+ default: true.
+
+**semaphore**
+ The name of a :ref:`semaphore` which should be acquired and released
+ when the job begins and ends. If the semaphore is at maximum
+ capacity, then Zuul will wait until it can be acquired before
+ starting the job. Default: none.
+
+**tags**
+ Metadata about this job. Tags are units of information attached to
+ the job; they do not affect Zuul's behavior, but they can be used
+ within the job to characterize the job. For example, a job which
+ tests a certain subsystem could be tagged with the name of that
+ subsystem, and if the job's results are reported into a database,
+ then the results of all jobs affecting that subsystem could be
+ queried. This attribute is specified as a list of strings, and when
+ inheriting jobs or applying variants, tags accumulate in a set, so
+ the result is always a set of all the tags from all the jobs and
+ variants used in constructing the frozen job, with no duplication.
+ Default: none.
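+
+  As an illustrative sketch (the job and tag names here are
+  hypothetical), a job testing a MySQL-related subsystem might be
+  tagged like this::
+
+    - job:
+        name: test-mysql-backend
+        tags:
+          - mysql
+          - database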
+
+**branches**
+ A regular expression (or list of regular expressions) which describe
+ on what branches a job should run (or in the case of variants: to
+ alter the behavior of a job for a certain branch).
+
+ If there is no job definition for a given job which matches the
+ branch of an item, then that job is not run for the item.
+ Otherwise, all of the job variants which match that branch (and any
+ other selection criteria) are used when freezing the job.
+
+ This example illustrates a job called *run-tests* which uses a
+ nodeset based on the current release of an operating system to
+ perform its tests, except when testing changes to the stable/2.0
+ branch, in which case it uses an older release::
+
+ - job:
+ name: run-tests
+ nodes: current-release
+
+ - job:
+ name: run-tests
+        branches: stable/2.0
+ nodes: old-release
+
+ In some cases, Zuul uses an implied value for the branch specifier
+ if none is supplied:
+
+ * For a job definition in a *config-project*, no implied branch
+ specifier is used. If no branch specifier appears, the job
+ applies to all branches.
+
+ * In the case of an *untrusted-project*, no implied branch specifier
+    is applied to the reference definition of a job. That is to say,
+    if the first appearance of the job definition has no branch
+    specifier, then it will apply to all branches. Note that
+ when collecting its configuration, Zuul reads the `master` branch
+ of a given project first, then other branches in alphabetical
+ order.
+
+  * Any further job variants, other than the reference definition, in
+    an *untrusted-project* will, if they do not have a branch
+    specifier, have an implied branch specifier for the current branch
+    applied.
+
+ This allows for the very simple and expected workflow where if a
+ project defines a job on the master branch with no branch specifier,
+ and then creates a new branch based on master, any changes to that
+ job definition within the new branch only affect that branch.
+
+**files**
+ This attribute indicates that the job should only run on changes
+ where the specified files are modified. This is a regular
+ expression or list of regular expressions. Default: none.
+
+**irrelevant-files**
+ This is a negative complement of `files`. It indicates that the job
+ should run unless *all* of the files changed match this list. In
+ other words, if the regular expression `docs/.*` is supplied, then
+ this job will not run if the only files changed are in the docs
+ directory. A regular expression or list of regular expressions.
+ Default: none.
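+
+  For instance, a hypothetical variant (the job name and patterns are
+  illustrative only) could skip documentation-only changes like this::
+
+    - job:
+        name: unit-tests
+        irrelevant-files:
+          - ^docs/.*$
+          - ^README.rst$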
+
+**auth**
+ Authentication information to be made available to the job. This is
+ a dictionary with two potential keys:
+
+ **inherit**
+ A boolean indicating that the authentication information referenced
+ by this job should be able to be inherited by child jobs. Normally
+ when a job inherits from another job, the auth section is not
+ included. This permits jobs to inherit the same basic structure and
+ playbook, but ensures that secret information is unable to be
+ exposed by a child job which may alter the job's behavior. If it is
+ safe for the contents of the authentication section to be used by
+ child jobs, set this to ``true``. Default: ``false``.
+
+ **secrets**
+ A list of secrets which may be used by the job. A :ref:`secret` is
+ a named collection of private information defined separately in the
+ configuration. The secrets that appear here must be defined in the
+ same project as this job definition.
+
+ In the future, other types of authentication information may be
+ added.
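+
+  As a sketch, a hypothetical job using a secret defined in the same
+  project (the job and secret names are assumptions) might look like
+  this::
+
+    - job:
+        name: publish-artifacts
+        auth:
+          inherit: false
+          secrets:
+            - artifact-site-credentials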
+
+**nodes**
+ A list of nodes which should be supplied to the job. This parameter
+ may be supplied either as a string, in which case it references a
+ :ref:`nodeset` definition which appears elsewhere in the
+ configuration, or a list, in which case it is interpreted in the
+  same way as a Nodeset definition (in essence, it is an anonymous
+  Nodeset unique to this job). See the :ref:`nodeset`
+ reference for the syntax to use in that case.
+
+ If a job has an empty or no node definition, it will still run and
+ may be able to perform actions on the Zuul executor.
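+
+  The following sketch shows both forms; the Nodeset and label names
+  are hypothetical, and the second form defines its nodes inline::
+
+    - job:
+        name: run-tests
+        nodes: single-fedora-node
+
+    - job:
+        name: run-style-checks
+        nodes:
+          - name: style-node
+            label: fedora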
+
+**override-branch**
+ When Zuul runs jobs for a proposed change, it normally checks out
+ the branch associated with that change on every project present in
+ the job. If jobs are running on a ref (such as a branch tip or
+ tag), then that ref is normally checked out. This attribute is used
+ to override that behavior and indicate that this job should,
+ regardless of the branch for the queue item, use the indicated
+ branch instead. This can be used, for example, to run a previous
+ version of the software (from a stable maintenance branch) under
+ test even if the change being tested applies to a different branch
+ (this is only likely to be useful if there is some cross-branch
+ interaction with some component of the system being tested). See
+ also the project-specific **override-branch** attribute under
+ **required-projects** to apply this behavior to a subset of a job's
+ projects.
+
+**timeout**
+ The time in minutes that the job should be allowed to run before it
+ is automatically aborted and failure is reported. If no timeout is
+ supplied, the job may run indefinitely. Supplying a timeout is
+ highly recommended.
+
+**attempts**
+ When Zuul encounters an error running a job's pre-run playbook, Zuul
+ will stop and restart the job. Errors during the main or
+  post-run playbook phase of a job are not affected by this parameter
+ (they are reported immediately). This parameter controls the number
+ of attempts to make before an error is reported. Default: 3.
+
+**pre-run**
+ The name of a playbook or list of playbooks to run before the main
+ body of a job. The playbook is expected to reside in the
+ `playbooks/` directory of the project where the job is defined.
+
+ When a job inherits from a parent, the child's pre-run playbooks are
+ run after the parent's. See :ref:`job` for more information.
+
+**post-run**
+ The name of a playbook or list of playbooks to run after the main
+ body of a job. The playbook is expected to reside in the
+ `playbooks/` directory of the project where the job is defined.
+
+ When a job inherits from a parent, the child's post-run playbooks
+ are run before the parent's. See :ref:`job` for more information.
+
+**run**
+ The name of the main playbook for this job. This parameter is not
+ normally necessary, as it defaults to the name of the job. However,
+ if a playbook with a different name is needed, it can be specified
+ here. The playbook is expected to reside in the `playbooks/`
+ directory of the project where the job is defined. When a child
+ inherits from a parent, a playbook with the name of the child job is
+ implicitly searched first, before falling back on the playbook used
+ by the parent job (unless the child job specifies a ``run``
+ attribute, in which case that value is used). Default: the name of
+ the job.
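+
+  A brief sketch combining the playbook-related attributes (all of the
+  playbook names here are hypothetical)::
+
+    - job:
+        name: integration-tests
+        pre-run: prepare-environment
+        post-run: collect-logs
+        run: run-integration-suite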
+
+**roles**
+ A list of Ansible roles to prepare for the job. Because a job runs
+ an Ansible playbook, any roles which are used by the job must be
+ prepared and installed by Zuul before the job begins. This value is
+ a list of dictionaries, each of which indicates one of two types of
+ roles: a Galaxy role, which is simply a role that is installed from
+ Ansible Galaxy, or a Zuul role, which is a role provided by a
+ project managed by Zuul. Zuul roles are able to benefit from
+ speculative merging and cross-project dependencies when used by jobs
+ in untrusted projects.
+
+ A project which supplies a role may be structured in one of two
+ configurations: a bare role (in which the role exists at the root of
+ the project), or a contained role (in which the role exists within
+ the `roles/` directory of the project, perhaps along with other
+ roles). In the case of a contained role, the `roles/` directory of
+ the project is added to the role search path. In the case of a bare
+  role, the project itself is added to the role search path. If the
+  name of the project is not the name under which the role should
+ be installed (and therefore referenced from Ansible), the `name`
+ attribute may be used to specify an alternate.
+
+ .. note:: galaxy roles are not yet implemented
+
+ **galaxy**
+ The name of the role in Ansible Galaxy. If this attribute is
+ supplied, Zuul will search Ansible Galaxy for a role by this name
+ and install it. Mutually exclusive with ``zuul``; either
+ ``galaxy`` or ``zuul`` must be supplied.
+
+ **zuul**
+ The name of a Zuul project which supplies the role. Mutually
+ exclusive with ``galaxy``; either ``galaxy`` or ``zuul`` must be
+ supplied.
+
+ **name**
+ The installation name of the role. In the case of a bare role,
+ the role will be made available under this name. Ignored in the
+ case of a contained role.
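+
+  An illustrative roles list (the project and role names are
+  assumptions; as noted above, Galaxy roles are not yet implemented)::
+
+    - job:
+        name: publish-docs
+        roles:
+          - zuul: zuul-jobs
+          - zuul: example/bare-role-project
+            name: docs-publisher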
+
+**required-projects**
+ A list of other projects which are used by this job. Any Zuul
+ projects specified here will also be checked out by Zuul into the
+ working directory for the job. Speculative merging and cross-repo
+ dependencies will be honored.
+
+ The format for this attribute is either a list of strings or
+ dictionaries. Strings are interpreted as project names,
+ dictionaries may have the following attributes:
+
+ **name**
+ The name of the required project.
+
+ **override-branch**
+ When Zuul runs jobs for a proposed change, it normally checks out
+ the branch associated with that change on every project present in
+ the job. If jobs are running on a ref (such as a branch tip or
+ tag), then that ref is normally checked out. This attribute is
+ used to override that behavior and indicate that this job should,
+ regardless of the branch for the queue item, use the indicated
+ branch instead, for only this project. See also the
+ **override-branch** attribute of jobs to apply the same behavior
+ to all projects in a job.
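+
+  A sketch of a job which requires two additional projects, pinning
+  one of them to a maintenance branch (the project names are
+  illustrative)::
+
+    - job:
+        name: integration-tests
+        required-projects:
+          - example/library
+          - name: example/legacy-library
+            override-branch: stable/1.0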
+
+**vars**
+  A dictionary of variables to supply to Ansible. When inheriting
+  from a job (or creating a variant of a job) vars are merged with
+  previous definitions. This means a variable definition with the
+  same name will override a previously defined variable, but new
+  variable names will be added to the set of defined variables.
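+
+  For instance (the variable names and values are illustrative only)::
+
+    - job:
+        name: run-tests
+        vars:
+          python_version: "3.5"
+          run_coverage: true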
+
+**dependencies**
+ A list of other jobs upon which this job depends. Zuul will not
+ start executing this job until all of its dependencies have
+ completed successfully, and if one or more of them fail, this job
+ will not be run.
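+
+  As a sketch, a hypothetical artifact-upload job could wait for a
+  build job like this::
+
+    - job:
+        name: upload-artifacts
+        dependencies:
+          - build-artifacts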
+
+**allowed-projects**
+ A list of Zuul projects which may use this job. By default, a job
+ may be used by any other project known to Zuul, however, some jobs
+ use resources or perform actions which are not appropriate for other
+ projects. In these cases, a list of projects which are allowed to
+ use this job may be supplied. If this list is not empty, then it
+ must be an exhaustive list of all projects permitted to use the job.
+ The current project (where the job is defined) is not automatically
+ included, so if it should be able to run this job, then it must be
+ explicitly listed. Default: the empty list (all projects may use
+ the job).
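+
+  For example (the project name is an assumption)::
+
+    - job:
+        name: publish-release
+        allowed-projects:
+          - example/releases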
+
+
+.. _project:
+
+Project
+~~~~~~~
+
+A project corresponds to a source code repository with which Zuul is
+configured to interact. The main responsibility of the `Project`
+configuration item is to specify which jobs should run in which
+pipelines for a given project. Within each `Project` definition, a
+section for each `Pipeline` may appear. This project-pipeline
+definition is what determines how a project participates in a
+pipeline.
+
+Consider the following `Project` definition::
+
+ - project:
+ name: yoyodyne
+ check:
+ jobs:
+ - check-syntax
+ - unit-tests
+ gate:
+ queue: integrated
+ jobs:
+ - unit-tests
+ - integration-tests
+
+The project has two project-pipeline stanzas, one for the `check`
+pipeline, and one for `gate`. Each specifies which jobs should run
+when a change for that project enters the respective pipeline -- when
+a change enters `check`, the `check-syntax` and `unit-tests` jobs are
+run.
+
+Pipelines which use the dependent pipeline manager (e.g., the `gate`
+example shown earlier) maintain separate queues for groups of
+projects. When Zuul serializes a set of changes which represent
+future potential project states, it must know about all of the
+projects within Zuul which may have an effect on the outcome of the
+jobs it runs. If project *A* uses project *B* as a library, then Zuul
+must be told about that relationship so that it knows to serialize
+changes to A and B together, so that it does not merge a change to B
+while it is testing a change to A.
+
+Zuul could simply assume that all projects are related, or even infer
+relationships from which projects a job indicates it uses; however, in
+a large system that would quickly become unwieldy and would
+unnecessarily delay changes to unrelated projects. To allow for
+flexibility in the construction of groups of related projects, the
+change queues used by dependent pipeline managers are specified
+manually. To group two or more related projects into a shared queue
+for a dependent pipeline, set the ``queue`` parameter to the same
+value for those projects.
+
+The `gate` project-pipeline definition above specifies that this
+project participates in the `integrated` shared queue for that
+pipeline.
+
+In addition to a project-pipeline definition for one or more
+`Pipelines`, the following attributes may appear in a Project:
+
+**name** (required)
+ The name of the project. If Zuul is configured with two or more
+ unique projects with the same name, the canonical hostname for the
+ project should be included (e.g., `git.example.com/foo`).
+
+**templates**
+ A list of :ref:`project-template` references; the project-pipeline
+ definitions of each Project Template will be applied to this
+ project. If more than one template includes jobs for a given
+ pipeline, they will be combined, as will any jobs specified in
+ project-pipeline definitions on the project itself.
+
+.. _project-template:
+
+Project Template
+~~~~~~~~~~~~~~~~
+
+A Project Template defines one or more project-pipeline definitions
+which can be re-used by multiple projects.
+
+A Project Template uses the same syntax as a :ref:`project`
+definition, however, in the case of a template, the ``name`` attribute
+does not refer to the name of a project, but rather names the template
+so that it can be referenced in a `Project` definition.
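+
+A minimal sketch of a template and a project applying it (all of the
+names here are illustrative)::
+
+  - project-template:
+      name: standard-jobs
+      check:
+        jobs:
+          - unit-tests
+
+  - project:
+      name: yoyodyne
+      templates:
+        - standard-jobs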
+
+.. _secret:
+
+Secret
+~~~~~~
+
+A Secret is a collection of private data for use by one or more jobs.
+In order to maintain the security of the data, the values are usually
+encrypted; however, data which are not sensitive may also be provided
+unencrypted for convenience.
+
+A Secret may only be used by jobs defined within the same project. To
+use a secret, a :ref:`job` must specify the secret within its `auth`
+section. To protect against jobs in other repositories declaring a
+job with a secret as a parent and then exposing that secret, jobs
+which inherit from a job with secrets will not inherit the secrets
+themselves. To alter that behavior, see the `inherit` job attribute.
+Further, jobs which do not permit children to inherit secrets (the
+default) are also automatically marked `final`, meaning that their
+execution related attributes may not be changed in a project-pipeline
+stanza. This is to protect against a job with secrets defined in one
+project being used by another project in a way which might expose the
+secrets. If a job with secrets is unsafe to be used by other
+projects, the `allowed-projects` job attribute can be used to restrict
+the projects which can invoke that job. Finally, pipelines which are
+used to execute proposed but unreviewed changes can set the
+`allow-secrets` attribute to indicate that they should not supply
+secrets at all in order to protect against someone proposing a change
+which exposes a secret.
+
+The following attributes are required:
+
+**name** (required)
+ The name of the secret, used in a :ref:`Job` definition to request
+ the secret.
+
+**data** (required)
+ A dictionary which will be added to the Ansible variables available
+ to the job. The values can either be plain text strings, or
+ encrypted values. See :ref:`encryption` for more information.
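+
+A sketch of a secret with unencrypted data (the names and values are
+illustrative; see :ref:`encryption` for encrypted values)::
+
+  - secret:
+      name: site-credentials
+      data:
+        username: deploy
+        password: not-actually-secret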
+
+.. _nodeset:
+
+Nodeset
+~~~~~~~
+
+A Nodeset is a named collection of nodes for use by a job. Jobs may
+specify what nodes they require individually, however, by defining
+groups of node types once and referring to them by name, job
+configuration may be simplified.
+
+A Nodeset requires two attributes:
+
+**name** (required)
+ The name of the Nodeset, to be referenced by a :ref:`job`.
+
+**nodes** (required)
+ A list of node definitions, each of which has the following format:
+
+ **name** (required)
+ The name of the node. This will appear in the Ansible inventory
+ for the job.
+
+ **label** (required)
+ The Nodepool label for the node. Zuul will request a node with
+ this label.
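+
+For example (the label names are assumptions about what Nodepool
+provides)::
+
+  - nodeset:
+      name: two-node-fedora
+      nodes:
+        - name: controller
+          label: fedora
+        - name: worker
+          label: fedora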
+
+.. _semaphore:
+
+Semaphore
+~~~~~~~~~
+
+Semaphores can be used to restrict the number of instances of certain
+jobs which run at the same time. This may be useful for jobs which
+access shared or limited resources. A semaphore has a value which
+represents the maximum number of jobs which may use that semaphore at
+the same time.
+
+Semaphores are never subject to dynamic reconfiguration. If the value
+of a semaphore is changed, it will take effect only when the change
+where it is updated is merged. An example follows::
+
+ - semaphore:
+ name: semaphore-foo
+ max: 5
+ - semaphore:
+ name: semaphore-bar
+ max: 3
+
+The following attributes are available:
+
+**name** (required)
+ The name of the semaphore, referenced by jobs.
+
+**max**
+ The maximum number of running jobs which can use this semaphore.
+ Defaults to 1.
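+
+A job makes use of a semaphore by name; for instance (the job name is
+illustrative)::
+
+  - job:
+      name: restricted-resource-test
+      semaphore: semaphore-foo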
diff --git a/doc/source/user/encryption.rst b/doc/source/user/encryption.rst
new file mode 100644
index 0000000..fdf2c5a
--- /dev/null
+++ b/doc/source/user/encryption.rst
@@ -0,0 +1,46 @@
+:title: Encryption
+
+.. _encryption:
+
+Encryption
+==========
+
+Zuul supports storing encrypted data directly in the git repositories
+of projects it operates on. If you have a job which requires private
+information in order to run (e.g., credentials to interact with a
+third-party service) those credentials can be stored along with the
+job definition.
+
+Each project in Zuul has its own automatically generated RSA keypair
+which can be used by anyone to encrypt a secret and only Zuul is able
+to decrypt it. Zuul serves each project's public key using its
+built-in webserver. Keys can be fetched at the path
+``/keys/<source>/<project>.pub`` where ``<project>`` is the name of a
+project and ``<source>`` is the name of that project's connection in
+the main Zuul configuration file.
+
+Zuul currently supports one encryption scheme, PKCS#1 with OAEP, which
+cannot store secrets longer than the key length, 4096 bits. The
+padding used by this scheme ensures that someone examining the
+encrypted data cannot determine the length of the plaintext version
+of the data, except to know that it is not longer than 4096 bits.
+
+In the config files themselves, Zuul uses an extensible method of
+specifying the encryption scheme used for a secret so that other
+schemes may be added later. To specify a secret, use the
+``!encrypted/pkcs1-oaep`` YAML tag along with the base64 encoded
+value. For example::
+
+ - secret:
+ name: test_secret
+ data:
+ password: !encrypted/pkcs1-oaep |
+ BFhtdnm8uXx7kn79RFL/zJywmzLkT1GY78P3bOtp4WghUFWobkifSu7ZpaV4NeO0s71YUsi1wGZZ
+ ...
+
+Zuul provides a standalone script to make encrypting values easy; it
+can be found at `tools/encrypt_secret.py` in the Zuul source
+directory.
+
+.. program-output:: python3 ../../tools/encrypt_secret.py --help
+
diff --git a/doc/source/gating.rst b/doc/source/user/gating.rst
similarity index 62%
rename from doc/source/gating.rst
rename to doc/source/user/gating.rst
index c10ba83..3398892 100644
--- a/doc/source/gating.rst
+++ b/doc/source/user/gating.rst
@@ -1,16 +1,18 @@
:title: Project Gating
+.. _project_gating:
+
Project Gating
==============
Traditionally, many software development projects merge changes from
developers into the repository, and then identify regressions
resulting from those changes (perhaps by running a test suite with a
-continuous integration system such as Jenkins), followed by more
-patches to fix those bugs. When the mainline of development is
-broken, it can be very frustrating for developers and can cause lost
-productivity, particularly so when the number of contributors or
-contributions is large.
+continuous integration system), followed by more patches to fix those
+bugs. When the mainline of development is broken, it can be very
+frustrating for developers and can cause lost productivity,
+particularly so when the number of contributors or contributions is
+large.
The process of gating attempts to prevent changes that introduce
regressions from being merged. This keeps the mainline of development
@@ -25,9 +27,6 @@
this process, with a particular emphasis on ensuring large numbers of
changes are tested correctly.
-Zuul was designed to handle the workflow of the OpenStack project, but
-can be used with any project.
-
Testing in parallel
-------------------
@@ -42,18 +41,17 @@
developers to create changes at a rate faster than they can be tested
and merged.
-Zuul's DependentPipelineManager allows for parallel execution of test
-jobs for gating while ensuring changes are tested correctly, exactly
-as if they had been tested one at a time. It does this by performing
-speculative execution of test jobs; it assumes that all jobs will
-succeed and tests them in parallel accordingly. If they do succeed,
-they can all be merged. However, if one fails, then changes that were
-expecting it to succeed are re-tested without the failed change. In
-the best case, as many changes as execution contexts are available may
-be tested in parallel and merged at once. In the worst case, changes
-are tested one at a time (as each subsequent change fails, changes
-behind it start again). In practice, the OpenStack project observes
-something closer to the best case.
+Zuul's :ref:`dependent pipeline manager<dependent_pipeline_manager>`
+allows for parallel execution of test jobs for gating while ensuring
+changes are tested correctly, exactly as if they had been tested one
+at a time. It does this by performing speculative execution of test
+jobs; it assumes that all jobs will succeed and tests them in parallel
+accordingly. If they do succeed, they can all be merged. However, if
+one fails, then changes that were expecting it to succeed are
+re-tested without the failed change. In the best case, as many
+changes as execution contexts are available may be tested in parallel
+and merged at once. In the worst case, changes are tested one at a
+time (as each subsequent change fails, changes behind it start again).
For example, if a core developer approves five changes in rapid
succession::
@@ -220,80 +218,34 @@
other projects currently enqueued in the gate (since they will
eventually be merged and might introduce breaking features).
-Such relationships can be defined in Zuul configuration by registering
-a job in a DependentPipeline of several projects. Whenever a change
-enters such a pipeline, it will create references for the other
-projects as well. As an example, given a main project ``acme`` and a
-plugin ``plugin`` you can define a job ``acme-tests`` which should be
-run for both projects:
+Such relationships can be defined in Zuul configuration by placing
+projects in a shared queue within a dependent pipeline. Whenever
+changes for any project enter a pipeline with such a shared queue,
+they are tested together, such that the commits for the changes ahead
+in the queue are automatically present in the jobs for the changes
+behind them. See :ref:`project` for more details.
-.. code-block:: yaml
+A given dependent pipeline may have as many shared change queues as
+necessary, so groups of related projects may share a change queue
+without interfering with unrelated projects. Independent pipelines do
+not use shared change queues, however, they may still be used to test
+changes across projects using cross-project dependencies.
- pipelines:
- - name: gate
- manager: DependentPipelineManager
+.. _dependencies:
- projects::
- - name: acme
- gate:
- - acme-tests
- - name: plugin
- gate:
- - acme-tests # Register job again
+Cross-Project Dependencies
+--------------------------
-Whenever a change enters the ``gate`` pipeline queue, Zuul creates a reference
-for it. For each subsequent change, an additional reference is created for the
-changes ahead in the queue. As a result, you will always be able to fetch the
-future state of your project dependencies for each change in the queue.
+Zuul permits users to specify dependencies across projects. Using a
+special footer in Git commit messages, users may specify that a change
+depends on another change in any repository known to Zuul.
-Based on the pipeline and project definitions above, three changes are
-inserted in the ``gate`` pipeline with the associated references:
+Zuul's cross-project dependencies behave like a directed acyclic graph
+(DAG), like git itself, to indicate a one-way dependency relationship
+between changes in different git repositories. Change A may depend on
+B, but B may not depend on A.
- ======== ======= ====== =========
- Change Project Branch Zuul Ref.
- ======== ======= ====== =========
- Change 1 acme master master/Z1
- Change 2 plugin stable stable/Z2
- Change 3 plugin master master/Z3
- ======== ======= ====== =========
-
-Since the changes enter a DependentPipelineManager pipeline, Zuul creates
-additional references:
-
- ====== ======= ========= =============================
- Change Project Zuul Ref. Description
- ====== ======= ========= =============================
- 1 acme master/Z1 acme master + change 1
- ------ ------- --------- -----------------------------
- 2 acme master/Z2 acme master + change 1
- 2 plugin stable/Z2 plugin stable + change 2
- ------ ------- --------- -----------------------------
- 3 acme master/Z3 acme master + change 1
- 3 plugin stable/Z3 plugin stable + change 2
- 3 plugin master/Z3 plugin master + change 3
- ====== ======= ========= =============================
-
-In order to test change 3, you would clone both repositories and simply
-fetch the Z3 reference for each combination of project/branch you are
-interested in testing. For example, you could fetch ``acme`` with
-master/Z3 and ``plugin`` with master/Z3 and thus have ``acme`` with
-change 1 applied as the expected state for when Change 3 would merge.
-When your job fetches several repositories without changes ahead in the
-queue, they may not have a Z reference in which case you can just check
-out the branch.
-
-
-Cross Repository Dependencies
------------------------------
-
-Zuul permits users to specify dependencies across repositories. Using
-a special header in Git commit messages, Users may specify that a
-change depends on another change in any repository known to Zuul.
-
-Zuul's cross-repository dependencies (CRD) behave like a directed
-acyclic graph (DAG), like git itself, to indicate a one-way dependency
-relationship between changes in different git repositories. Change A
-may depend on B, but B may not depend on A.
+.. TODO: update for v3 crd syntax
To use them, include ``Depends-On: <gerrit-change-id>`` in the footer of
a commit message. Use the full Change-ID ('I' + 40 characters).
@@ -302,10 +254,10 @@
Dependent Pipeline
~~~~~~~~~~~~~~~~~~
-When Zuul sees CRD changes, it serializes them in the usual manner when
-enqueuing them into a pipeline. This means that if change A depends on
-B, then when they are added to a dependent pipeline, B will appear first
-and A will follow:
+When Zuul sees changes with cross-project dependencies, it serializes
+them in the usual manner when enqueuing them into a pipeline. This
+means that if change A depends on B, then when they are added to a
+dependent pipeline, B will appear first and A will follow:
.. blockdiag::
:align: center
@@ -333,25 +285,26 @@
.. note::
- If changes with CRD do not share a change queue then Zuul is unable
- to enqueue them together, and the first will be required to merge
- before the second is enqueued.
+ If changes with cross-project dependencies do not share a change
+ queue then Zuul is unable to enqueue them together, and the first
+ will be required to merge before the second is enqueued.
Independent Pipeline
~~~~~~~~~~~~~~~~~~~~
When changes are enqueued into an independent pipeline, all of the
-related dependencies (both normal git-dependencies that come from parent
-commits as well as CRD changes) appear in a dependency graph, as in a
-dependent pipeline. This means that even in an independent pipeline,
-your change will be tested with its dependencies. So changes that were
-previously unable to be fully tested until a related change landed in a
-different repository may now be tested together from the start.
+related dependencies (both normal git-dependencies that come from
+parent commits as well as cross-project dependencies) appear in a
+dependency graph, as in a dependent pipeline. This means that even in
+an independent pipeline, your change will be tested with its
+dependencies. Changes that were previously unable to be fully tested
+until a related change landed in a different repository may now be
+tested together from the start.
-All of the changes are still independent (so you will note that the
-whole pipeline does not share a graph as in a dependent pipeline), but
-for each change tested, all of its dependencies are visually connected
-to it, and they are used to construct the git references that Zuul uses
+All of the changes are still independent (you will note that the whole
+pipeline does not share a graph as in a dependent pipeline), but for
+each change tested, all of its dependencies are visually connected to
+it, and they are used to construct the git repositories that Zuul uses
when testing.
When looking at this graph on the status page, you will note that the
@@ -383,6 +336,8 @@
additionally will appear as its own red or green dot for its test.
+.. TODO: relevant for v3?
+
Multiple Changes
~~~~~~~~~~~~~~~~
@@ -462,10 +417,12 @@
B, C <- A
}
+.. TODO: update for v3
+
Cycles
~~~~~~
-If a cycle is created by use of CRD, Zuul will abort its work very
-early. There will be no message in Gerrit and no changes that are part
-of the cycle will be enqueued into any pipeline. This is to protect
-Zuul from infinite loops.
+If a cycle is created by use of cross-project dependencies, Zuul will
+abort its work very early. There will be no message in Gerrit and no
+changes that are part of the cycle will be enqueued into any pipeline.
+This is to protect Zuul from infinite loops.
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
new file mode 100644
index 0000000..3eca04b
--- /dev/null
+++ b/doc/source/user/index.rst
@@ -0,0 +1,18 @@
+User's Guide
+============
+
+This guide is for all users of Zuul. If you work on a project where
+Zuul is used to drive automation (whether that's testing proposed
+changes, building artifacts, or deploying builds), this guide will
+help you understand the concepts that underlie Zuul, and how to
+configure it to meet your needs.
+
+
+.. toctree::
+ :maxdepth: 2
+
+ concepts
+ gating
+ config
+ jobs
+ encryption
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
new file mode 100644
index 0000000..5637552
--- /dev/null
+++ b/doc/source/user/jobs.rst
@@ -0,0 +1,103 @@
+:title: Job Content
+
+Job Content
+===========
+
+Zuul jobs are implemented as Ansible playbooks. Zuul prepares the
+repositories used for a job, installs any required Ansible roles, and
+then executes the job's playbooks. Any setup or artifact collection
+required is the responsibility of the job itself. While this flexible
+arrangement allows for almost any kind of job to be run by Zuul,
+batteries are included. Zuul has a standard library of jobs upon
+which to build.
+
+Working Directory
+-----------------
+
+Before starting each job, the Zuul executor creates a directory to
+hold all of the content related to the job. This includes some
+directories which are used by Zuul to configure and run Ansible and
+may not be accessible, as well as a directory tree, under ``work/``,
+that is readable and writable by the job. The hierarchy is:
+
+**work/**
+ The working directory of the job.
+
+**work/src/**
+ Contains the prepared git repositories for the job.
+
+**work/logs/**
+ Where the Ansible log for the job is written; your job
+ may place other logs here as well.
+
+Git Repositories
+----------------
+
+The git repositories in ``work/src`` contain the repositories for all
+of the projects specified in the ``required-projects`` section of the
+job, plus the project associated with the queue item if it isn't
+already in that list. In the case of a proposed change, that change
+and all of the changes ahead of it in the pipeline queue will already
+be merged into their respective repositories and target branches. The
+change's project will have the change's branch checked out, as will
+all of the other projects, if that branch exists (otherwise, a
+fallback or default branch will be used). If your job needs to
+operate on multiple branches, simply check out the appropriate branches
+of these git repos to ensure that the job results reflect the proposed
+future state that Zuul is testing, and all dependencies are present.
+Do not use any git remotes; the local repositories are guaranteed to
+be up to date.
+
+The repositories will be placed on the filesystem in directories
+corresponding with the canonical hostname of their source connection.
+For example::
+
+ work/src/git.example.com/project1
+ work/src/github.com/project2
+
+This is the layout that would be present for a job which includes
+project1 from the connection associated with git.example.com and
+project2 from GitHub. This helps avoid collisions between projects
+with the same name, and some language environments, such as Go,
+expect repositories in this format.
+
+Note that these git repositories are located on the executor; in order
+to be useful to most kinds of jobs, they will need to be present on
+the test nodes. The ``base`` job in the standard library contains a
+pre-playbook which copies the repositories to all of the job's nodes.
+It is recommended to always inherit from this base job to ensure that
+behavior.
+
+.. TODO: link to base job documentation and/or document src (and logs?) directory
+
+Zuul Variables
+--------------
+
+Zuul supplies not only the variables specified by the job definition
+to Ansible, but also some variables from the executor itself. They
+are:
+
+**zuul.executor.hostname**
+ The hostname of the executor.
+
+**zuul.executor.src_root**
+ The path to the source directory.
+
+**zuul.executor.log_root**
+ The path to the logs directory.
+
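+
+As a sketch, a playbook task might reference one of these variables
+like this (the task itself is illustrative only)::
+
+  - hosts: all
+    tasks:
+      - name: Show the executor log directory
+        debug:
+          msg: "Logs are collected under {{ zuul.executor.log_root }}"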
+SSH Keys
+--------
+
+Zuul starts each job with an SSH agent running and the key used to
+access the job's nodes added to that agent. Generally you won't need
+to be aware of this since Ansible will use this when performing any
+tasks on remote nodes. However, under some circumstances you may want
+to interact with the agent. For example, you may wish to add a key
+provided as a secret to the job in order to access a specific host, or
+you may want to, in a pre-playbook, replace the key used to log into
+the assigned nodes in order to further protect it from being abused by
+untrusted job content.
+
+.. TODO: describe standard lib and link to published docs for it.
+
diff --git a/doc/source/zuul.rst b/doc/source/zuul.rst
deleted file mode 100644
index f07a859..0000000
--- a/doc/source/zuul.rst
+++ /dev/null
@@ -1,967 +0,0 @@
-:title: Zuul
-
-Zuul
-====
-
-Configuration
--------------
-
-Zuul has three configuration files:
-
-**zuul.conf**
- Connection information for Gerrit and Gearman, locations of the
- other config files. (required)
-**layout.yaml**
- Project and pipeline configuration -- what Zuul does. (required)
-**logging.conf**
- Python logging config. (optional)
-
-Examples of each of the three files can be found in the etc/ directory
-of the source distribution.
-
-.. _zuulconf:
-
-zuul.conf
-~~~~~~~~~
-
-Zuul will look for ``/etc/zuul/zuul.conf`` or ``~/zuul.conf`` to
-bootstrap its configuration. Alternately, you may specify ``-c
-/path/to/zuul.conf`` on the command line.
-
-Gerrit and Gearman connection information are each described in a
-section of zuul.conf. The location of the other two configuration
-files (as well as the location of the PID file when running Zuul as a
-server) are specified in a third section.
-
-The three sections of this config and their options are documented below.
-You can also find an example zuul.conf file in the git
-`repository
-<https://git.openstack.org/cgit/openstack-infra/zuul/tree/etc/zuul.conf-sample>`_
-
-gearman
-"""""""
-
-Client connection information for gearman. If using Zuul's builtin gearmand
-server just set **server** to 127.0.0.1.
-
-**server**
- Hostname or IP address of the Gearman server.
- ``server=gearman.example.com`` (required)
-
-**port**
- Port on which the Gearman server is listening.
- ``port=4730`` (optional)
-
-**ssl_ca**
- Optional: An openssl file containing a set of concatenated “certification authority” certificates
- in PEM formet.
-
-**ssl_cert**
- Optional: An openssl file containing the client public certificate in PEM format.
-
-**ssl_key**
- Optional: An openssl file containing the client private key in PEM format.
-
-gearman_server
-""""""""""""""
-
-The builtin gearman server. Zuul can fork a gearman process from itself rather
-than connecting to an external one.
-
-**start**
- Whether to start the internal Gearman server (default: False).
- ``start=true``
-
-**listen_address**
- IP address or domain name on which to listen (default: all addresses).
- ``listen_address=127.0.0.1``
-
-**log_config**
- Path to log config file for internal Gearman server.
- ``log_config=/etc/zuul/gearman-logging.yaml``
-
-**ssl_ca**
- Optional: An openssl file containing a set of concatenated “certification authority” certificates
- in PEM formet.
-
-**ssl_cert**
- Optional: An openssl file containing the server public certificate in PEM format.
-
-**ssl_key**
- Optional: An openssl file containing the server private key in PEM format.
-
-webapp
-""""""
-
-**listen_address**
- IP address or domain name on which to listen (default: 0.0.0.0).
- ``listen_address=127.0.0.1``
-
-**port**
- Port on which the webapp is listening (default: 8001).
- ``port=8008``
-
-zuul
-""""
-
-Zuul's main configuration section. At minimum zuul must be able to find
-layout.yaml to be useful.
-
-.. note:: Must be provided when running zuul-server
-
-.. _layout_config:
-
-**layout_config**
- Path to layout config file. Used by zuul-server only.
- ``layout_config=/etc/zuul/layout.yaml``
-
-**log_config**
- Path to log config file. Used by zuul-server only.
- ``log_config=/etc/zuul/logging.yaml``
-
-**pidfile**
- Path to PID lock file. Used by zuul-server only.
- ``pidfile=/var/run/zuul/zuul.pid``
-
-**state_dir**
- Path to directory that Zuul should save state to. Used by all Zuul
- commands.
- ``state_dir=/var/lib/zuul``
-
-**jobroot_dir**
- Path to directory that Zuul should store temporary job files.
- ``jobroot_dir=/tmp``
-
-**report_times**
- Boolean value (``true`` or ``false``) that determines if Zuul should
- include elapsed times for each job in the textual report. Used by
- zuul-server only.
- ``report_times=true``
-
-**status_url**
- URL that will be posted in Zuul comments made to Gerrit changes when
- starting jobs for a change. Used by zuul-server only.
- ``status_url=https://zuul.example.com/status``
-
-**status_expiry**
- Zuul will cache the status.json file for this many seconds. This is an
- optional value and ``1`` is used by default.
- ``status_expiry=1``
-
-
-merger
-""""""
-
-The zuul-merger process configuration. Detailed documentation on this process
-can be found on the :doc:`merger` page.
-
-.. note:: Must be provided when running zuul-merger. Both services may share the
- same configuration (and even host) or otherwise have an individual
- zuul.conf.
-
-**git_dir**
- Directory that Zuul should clone local git repositories to.
- ``git_dir=/var/lib/zuul/git``
-
-**git_user_email**
- Optional: Value to pass to `git config user.email`.
- ``git_user_email=zuul@example.com``
-
-**git_user_name**
- Optional: Value to pass to `git config user.name`.
- ``git_user_name=zuul``
-
-**zuul_url**
- URL of this merger's git repos, accessible to test workers. Usually
- "http://zuul.example.com/p" or "http://zuul-merger01.example.com/p"
- depending on whether the merger is co-located with the Zuul server.
-
-**log_config**
- Path to log config file for the merger process.
- ``log_config=/etc/zuul/logging.yaml``
-
-**pidfile**
- Path to PID lock file for the merger process.
- ``pidfile=/var/run/zuul-merger/merger.pid``
-
-executor
-""""""""
-
-The zuul-executor process configuration.
-
-**finger_port**
- Port to use for finger log streamer.
- ``finger_port=79``
-
-**git_dir**
- Directory that Zuul should clone local git repositories to.
- ``git_dir=/var/lib/zuul/git``
-
-**log_config**
- Path to log config file for the executor process.
- ``log_config=/etc/zuul/logging.yaml``
-
-**private_key_file**
- SSH private key file to be used when logging into worker nodes.
- ``private_key_file=~/.ssh/id_rsa``
-
-**user**
- User ID for the zuul-executor process. In normal operation as a daemon,
- the executor should be started as the ``root`` user, but it will drop
- privileges to this user during startup.
- ``user=zuul``
-
-.. _connection:
-
-connection ArbitraryName
-""""""""""""""""""""""""
-
-A connection can be listed with any arbitrary name. The required
-parameters are specified in the :ref:`connections` documentation
-depending on what driver you are using.
-
-.. _layoutyaml:
-
-layout.yaml
-~~~~~~~~~~~
-
-This is the main configuration file for Zuul, where all of the pipelines
-and projects are defined, what tests should be run, and what actions
-Zuul should perform. There are three sections: pipelines, jobs, and
-projects.
-
-Pipelines
-"""""""""
-
-Zuul can have any number of independent pipelines. Whenever a matching
-Gerrit event is found for a pipeline, that event is added to the
-pipeline, and the jobs specified for that pipeline are run. When all
-jobs specified for the pipeline that were triggered by an event are
-completed, Zuul reports back to Gerrit the results.
-
-There are no pre-defined pipelines in Zuul, rather you can define
-whatever pipelines you need in the layout file. This is a very flexible
-system that can accommodate many kinds of workflows.
-
-Here is a quick example of a pipeline definition followed by an
-explanation of each of the parameters::
-
- - name: check
- manager: IndependentPipelineManager
- source: my_gerrit
- trigger:
- my_gerrit:
- - event: patchset-created
- success:
- my_gerrit:
- verified: 1
- failure:
- my_gerrit
- verified: -1
-
-**name**
- This is used later in the project definition to indicate what jobs
- should be run for events in the pipeline.
-
-**description**
- This is an optional field that may be used to provide a textual
- description of the pipeline.
-
-**source**
- A required field that specifies a connection that provides access to
- the change objects that this pipeline operates on. The name of the
- connection as per the zuul.conf should be specified. The driver used
- for the connection named will be the source. Currently only ``gerrit``
- drivers are supported.
-
-**success-message**
- An optional field that supplies the introductory text in message
- reported back to Gerrit when all the voting builds are successful.
- Defaults to "Build successful."
-
-**failure-message**
- An optional field that supplies the introductory text in message
- reported back to Gerrit when at least one voting build fails.
- Defaults to "Build failed."
-
-**merge-failure-message**
- An optional field that supplies the introductory text in message
- reported back to Gerrit when a change fails to merge with the
- current state of the repository.
- Defaults to "Merge failed."
-
-**footer-message**
- An optional field to supply additional information after test results.
- Useful for adding information about the CI system such as debugging
- and contact details.
-
-**manager**
- There are currently two schemes for managing pipelines:
-
- *IndependentPipelineManager*
- Every event in this pipeline should be treated as independent of
- other events in the pipeline. This is appropriate when the order of
- events in the pipeline doesn't matter because the results of the
- actions this pipeline performs can not affect other events in the
- pipeline. For example, when a change is first uploaded for review,
- you may want to run tests on that change to provide early feedback
- to reviewers. At the end of the tests, the change is not going to
- be merged, so it is safe to run these tests in parallel without
- regard to any other changes in the pipeline. They are independent.
-
- Another type of pipeline that is independent is a post-merge
- pipeline. In that case, the changes have already merged, so the
- results can not affect any other events in the pipeline.
-
- *DependentPipelineManager*
- The dependent pipeline manager is designed for gating. It ensures
- that every change is tested exactly as it is going to be merged
- into the repository. An ideal gating system would test one change
- at a time, applied to the tip of the repository, and only if that
- change passed tests would it be merged. Then the next change in
- line would be tested the same way. In order to achieve parallel
- testing of changes, the dependent pipeline manager performs
- speculative execution on changes. It orders changes based on
- their entry into the pipeline. It begins testing all changes in
- parallel, assuming that each change ahead in the pipeline will pass
- its tests. If they all succeed, all the changes can be tested and
- merged in parallel. If a change near the front of the pipeline
- fails its tests, each change behind it ignores whatever tests have
- been completed and are tested again without the change in front.
- This way gate tests may run in parallel but still be tested
- correctly, exactly as they will appear in the repository when
- merged.
-
- One important characteristic of the DependentPipelineManager is that
- it analyzes the jobs that are triggered by different projects, and
- if those projects have jobs in common, it treats those projects as
- related, and they share a single virtual queue of changes. Thus,
- if there is a job that performs integration testing on two
- projects, those two projects will automatically share a virtual
- change queue. If a third project does not invoke that job, it
- will be part of a separate virtual change queue, and changes to
- it will not depend on changes to the first two jobs.
-
- For more detail on the theory and operation of Zuul's
- DependentPipelineManager, see: :doc:`gating`.
-
-**trigger**
- At least one trigger source must be supplied for each pipeline.
- Triggers are not exclusive -- matching events may be placed in
- multiple pipelines, and they will behave independently in each of
- the pipelines they match.
-
- Triggers are loaded from their connection name. The driver type of
- the connection will dictate which options are available.
- See :doc:`triggers`.
-
-**require**
- If this section is present, it established pre-requisites for any
- kind of item entering the Pipeline. Regardless of how the item is
- to be enqueued (via any trigger or automatic dependency resolution),
- the conditions specified here must be met or the item will not be
- enqueued.
-
-.. _pipeline-require-approval:
-
- **approval**
- This requires that a certain kind of approval be present for the
- current patchset of the change (the approval could be added by the
- event in question). It takes several sub-parameters, all of which
- are optional and are combined together so that there must be an
- approval matching all specified requirements.
-
- *username*
- If present, an approval from this username is required. It is
- treated as a regular expression.
-
- *email*
- If present, an approval with this email address is required. It
- is treated as a regular expression.
-
- *email-filter* (deprecated)
- A deprecated alternate spelling of *email*. Only one of *email* or
- *email_filter* should be used.
-
- *older-than*
- If present, the approval must be older than this amount of time
- to match. Provide a time interval as a number with a suffix of
- "w" (weeks), "d" (days), "h" (hours), "m" (minutes), "s"
- (seconds). Example ``48h`` or ``2d``.
-
- *newer-than*
- If present, the approval must be newer than this amount of time
- to match. Same format as "older-than".
-
- Any other field is interpreted as a review category and value
- pair. For example ``verified: 1`` would require that the approval
- be for a +1 vote in the "Verified" column. The value may either
- be a single value or a list: ``verified: [1, 2]`` would match
- either a +1 or +2 vote.
-
- **open**
- A boolean value (``true`` or ``false``) that indicates whether the change
- must be open or closed in order to be enqueued.
-
- **current-patchset**
- A boolean value (``true`` or ``false``) that indicates whether the change
- must be the current patchset in order to be enqueued.
-
- **status**
- A string value that corresponds with the status of the change
- reported by the trigger. For example, when using the Gerrit
- trigger, status values such as ``NEW`` or ``MERGED`` may be useful.
-
-**reject**
- If this section is present, it establishes pre-requisites that can
- block an item from being enqueued. It can be considered a negative
- version of **require**.
-
- **approval**
- This takes a list of approvals. If an approval matches the provided
- criteria the change can not be entered into the pipeline. It follows
- the same syntax as the :ref:`"require approval" pipeline above
- <pipeline-require-approval>`.
-
- Example to reject a change with any negative vote::
-
- reject:
- approval:
- - code-review: [-1, -2]
-
-**dequeue-on-new-patchset**
- Normally, if a new patchset is uploaded to a change that is in a
- pipeline, the existing entry in the pipeline will be removed (with
- jobs canceled and any dependent changes that can no longer merge as
- well. To suppress this behavior (and allow jobs to continue
- running), set this to ``false``. Default: ``true``.
-
-**ignore-dependencies**
- In any kind of pipeline (dependent or independent), Zuul will
- attempt to enqueue all dependencies ahead of the current change so
- that they are tested together (independent pipelines report the
- results of each change regardless of the results of changes ahead).
- To ignore dependencies completely in an independent pipeline, set
- this to ``true``. This option is ignored by dependent pipelines.
- The default is: ``false``.
-
-**success**
- Describes where Zuul should report to if all the jobs complete
- successfully.
- This section is optional; if it is omitted, Zuul will run jobs and
- do nothing on success; it will not even report a message to Gerrit.
- If the section is present, the listed reporter plugins will be
- asked to report on the jobs.
- The reporters are listed by their connection name. The options
- available depend on the driver for the supplied connection.
- See :doc:`reporters` for more details.
-
-**failure**
- Uses the same syntax as **success**, but describes what Zuul should
- do if at least one job fails.
-
-**merge-failure**
- Uses the same syntax as **success**, but describes what Zuul should
- do if it is unable to merge in the patchset. If no merge-failure
- reporters are listed then the ``failure`` reporters will be used to
- notify of unsuccessful merges.
-
-**start**
- Uses the same syntax as **success**, but describes what Zuul should
- do when a change is added to the pipeline manager. This can be used,
- for example, to reset the value of the Verified review category.
-
-**disabled**
- Uses the same syntax as **success**, but describes what Zuul should
- do when a pipeline is disabled.
- See ``disable-after-consecutive-failures``.
-
-**disable-after-consecutive-failures**
- If set, a pipeline can enter a ''disabled'' state if too many changes
- in a row fail. When this value is exceeded the pipeline will stop
- reporting to any of the ``success``, ``failure`` or ``merge-failure``
- reporters and instead only report to the ``disabled`` reporters.
- (No ``start`` reports are made when a pipeline is disabled).
-
-**precedence**
- Indicates how the build scheduler should prioritize jobs for
- different pipelines. Each pipeline may have one precedence, jobs
- for pipelines with a higher precedence will be run before ones with
- lower. The value should be one of ``high``, ``normal``, or ``low``.
- Default: ``normal``.
-
-**window**
- DependentPipelineManagers only. Zuul can rate limit
- DependentPipelineManagers in a manner similar to TCP flow control.
- Jobs are only started for changes in the queue if they sit in the
- actionable window for the pipeline. The initial length of this window
- is configurable with this value. The value given should be a positive
- integer value. A value of ``0`` disables rate limiting on the
- DependentPipelineManager.
- Default: ``20``.
-
-**window-floor**
- DependentPipelineManagers only. This is the minimum value for the
- window described above. Should be a positive non zero integer value.
- Default: ``3``.
-
-**window-increase-type**
- DependentPipelineManagers only. This value describes how the window
- should grow when changes are successfully merged by zuul. A value of
- ``linear`` indicates that ``window-increase-factor`` should be added
- to the previous window value. A value of ``exponential`` indicates
- that ``window-increase-factor`` should be multiplied against the
- previous window value and the result will become the window size.
- Default: ``linear``.
-
-**window-increase-factor**
- DependentPipelineManagers only. The value to be added or multiplied
- against the previous window value to determine the new window after
- successful change merges.
- Default: ``1``.
-
-**window-decrease-type**
- DependentPipelineManagers only. This value describes how the window
- should shrink when changes are not able to be merged by Zuul. A value
- of ``linear`` indicates that ``window-decrease-factor`` should be
- subtracted from the previous window value. A value of ``exponential``
- indicates that ``window-decrease-factor`` should be divided against
- the previous window value and the result will become the window size.
- Default: ``exponential``.
-
-**window-decrease-factor**
- DependentPipelineManagers only. The value to be subtracted or divided
- against the previous window value to determine the new window after
- unsuccessful change merges.
- Default: ``2``.
-
-Some example pipeline configurations are included in the sample layout
-file. The first is called a *check* pipeline::
-
- - name: check
- manager: IndependentPipelineManager
- trigger:
- my_gerrit:
- - event: patchset-created
- success:
- my_gerrit:
- verified: 1
- failure:
- my_gerrit:
- verified: -1
-
-This will trigger jobs each time a new patchset (or change) is
-uploaded to Gerrit, and report +/-1 values to Gerrit in the
-``verified`` review category. ::
-
- - name: gate
- manager: DependentPipelineManager
- trigger:
- my_gerrit:
- - event: comment-added
- approval:
- - approved: 1
- success:
- my_gerrit:
- verified: 2
- submit: true
- failure:
- my_gerrit:
- verified: -2
-
-This will trigger jobs whenever a reviewer leaves a vote of ``1`` in the
-``approved`` review category in Gerrit (a non-standard category).
-Changes will be tested in such a way as to guarantee that they will be
-merged exactly as tested, though that will happen in parallel by
-creating a virtual queue of dependent changes and performing
-speculative execution of jobs. ::
-
- - name: post
- manager: IndependentPipelineManager
- trigger:
- my_gerrit:
- - event: ref-updated
- ref: ^(?!refs/).*$
-
-This will trigger jobs whenever a change is merged to a named branch
-(e.g., ``master``). No output will be reported to Gerrit. This is
-useful for side effects such as creating per-commit tarballs. ::
-
- - name: silent
- manager: IndependentPipelineManager
- trigger:
- my_gerrit:
- - event: patchset-created
-
-This also triggers jobs when changes are uploaded to Gerrit, but no
-results are reported to Gerrit. This is useful for jobs that are in
-development and not yet ready to be presented to developers. ::
-
- pipelines:
- - name: post-merge
- manager: IndependentPipelineManager
- trigger:
- my_gerrit:
- - event: change-merged
- success:
- my_gerrit:
- force-message: True
- failure:
- my_gerrit:
- force-message: True
-
-The ``change-merged`` event happens when a change has been merged in the git
-repository. The change is thus closed and Gerrit will not accept modifications
-to the review scoring such as ``code-review`` or ``verified``. By using the
-``force-message: True`` parameter, Zuul will pass ``--force-message`` to the
-``gerrit review`` command, making sure the message is actually sent back to
-Gerrit regardless of approval scores.
-This kind of pipeline is well suited to running regression or performance tests.
-
-.. note::
-  The ``change-merged`` event does not include the commit sha1, which can be
-  hazardous; it does, however, still let you report back to Gerrit. If you
-  need to build a tarball for a specific commit, consider using the
-  ``ref-updated`` event instead, which does include the commit sha1 (but
-  lacks the Gerrit change number).
-
-
-.. _jobs:
-
-Jobs
-""""
-
-The jobs section is optional, and can be used to set attributes of
-jobs that are independent of their association with a project. For
-example, if a job should return a customized message on failure, that
-may be specified here. Otherwise, Zuul does not need to be told about
-each job as it builds a list from the project specification.
-
-**name**
- The name of the job. This field is treated as a regular expression
- and will be applied to each job that matches.
-
-**queue-name (optional)**
- Zuul will automatically combine projects that share a job into
- shared change queues for dependent pipeline managers. In order to
- report statistics about these queues, it is convenient for them to
- have names. Zuul can automatically name change queues, however
- these can grow quite long and are prone to changing as projects in
- the queue change. If you assign a queue-name to a job, Zuul will
- use that as the name for the shared change queue that contains that
- job instead of the automatically generated one. It is an error for
- a shared change queue to have more than one job with a queue-name if
- they are not the same.
-
-**failure-message (optional)**
- The message that should be reported to Gerrit if the job fails.
-
-**success-message (optional)**
-  The message that should be reported to Gerrit if the job succeeds.
-
-**failure-pattern (optional)**
- The URL that should be reported to Gerrit if the job fails.
- Defaults to the build URL or the url_pattern configured in
- zuul.conf. May be supplied as a string pattern with substitutions
- as described in url_pattern in :ref:`zuulconf`.
-
-**success-pattern (optional)**
- The URL that should be reported to Gerrit if the job succeeds.
- Defaults to the build URL or the url_pattern configured in
- zuul.conf. May be supplied as a string pattern with substitutions
- as described in url_pattern in :ref:`zuulconf`.
-
-**hold-following-changes (optional)**
- This is a boolean that indicates that changes that follow this
- change in a dependent change pipeline should wait until this job
- succeeds before executing. If this is applied to a very short job
- that can predict whether longer jobs will fail early, this can be
- used to reduce the number of jobs that Zuul will execute and
- ultimately have to cancel. In that case, a small amount of
- parallelization of jobs is traded for more efficient use of testing
- resources. On the other hand, to apply this to a long running job
- would largely defeat the parallelization of dependent change testing
- that is the main feature of Zuul. Default: ``false``.
-
-**semaphore (optional)**
- This is a string that names a semaphore that should be observed by this
- job. The semaphore defines how many jobs which reference that semaphore
- can be enqueued at a time. This applies across all pipelines in the same
- tenant. The max value of the semaphore can be specified in the config
- repositories and defaults to 1.
-
-**branch (optional)**
- This job should only be run on matching branches. This field is
- treated as a regular expression and multiple branches may be
- listed.
-
-**files (optional)**
- This job should only be run if at least one of the files involved in
- the change (added, deleted, or modified) matches at least one of the
- file patterns listed here. This field is treated as a regular
- expression and multiple expressions may be listed.
-
-**skip-if (optional)**
-
- This job should not be run if all the patterns specified by the
- optional fields listed below match on their targets. When multiple
- sets of parameters are provided, this job will be skipped if any set
- matches. For example: ::
-
- jobs:
- - name: check-tempest-dsvm-neutron
- skip-if:
- - project: ^openstack/neutron$
- branch: ^stable/juno$
- all-files-match-any:
- - ^neutron/tests/.*$
- - ^tools/.*$
- - all-files-match-any:
- - ^doc/.*$
- - ^.*\.rst$
-
- With this configuration, the job would be skipped for a neutron
- patchset for the stable/juno branch provided that every file in the
- change matched at least one of the specified file regexes. The job
- will also be skipped for any patchset that modified only the doc
- tree or rst files.
-
- *project* (optional)
- The regular expression to match against the project of the change.
-
- *branch* (optional)
- The regular expression to match against the branch or ref of the
- change.
-
- *all-files-match-any* (optional)
- A list of regular expressions intended to match the files involved
- in the change. This parameter will be considered matching a
- change only if all files in a change match at least one of these
- expressions.
-
-    The pattern '/COMMIT_MSG' is always matched and does not have to be
-    included. The exception is merge commits (which have no modified
-    files); in that case '/COMMIT_MSG' is not matched and the job is not
-    skipped. For merge commits it is assumed that the list of modified
-    files is not predictable and CI should be run.
-
-**voting (optional)**
-  Boolean value (``true`` or ``false``) that indicates whether
-  a job is voting or not. Default: ``true``.
-
-**attempts (optional)**
-  The number of times Zuul will attempt to execute a job. Once the limit
-  is reached, Zuul will report RETRY_LIMIT as the job result.
-  Defaults to 3.
-
-**tags (optional)**
- A list of arbitrary strings which will be associated with the job.
-
-Here is an example of setting the failure message for jobs that check
-whether a change merges cleanly::
-
- - name: ^.*-merge$
- failure-message: This change or one of its cross-repo dependencies
- was unable to be automatically merged with the current state of
- its repository. Please rebase the change and upload a new
- patchset.
-
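-As a further, purely illustrative sketch (the job name, branch and file
-patterns are hypothetical, and the semaphore refers to the example in
-the Semaphores section below), several of the optional attributes
-described above can be combined on a single entry::
-
-  - name: ^example-dsvm-.*$
-    queue-name: integrated
-    branch: ^(master|stable/.*)$
-    files:
-      - '^src/.*$'
-    voting: false
-    attempts: 5
-    hold-following-changes: true
-    semaphore: semaphore-foo
-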
-Projects
-""""""""
-
-The projects section indicates what jobs should be run in each pipeline
-for events associated with each project. It contains a list of
-projects. Here is an example::
-
- - name: example/project
- check:
- - project-merge:
- - project-unittest
- - project-pep8
- - project-pyflakes
- gate:
- - project-merge:
- - project-unittest
- - project-pep8
- - project-pyflakes
- post:
- - project-publish
-
-**name**
- The name of the project (as known by Gerrit).
-
-**merge-mode (optional)**
- An optional value that indicates what strategy should be used to
- merge changes to this project. Supported values are:
-
-  **merge-resolve**
-    Equivalent to 'git merge -s resolve'. This corresponds closely to
-    what Gerrit performs (using JGit) for a project if the "Merge if
-    necessary" merge mode is selected and "Automatically resolve
-    conflicts" is checked. This is the default.
-
-  **merge**
-    Equivalent to 'git merge'.
-
-  **cherry-pick**
-    Equivalent to 'git cherry-pick'.
-
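-For instance, a hypothetical project that should be gated with
-cherry-picks rather than merge commits could be declared as follows
-(a minimal sketch)::
-
-  - name: example/project
-    merge-mode: cherry-pick
-    check:
-      - project-unittest
-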
-The project name and optional merge-mode are followed by a section for
-each of the pipelines defined above. Pipelines may be omitted if no
-jobs should run for this project in a given pipeline. Within the
-pipeline section, the jobs that should be executed are listed. If a
-job is entered as a dictionary key, then jobs contained within that key
-are only executed if the key job succeeds. In the first example above,
-project-unittest, project-pep8, and project-pyflakes are only executed
-if project-merge succeeds. Nesting may be carried further: a job
-listed under one of those jobs would in turn run only once its parent
-succeeds. This can help avoid running unnecessary jobs while
-maximizing parallelism. It is also useful when distributing results
-between jobs.
-
-The special job named ``noop`` is internal to Zuul and will always
-return ``SUCCESS`` immediately. This can be useful if you require
-that all changes be processed by a pipeline but a project has no jobs
-that can be run on it.
-
-.. seealso:: The OpenStack Zuul configuration for a comprehensive example: https://git.openstack.org/cgit/openstack-infra/project-config/tree/zuul/layout.yaml
-
-Project Templates
-"""""""""""""""""
-
-Whenever you have a lot of similar projects (such as plugins for a project)
-you will most probably want to use the same pipeline configurations.
-Project templates let you define pipelines and job-name templates to trigger.
-You can then simply apply a template to a project, which makes it easier to
-update several similar projects. As an example::
-
- project-templates:
- # Name of the template
- - name: plugin-triggering
- # Definition of pipelines just like for a `project`
- check:
- - '{jobprefix}-merge':
- - '{jobprefix}-pep8'
- - '{jobprefix}-pyflakes'
- gate:
- - '{jobprefix}-merge':
- - '{jobprefix}-unittest'
- - '{jobprefix}-pep8'
- - '{jobprefix}-pyflakes'
-
-In your project definitions, you then apply the template using the
-``template`` key::
-
- projects:
- - name: plugin/foobar
- template:
- - name: plugin-triggering
- jobprefix: plugin-foobar
-
-You can pass several parameters to a template. A ``parameter`` value
-will be used for expansion of ``{parameter}`` in the template
-strings. The parameter ``name`` will be automatically provided and
-will contain the short name of the project, that is the portion of the
-project name after the last ``/`` character.
-
-Multiple templates can be combined in a project, and the jobs from all
-of those templates will be added to the project. Individual jobs may
-also be added::
-
- projects:
- - name: plugin/foobar
- template:
- - name: plugin-triggering
- jobprefix: plugin-foobar
- - name: plugin-extras
- jobprefix: plugin-foobar
- check:
- - foobar-extra-special-job
-
-Individual jobs may optionally be added to pipelines (e.g. check,
-gate, et cetera) for a project, in addition to those provided by
-templates.
-
-The order of the jobs listed in the project (which only affects the
-order of jobs listed on the report) will be the jobs from each
-template in the order listed, followed by any jobs individually listed
-for the project.
-
-Note that if multiple templates are used for a project and one
-template specifies a job that is also specified in another template,
-or specified in the project itself, the configuration defined by
-either the last template or the project itself will take priority.
-
-
-Semaphores
-""""""""""
-
-When using semaphores the maximum value of each one can be specified in their
-respective config repositories. Unspecified semaphores default to 1::
-
- - semaphore:
- name: semaphore-foo
- max: 5
- - semaphore:
- name: semaphore-bar
- max: 3
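-
-A job opts into a semaphore by naming it in the job's ``semaphore``
-attribute (see the job attributes above). For example, a sketch using a
-hypothetical job name::
-
-  - name: deploy-to-staging
-    semaphore: semaphore-foo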
-
-
-logging.conf
-~~~~~~~~~~~~
-This file is optional. If provided, it should be a standard
-:mod:`logging.config` module configuration file. If not present, Zuul will
-output all log messages of DEBUG level or higher to the console.
-
-Starting Zuul
--------------
-
-To start Zuul, run **zuul-server**::
-
- usage: zuul-server [-h] [-c CONFIG] [-l LAYOUT] [-d] [-t] [--version]
-
- Project gating system.
-
- optional arguments:
- -h, --help show this help message and exit
- -c CONFIG specify the config file
- -l LAYOUT specify the layout file
- -d do not run as a daemon
- -t validate layout file syntax
- --version show zuul version
-
-You may want to use the ``-d`` argument while you are initially setting
-up Zuul so you can detect any configuration errors quickly. Under
-normal operation, omit ``-d`` and let Zuul run as a daemon.
-
-If you send signal 1 (SIGHUP) to the zuul-server process, Zuul will
-stop executing new jobs, wait until all executing jobs are finished,
-reload its layout.yaml, and resume. Changes to any connections or
-the PID file will be ignored until Zuul is restarted.
-
-If you send a SIGUSR1 to the zuul-server process, Zuul will stop
-executing new jobs, wait until all executing jobs are finished,
-then exit. While waiting to exit Zuul will queue Gerrit events and
-save these events prior to exiting. When Zuul starts again it will
-read these saved events and act on them.
-
-If you need to abort Zuul and intend to manually requeue changes for
-jobs which were running in its pipelines, you can use the
-zuul-changes.py script before terminating to simplify the process.
-For example, this would give you a list of zuul-enqueue commands to
-requeue changes for the gate and check pipelines respectively::
-
- ./tools/zuul-changes.py http://zuul.openstack.org/ gate
- ./tools/zuul-changes.py http://zuul.openstack.org/ check
-
-If you send a SIGUSR2 to the zuul-server process, or the forked process
-that runs the Gearman daemon, Zuul will dump a stack trace for each
-running thread into its debug log. It is written under the log bucket
-``zuul.stack_dump``. This is useful for tracking down deadlock or
-otherwise slow threads.
-
-When `yappi <https://code.google.com/p/yappi/>`_ (Yet Another Python
-Profiler) is available, additional function and thread stats are
-emitted as well. The first SIGUSR2 enables yappi; on the second
-SIGUSR2 it dumps the information collected, resets all yappi state and
-stops profiling. This is done to minimize the impact of yappi on a
-running system.
diff --git a/etc/zuul.conf-sample b/etc/zuul.conf-sample
index 2909ea6..48f23a5 100644
--- a/etc/zuul.conf-sample
+++ b/etc/zuul.conf-sample
@@ -4,6 +4,9 @@
;ssl_cert=/path/to/client.pem
;ssl_key=/path/to/client.key
+[zookeeper]
+hosts=127.0.0.1:2181
+
[gearman_server]
start=true
;ssl_ca=/path/to/ca.pem
@@ -11,12 +14,13 @@
;ssl_key=/path/to/server.key
[zuul]
-layout_config=/etc/zuul/layout.yaml
+status_url=https://zuul.example.com/status
+
+[scheduler]
+tenant_config=/etc/zuul/main.yaml
log_config=/etc/zuul/logging.conf
pidfile=/var/run/zuul/zuul.pid
state_dir=/var/lib/zuul
-status_url=https://jenkins.example.com/zuul/status
-zookeeper_hosts=127.0.0.1:2181
[merger]
git_dir=/var/lib/zuul/git
diff --git a/test-requirements.txt b/test-requirements.txt
index baf6cad..eea1d69 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,5 +11,6 @@
testrepository>=0.0.17
testtools>=0.9.32
sphinxcontrib-programoutput
+oslosphinx
mock
PyMySQL
diff --git a/tests/base.py b/tests/base.py
index ff1f531..921fcd1 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -554,7 +554,7 @@
def __init__(self, github, number, project, branch,
subject, upstream_root, files=[], number_of_commits=1,
- writers=[]):
+ writers=[], body=None):
"""Creates a new PR with several commits.
Sends an event about opened PR."""
self.github = github
@@ -563,6 +563,7 @@
self.project = project
self.branch = branch
self.subject = subject
+ self.body = body
self.number_of_commits = 0
self.upstream_root = upstream_root
self.files = []
@@ -602,6 +603,9 @@
def getPullRequestClosedEvent(self):
return self._getPullRequestEvent('closed')
+ def getPullRequestEditedEvent(self):
+ return self._getPullRequestEvent('edited')
+
def addComment(self, message):
self.comments.append(message)
self._updateTimeStamp()
@@ -723,6 +727,10 @@
}
return (name, data)
+ def editBody(self, body):
+ self.body = body
+ self._updateTimeStamp()
+
def _getRepo(self):
repo_path = os.path.join(self.upstream_root, self.project)
return git.Repo(repo_path)
@@ -830,7 +838,8 @@
'repo': {
'full_name': self.project
}
- }
+ },
+ 'body': self.body
},
'sender': {
'login': 'ghuser'
@@ -868,12 +877,14 @@
self.upstream_root = upstream_root
self.merge_failure = False
self.merge_not_allowed_count = 0
+ self.reports = []
- def openFakePullRequest(self, project, branch, subject, files=[]):
+ def openFakePullRequest(self, project, branch, subject, files=[],
+ body=None):
self.pr_number += 1
pull_request = FakeGithubPullRequest(
self, self.pr_number, project, branch, subject, self.upstream_root,
- files=files)
+ files=files, body=body)
self.pull_requests.append(pull_request)
return pull_request
@@ -911,7 +922,7 @@
'http://localhost:%s/connection/%s/payload'
% (port, self.connection_name),
data=payload, headers=headers)
- urllib.request.urlopen(req)
+ return urllib.request.urlopen(req)
def getPull(self, project, number):
pr = self.pull_requests[number - 1]
@@ -934,7 +945,9 @@
}
},
'files': pr.files,
- 'labels': pr.labels
+ 'labels': pr.labels,
+ 'merged': pr.is_merged,
+ 'body': pr.body
}
return data
@@ -980,10 +993,14 @@
return ['master']
def commentPull(self, project, pr_number, message):
+ # record that this got reported
+ self.reports.append((project, pr_number, 'comment'))
pull_request = self.pull_requests[pr_number - 1]
pull_request.addComment(message)
def mergePull(self, project, pr_number, commit_message='', sha=None):
+ # record that this got reported
+ self.reports.append((project, pr_number, 'merge'))
pull_request = self.pull_requests[pr_number - 1]
if self.merge_failure:
raise Exception('Pull request was not merged')
@@ -999,6 +1016,8 @@
def setCommitStatus(self, project, sha, state, url='', description='',
context='default', user='zuul'):
+ # record that this got reported
+ self.reports.append((project, sha, 'status', (user, context, state)))
# always insert a status to the front of the list, to represent
# the last status provided for a commit.
# Since we're bypassing github API, which would require a user, we
@@ -1015,13 +1034,34 @@
})
def labelPull(self, project, pr_number, label):
+ # record that this got reported
+ self.reports.append((project, pr_number, 'label', label))
pull_request = self.pull_requests[pr_number - 1]
pull_request.addLabel(label)
def unlabelPull(self, project, pr_number, label):
+ # record that this got reported
+ self.reports.append((project, pr_number, 'unlabel', label))
pull_request = self.pull_requests[pr_number - 1]
pull_request.removeLabel(label)
+ def _getNeededByFromPR(self, change):
+ prs = []
+ pattern = re.compile(r"Depends-On.*https://%s/%s/pull/%s" %
+ (self.server, change.project.name,
+ change.number))
+ for pr in self.pull_requests:
+ if not pr.body:
+ body = ''
+ else:
+ body = pr.body
+ if pattern.search(body):
+ # Get our version of a pull so that it's a dict
+ pull = self.getPull(pr.project, pr.number)
+ prs.append(pull)
+
+ return prs
+
class BuildHistory(object):
def __init__(self, **kw):
@@ -1846,12 +1886,16 @@
self.merger_src_root = os.path.join(self.test_root, "merger-git")
self.executor_src_root = os.path.join(self.test_root, "executor-git")
self.state_root = os.path.join(self.test_root, "lib")
+ self.merger_state_root = os.path.join(self.test_root, "merger-lib")
+ self.executor_state_root = os.path.join(self.test_root, "executor-lib")
if os.path.exists(self.test_root):
shutil.rmtree(self.test_root)
os.makedirs(self.test_root)
os.makedirs(self.upstream_root)
os.makedirs(self.state_root)
+ os.makedirs(self.merger_state_root)
+ os.makedirs(self.executor_state_root)
# Make per test copy of Configuration.
self.setup_config()
@@ -1862,13 +1906,16 @@
shutil.copy('{}.pub'.format(src_private_key_file),
'{}.pub'.format(self.private_key_file))
os.chmod(self.private_key_file, 0o0600)
- self.config.set('zuul', 'tenant_config',
- os.path.join(FIXTURE_DIR,
- self.config.get('zuul', 'tenant_config')))
+ self.config.set('scheduler', 'tenant_config',
+ os.path.join(
+ FIXTURE_DIR,
+ self.config.get('scheduler', 'tenant_config')))
+ self.config.set('scheduler', 'state_dir', self.state_root)
self.config.set('merger', 'git_dir', self.merger_src_root)
+ self.config.set('merger', 'state_dir', self.merger_state_root)
self.config.set('executor', 'git_dir', self.executor_src_root)
- self.config.set('zuul', 'state_dir', self.state_root)
self.config.set('executor', 'private_key_file', self.private_key_file)
+ self.config.set('executor', 'state_dir', self.executor_state_root)
self.statsd = FakeStatsd()
# note, use 127.0.0.1 rather than localhost to avoid getting ipv6
@@ -2009,9 +2056,14 @@
self.config = configparser.ConfigParser()
self.config.read(os.path.join(FIXTURE_DIR, self.config_file))
+ sections = ['zuul', 'scheduler', 'executor', 'merger']
+ for section in sections:
+ if not self.config.has_section(section):
+ self.config.add_section(section)
+
if not self.setupSimpleLayout():
if hasattr(self, 'tenant_config_file'):
- self.config.set('zuul', 'tenant_config',
+ self.config.set('scheduler', 'tenant_config',
self.tenant_config_file)
git_path = os.path.join(
os.path.dirname(
@@ -2069,7 +2121,7 @@
'untrusted-projects': untrusted_projects}}}}]
f.write(yaml.dump(config).encode('utf8'))
f.close()
- self.config.set('zuul', 'tenant_config',
+ self.config.set('scheduler', 'tenant_config',
os.path.join(FIXTURE_DIR, f.name))
self.init_repo('common-config')
@@ -2082,7 +2134,7 @@
if self.create_project_keys:
return
- path = self.config.get('zuul', 'tenant_config')
+ path = self.config.get('scheduler', 'tenant_config')
with open(os.path.join(FIXTURE_DIR, path)) as f:
tenant_config = yaml.safe_load(f.read())
for tenant in tenant_config:
@@ -2591,7 +2643,7 @@
- org/project1
- org/project2\n""" % path)
f.close()
- self.config.set('zuul', 'tenant_config',
+ self.config.set('scheduler', 'tenant_config',
os.path.join(FIXTURE_DIR, f.name))
self.setupAllProjectKeys()
diff --git a/tests/fixtures/config/pre-playbook/git/common-config/playbooks/post.yaml b/tests/fixtures/config/pre-playbook/git/common-config/playbooks/post.yaml
new file mode 100644
index 0000000..2e512b1
--- /dev/null
+++ b/tests/fixtures/config/pre-playbook/git/common-config/playbooks/post.yaml
@@ -0,0 +1,5 @@
+- hosts: all
+ tasks:
+ - file:
+ path: "{{zuul._test.test_root}}/{{zuul.uuid}}.post.flag"
+ state: touch
diff --git a/tests/fixtures/config/pre-playbook/git/common-config/playbooks/pre.yaml b/tests/fixtures/config/pre-playbook/git/common-config/playbooks/pre.yaml
new file mode 100644
index 0000000..13c2208
--- /dev/null
+++ b/tests/fixtures/config/pre-playbook/git/common-config/playbooks/pre.yaml
@@ -0,0 +1,8 @@
+- hosts: all
+ tasks:
+ - copy:
+ src: "{{zuul._test.test_root}}/{{zuul.uuid}}.flag"
+ dest: "{{zuul._test.test_root}}/{{zuul.uuid}}.failed"
+ - file:
+ path: "{{zuul._test.test_root}}/{{zuul.uuid}}.pre.flag"
+ state: touch
diff --git a/tests/fixtures/config/pre-playbook/git/common-config/playbooks/python27.yaml b/tests/fixtures/config/pre-playbook/git/common-config/playbooks/python27.yaml
new file mode 100644
index 0000000..dbb64a5
--- /dev/null
+++ b/tests/fixtures/config/pre-playbook/git/common-config/playbooks/python27.yaml
@@ -0,0 +1,5 @@
+- hosts: all
+ tasks:
+ - file:
+ path: "{{zuul._test.test_root}}/{{zuul.uuid}}.main.flag"
+ state: touch
diff --git a/tests/fixtures/config/pre-playbook/git/common-config/zuul.yaml b/tests/fixtures/config/pre-playbook/git/common-config/zuul.yaml
new file mode 100644
index 0000000..3de0d6d
--- /dev/null
+++ b/tests/fixtures/config/pre-playbook/git/common-config/zuul.yaml
@@ -0,0 +1,18 @@
+- pipeline:
+ name: check
+ manager: independent
+ allow-secrets: true
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- job:
+ name: python27
+ pre-run: playbooks/pre
+ post-run: playbooks/post
diff --git a/tests/fixtures/config/pre-playbook/git/org_project/.zuul.yaml b/tests/fixtures/config/pre-playbook/git/org_project/.zuul.yaml
new file mode 100644
index 0000000..89a5674
--- /dev/null
+++ b/tests/fixtures/config/pre-playbook/git/org_project/.zuul.yaml
@@ -0,0 +1,5 @@
+- project:
+ name: org/project
+ check:
+ jobs:
+ - python27
diff --git a/tests/fixtures/config/pre-playbook/git/org_project/README b/tests/fixtures/config/pre-playbook/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/pre-playbook/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/pre-playbook/main.yaml b/tests/fixtures/config/pre-playbook/main.yaml
new file mode 100644
index 0000000..6033879
--- /dev/null
+++ b/tests/fixtures/config/pre-playbook/main.yaml
@@ -0,0 +1,9 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
+
diff --git a/tests/fixtures/layouts/crd-github.yaml b/tests/fixtures/layouts/crd-github.yaml
new file mode 100644
index 0000000..11bdf76
--- /dev/null
+++ b/tests/fixtures/layouts/crd-github.yaml
@@ -0,0 +1,79 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ github:
+ - event: pull_request
+ action: edited
+ start:
+ github: {}
+ success:
+ github: {}
+ failure:
+ github: {}
+
+- pipeline:
+ name: gate
+ manager: dependent
+ trigger:
+ github:
+ - event: pull_request
+ action: edited
+ start:
+ github: {}
+ success:
+ github:
+ merge: true
+ failure:
+ github: {}
+
+- job:
+ name: project1-test
+- job:
+ name: project2-test
+- job:
+ name: project3-test
+- job:
+ name: project4-test
+- job:
+ name: project5-test
+- job:
+ name: project6-test
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - project1-test
+
+- project:
+ name: org/project2
+ check:
+ jobs:
+ - project2-test
+
+- project:
+ name: org/project3
+ gate:
+ queue: cogated
+ jobs:
+ - project3-test
+
+- project:
+ name: org/project4
+ gate:
+ queue: cogated
+ jobs:
+ - project4-test
+
+- project:
+ name: org/project5
+ gate:
+ jobs:
+ - project5-test
+
+- project:
+ name: org/project6
+ gate:
+ jobs:
+ - project6-test
diff --git a/tests/fixtures/layouts/reporting-github.yaml b/tests/fixtures/layouts/reporting-github.yaml
index d054df7..0fdec85 100644
--- a/tests/fixtures/layouts/reporting-github.yaml
+++ b/tests/fixtures/layouts/reporting-github.yaml
@@ -34,6 +34,29 @@
github:
comment: false
+- pipeline:
+ name: push-reporting
+ description: Uncommon reporting
+ manager: independent
+ trigger:
+ github:
+ - event: push
+ - event: pull_request
+ action: opened
+ start:
+ github:
+ comment: true
+ status: 'pending'
+ success:
+ github:
+ comment: true
+ status: 'success'
+ merge: true
+ failure:
+ github:
+ comment: true
+ status: 'failure'
+
- job:
name: project-test1
@@ -45,3 +68,9 @@
reporting:
jobs:
- project-test1
+
+- project:
+ name: org/project2
+ push-reporting:
+ jobs:
+ - project-test1
diff --git a/tests/fixtures/zuul-connections-gerrit-and-github.conf b/tests/fixtures/zuul-connections-gerrit-and-github.conf
index 69e7f8b..64757d8 100644
--- a/tests/fixtures/zuul-connections-gerrit-and-github.conf
+++ b/tests/fixtures/zuul-connections-gerrit-and-github.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[scheduler]
tenant_config=config/multi-driver/main.yaml
[merger]
diff --git a/tests/fixtures/zuul-connections-merger.conf b/tests/fixtures/zuul-connections-merger.conf
index 4499493..df465d5 100644
--- a/tests/fixtures/zuul-connections-merger.conf
+++ b/tests/fixtures/zuul-connections-merger.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[webapp]
status_url=http://zuul.example.com/status
[merger]
diff --git a/tests/fixtures/zuul-connections-multiple-gerrits.conf b/tests/fixtures/zuul-connections-multiple-gerrits.conf
index 43b00ef..66a6926 100644
--- a/tests/fixtures/zuul-connections-multiple-gerrits.conf
+++ b/tests/fixtures/zuul-connections-multiple-gerrits.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[scheduler]
tenant_config=main.yaml
[merger]
diff --git a/tests/fixtures/zuul-connections-same-gerrit.conf b/tests/fixtures/zuul-connections-same-gerrit.conf
index 8a998cf..3262294 100644
--- a/tests/fixtures/zuul-connections-same-gerrit.conf
+++ b/tests/fixtures/zuul-connections-same-gerrit.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[scheduler]
tenant_config=config/zuul-connections-same-gerrit/main.yaml
[merger]
diff --git a/tests/fixtures/zuul-git-driver.conf b/tests/fixtures/zuul-git-driver.conf
index b6d3473..4321871 100644
--- a/tests/fixtures/zuul-git-driver.conf
+++ b/tests/fixtures/zuul-git-driver.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[scheduler]
tenant_config=config/zuul-connections-same-gerrit/main.yaml
[merger]
diff --git a/tests/fixtures/zuul-github-driver.conf b/tests/fixtures/zuul-github-driver.conf
index dc28f98..3d61ab6 100644
--- a/tests/fixtures/zuul-github-driver.conf
+++ b/tests/fixtures/zuul-github-driver.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[webapp]
status_url=http://zuul.example.com/status/#{change.number},{change.patchset}
[merger]
@@ -23,4 +23,4 @@
[connection github_ent]
driver=github
sshkey=/home/zuul/.ssh/id_rsa
-git_host=github.enterprise.io
+server=github.enterprise.io
diff --git a/tests/fixtures/zuul-push-reqs.conf b/tests/fixtures/zuul-push-reqs.conf
index c5272aa..4faac13 100644
--- a/tests/fixtures/zuul-push-reqs.conf
+++ b/tests/fixtures/zuul-push-reqs.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[webapp]
status_url=http://zuul.example.com/status
[merger]
diff --git a/tests/fixtures/zuul-sql-driver.conf b/tests/fixtures/zuul-sql-driver.conf
index 6fdd081..688d65b 100644
--- a/tests/fixtures/zuul-sql-driver.conf
+++ b/tests/fixtures/zuul-sql-driver.conf
@@ -1,9 +1,8 @@
[gearman]
server=127.0.0.1
-[zuul]
+[scheduler]
tenant_config=main.yaml
-url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
[merger]
git_dir=/tmp/zuul-test/merger-git
diff --git a/tests/fixtures/zuul.conf b/tests/fixtures/zuul.conf
index c4cfe70..d6de76c 100644
--- a/tests/fixtures/zuul.conf
+++ b/tests/fixtures/zuul.conf
@@ -1,7 +1,7 @@
[gearman]
server=127.0.0.1
-[zuul]
+[scheduler]
tenant_config=main.yaml
[merger]
diff --git a/tests/unit/test_bubblewrap.py b/tests/unit/test_bubblewrap.py
index 675221e..d94b3f2 100644
--- a/tests/unit/test_bubblewrap.py
+++ b/tests/unit/test_bubblewrap.py
@@ -15,10 +15,12 @@
import subprocess
import tempfile
import testtools
+import time
import os
from zuul.driver import bubblewrap
from zuul.executor.server import SshAgent
+from tests.base import iterate_timeout
class TestBubblewrap(testtools.TestCase):
@@ -61,12 +63,15 @@
po = bwrap.getPopen(work_dir=work_dir,
ansible_dir=ansible_dir,
ssh_auth_sock=ssh_agent.env['SSH_AUTH_SOCK'])
- leak_time = 7
+ leak_time = 60
# Use hexadecimal notation to avoid false-positive
true_proc = po(['bash', '-c', 'sleep 0x%X & disown' % leak_time])
self.assertEqual(0, true_proc.wait())
cmdline = "sleep\x000x%X\x00" % leak_time
- sleep_proc = [pid for pid in os.listdir("/proc") if
- os.path.isfile("/proc/%s/cmdline" % pid) and
- open("/proc/%s/cmdline" % pid).read() == cmdline]
- self.assertEqual(len(sleep_proc), 0, "Processes leaked")
+ for x in iterate_timeout(30, "process to exit"):
+ sleep_proc = [pid for pid in os.listdir("/proc") if
+ os.path.isfile("/proc/%s/cmdline" % pid) and
+ open("/proc/%s/cmdline" % pid).read() == cmdline]
+ if not sleep_proc:
+ break
+ time.sleep(1)
diff --git a/tests/unit/test_github_crd.py b/tests/unit/test_github_crd.py
new file mode 100644
index 0000000..bc7d499
--- /dev/null
+++ b/tests/unit/test_github_crd.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tests.base import ZuulTestCase, simple_layout
+
+
+class TestGithubCrossRepoDeps(ZuulTestCase):
+ """Test Github cross-repo dependencies"""
+ config_file = 'zuul-github-driver.conf'
+
+ @simple_layout('layouts/crd-github.yaml', driver='github')
+ def test_crd_independent(self):
+        "Test cross-repo dependencies on an independent pipeline"
+
+ # Create a change in project1 that a project2 change will depend on
+ A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
+
+ # Create a commit in B that sets the dependency on A
+ msg = "Depends-On: https://github.com/org/project1/pull/%s" % A.number
+ B = self.fake_github.openFakePullRequest('org/project2', 'master', 'B',
+ body=msg)
+
+ # Make an event to re-use
+ event = B.getPullRequestEditedEvent()
+
+ self.fake_github.emitEvent(event)
+ self.waitUntilSettled()
+
+        # The changes for the job from project2 should include the project1
+        # PR content
+ changes = self.getJobFromHistory(
+ 'project2-test', 'org/project2').changes
+
+ self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
+ A.head_sha,
+ B.number,
+ B.head_sha))
+
+ # There should be no more changes in the queue
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
+
+ @simple_layout('layouts/crd-github.yaml', driver='github')
+ def test_crd_dependent(self):
+        "Test cross-repo dependencies on a dependent pipeline"
+
+ # Create a change in project3 that a project4 change will depend on
+ A = self.fake_github.openFakePullRequest('org/project3', 'master', 'A')
+
+ # Create a commit in B that sets the dependency on A
+ msg = "Depends-On: https://github.com/org/project3/pull/%s" % A.number
+ B = self.fake_github.openFakePullRequest('org/project4', 'master', 'B',
+ body=msg)
+
+ # Make an event to re-use
+ event = B.getPullRequestEditedEvent()
+
+ self.fake_github.emitEvent(event)
+ self.waitUntilSettled()
+
+        # The changes for the job from project4 should include the project3
+        # PR content
+ changes = self.getJobFromHistory(
+ 'project4-test', 'org/project4').changes
+
+ self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
+ A.head_sha,
+ B.number,
+ B.head_sha))
+
+ self.assertTrue(A.is_merged)
+ self.assertTrue(B.is_merged)
+
+ @simple_layout('layouts/crd-github.yaml', driver='github')
+ def test_crd_unshared_dependent(self):
+        "Test cross-repo dependencies on an unshared dependent pipeline"
+
+ # Create a change in project1 that a project2 change will depend on
+ A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
+
+ # Create a commit in B that sets the dependency on A
+ msg = "Depends-On: https://github.com/org/project5/pull/%s" % A.number
+ B = self.fake_github.openFakePullRequest('org/project6', 'master', 'B',
+ body=msg)
+
+ # Make an event for B
+ event = B.getPullRequestEditedEvent()
+
+        # Emit for B, which should not enqueue A because they do not share
+        # a queue. Since B depends on A, and A isn't enqueued, B will not run
+ self.fake_github.emitEvent(event)
+ self.waitUntilSettled()
+
+ self.assertEqual(0, len(self.history))
+
+ # Enqueue A alone, let it finish
+ self.fake_github.emitEvent(A.getPullRequestEditedEvent())
+ self.waitUntilSettled()
+
+ self.assertTrue(A.is_merged)
+ self.assertFalse(B.is_merged)
+ self.assertEqual(1, len(self.history))
+
+ # With A merged, B should go through
+ self.fake_github.emitEvent(event)
+ self.waitUntilSettled()
+
+ self.assertTrue(B.is_merged)
+ self.assertEqual(2, len(self.history))
+
+ @simple_layout('layouts/crd-github.yaml', driver='github')
+ def test_crd_cycle(self):
+ "Test cross-repo dependency cycles"
+
+ # A -> B -> A
+ msg = "Depends-On: https://github.com/org/project6/pull/2"
+ A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A',
+ body=msg)
+ msg = "Depends-On: https://github.com/org/project5/pull/1"
+ B = self.fake_github.openFakePullRequest('org/project6', 'master', 'B',
+ body=msg)
+
+ self.fake_github.emitEvent(A.getPullRequestEditedEvent())
+ self.waitUntilSettled()
+
+ self.assertFalse(A.is_merged)
+ self.assertFalse(B.is_merged)
+ self.assertEqual(0, len(self.history))
+
+ @simple_layout('layouts/crd-github.yaml', driver='github')
+ def test_crd_needed_changes(self):
+ "Test cross-repo needed changes discovery"
+
+ # Given change A and B, where B depends on A, when A
+ # completes B should be enqueued (using a shared queue)
+
+ # Create a change in project3 that a project4 change will depend on
+ A = self.fake_github.openFakePullRequest('org/project3', 'master', 'A')
+
+ # Set B to depend on A
+ msg = "Depends-On: https://github.com/org/project3/pull/%s" % A.number
+ B = self.fake_github.openFakePullRequest('org/project4', 'master', 'B',
+ body=msg)
+
+ # Enqueue A, which when finished should enqueue B
+ self.fake_github.emitEvent(A.getPullRequestEditedEvent())
+ self.waitUntilSettled()
+
+        # The changes for the job from project4 should include the project3
+        # PR content
+ changes = self.getJobFromHistory(
+ 'project4-test', 'org/project4').changes
+
+ self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
+ A.head_sha,
+ B.number,
+ B.head_sha))
+
+ self.assertTrue(A.is_merged)
+ self.assertTrue(B.is_merged)
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index ba8e497..f360866 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -14,6 +14,7 @@
import re
from testtools.matchers import MatchesRegex, StartsWith
+import urllib
import time
from tests.base import ZuulTestCase, simple_layout, random_sha1
@@ -320,6 +321,46 @@
self.assertThat(report_status['url'][len(base):],
MatchesRegex('^[a-fA-F0-9]{32}\/$'))
+ @simple_layout('layouts/reporting-github.yaml', driver='github')
+ def test_push_reporting(self):
+ project = 'org/project2'
+ # pipeline reports pull status both on start and success
+ self.executor_server.hold_jobs_in_build = True
+ pevent = self.fake_github.getPushEvent(project=project,
+ ref='refs/heads/master')
+
+ self.fake_github.emitEvent(pevent)
+ self.waitUntilSettled()
+
+ # there should only be one report, a status
+ self.assertEqual(1, len(self.fake_github.reports))
+ # Verify the user/context/state of the status
+ status = ('zuul', 'tenant-one/push-reporting', 'pending')
+ self.assertEqual(status, self.fake_github.reports[0][-1])
+
+ # free the executor, allow the build to finish
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ # Now there should be a second report, the success of the build
+ self.assertEqual(2, len(self.fake_github.reports))
+ # Verify the user/context/state of the status
+ status = ('zuul', 'tenant-one/push-reporting', 'success')
+ self.assertEqual(status, self.fake_github.reports[-1][-1])
+
+ # now make a PR which should also comment
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_github.openFakePullRequest(project, 'master', 'A')
+ self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
+ self.waitUntilSettled()
+
+        # Now there should be four reports, a new comment
+        # and status
+ self.assertEqual(4, len(self.fake_github.reports))
+ self.executor_server.release()
+ self.waitUntilSettled()
+
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_report_pull_merge(self):
# pipeline merges the pull request on success
@@ -544,3 +585,18 @@
new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
+
+ @simple_layout('layouts/basic-github.yaml', driver='github')
+ def test_ping_event(self):
+ # Test valid ping
+ pevent = {'repository': {'full_name': 'org/project'}}
+ req = self.fake_github.emitEvent(('ping', pevent))
+ self.assertEqual(req.status, 200, "Ping event didn't succeed")
+
+ # Test invalid ping
+ pevent = {'repository': {'full_name': 'unknown-project'}}
+ self.assertRaises(
+ urllib.error.HTTPError,
+ self.fake_github.emitEvent,
+ ('ping', pevent),
+ )
diff --git a/tests/unit/test_github_requirements.py b/tests/unit/test_github_requirements.py
index 135f7ab..f125d1e 100644
--- a/tests/unit/test_github_requirements.py
+++ b/tests/unit/test_github_requirements.py
@@ -240,13 +240,10 @@
# The first negative review from derp should not cause it to be
# enqueued
- for i in range(1, 4):
- submitted_at = time.time() - 72 * 60 * 60
- A.addReview('derp', 'CHANGES_REQUESTED',
- submitted_at)
- self.fake_github.emitEvent(comment)
- self.waitUntilSettled()
- self.assertEqual(len(self.history), 0)
+ A.addReview('derp', 'CHANGES_REQUESTED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
# A positive review from derp should cause it to be enqueued
A.addReview('derp', 'APPROVED')
@@ -256,6 +253,37 @@
self.assertEqual(self.history[0].name, 'project5-reviewuserstate')
@simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_pipeline_require_review_comment_masked(self):
+ "Test pipeline requirement: review comments on top of votes"
+
+ A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
+ # Add derp to writers
+ A.writers.append('derp')
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No positive review from derp so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # The first negative review from derp should not cause it to be
+ # enqueued
+ A.addReview('derp', 'CHANGES_REQUESTED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A positive review is required, so provide it
+ A.addReview('derp', 'APPROVED')
+
+ # Add a comment review on top to make sure we can still enqueue
+ A.addReview('derp', 'COMMENTED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project5-reviewuserstate')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
def test_require_review_newer_than(self):
A = self.fake_github.openFakePullRequest('org/project6', 'master', 'A')
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index eb17966..e9eee54 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -3380,13 +3380,13 @@
raise Exception("Timeout waiting for gearman server to report "
+ "back to the client")
build = list(self.executor_client.builds.values())[0]
- if build.worker.name == "My Worker":
+ if build.worker.name == self.executor_server.hostname:
break
else:
time.sleep(0)
self.log.debug(build)
- self.assertEqual("My Worker", build.worker.name)
+ self.assertEqual(self.executor_server.hostname, build.worker.name)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
@@ -3553,7 +3553,7 @@
raise Exception("Timeout waiting for gearman server to report "
+ "back to the client")
build = list(self.executor_client.builds.values())[0]
- if build.worker.name == "My Worker":
+ if build.worker.name == self.executor_server.hostname:
break
else:
time.sleep(0)
@@ -5061,6 +5061,46 @@
self.executor_server.release()
self.waitUntilSettled()
+ def test_semaphore_new_patchset(self):
+ "Test new patchset with job semaphores"
+ self.executor_server.hold_jobs_in_build = True
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ check_pipeline = tenant.layout.pipelines['check']
+
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+ semaphore = tenant.semaphore_handler.semaphores['test-semaphore']
+ self.assertEqual(len(semaphore), 1)
+
+ A.addPatchset()
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
+ self.waitUntilSettled()
+
+ self.assertTrue('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+ semaphore = tenant.semaphore_handler.semaphores['test-semaphore']
+ self.assertEqual(len(semaphore), 1)
+
+ items = check_pipeline.getAllItems()
+ self.assertEqual(items[0].change.number, '1')
+ self.assertEqual(items[0].change.patchset, '2')
+ self.assertTrue(items[0].live)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ # The semaphore should be released
+ self.assertFalse('test-semaphore' in
+ tenant.semaphore_handler.semaphores)
+
def test_semaphore_reconfigure(self):
"Test reconfigure with job semaphores"
self.executor_server.hold_jobs_in_build = True
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 42bac3b..327f745 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -134,6 +134,71 @@
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1')])
+ def test_dynamic_config_new_patchset(self):
+ self.executor_server.hold_jobs_in_build = True
+
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ check_pipeline = tenant.layout.pipelines['check']
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test2
+
+ - project:
+ name: org/project
+ check:
+ jobs:
+ - project-test2
+ """)
+
+ in_repo_playbook = textwrap.dedent(
+ """
+ - hosts: all
+ tasks: []
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf,
+ 'playbooks/project-test2.yaml': in_repo_playbook}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ items = check_pipeline.getAllItems()
+ self.assertEqual(items[0].change.number, '1')
+ self.assertEqual(items[0].change.patchset, '1')
+ self.assertTrue(items[0].live)
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test2
+
+ - project:
+ name: org/project
+ check:
+ jobs:
+ - project-test1
+ - project-test2
+ """)
+ file_dict = {'.zuul.yaml': in_repo_conf,
+ 'playbooks/project-test2.yaml': in_repo_playbook}
+
+ A.addPatchset(files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
+
+ self.waitUntilSettled()
+
+ items = check_pipeline.getAllItems()
+ self.assertEqual(items[0].change.number, '1')
+ self.assertEqual(items[0].change.patchset, '2')
+ self.assertTrue(items[0].live)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
def test_in_repo_branch(self):
in_repo_conf = textwrap.dedent(
"""
@@ -478,6 +543,31 @@
self.assertIn(fail.format("failpost", build_failpost.uuid), msg)
+class TestPrePlaybooks(AnsibleZuulTestCase):
+ # A temporary class to hold new tests while others are disabled
+
+ tenant_config_file = 'config/pre-playbook/main.yaml'
+
+ def test_pre_playbook_fail(self):
+ # Test that we run the post playbooks (but not the actual
+ # playbook) when a pre-playbook fails.
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ build = self.getJobFromHistory('python27')
+ self.assertIsNone(build.result)
+ self.assertIn('RETRY_LIMIT', A.messages[0])
+ flag_path = os.path.join(self.test_root, build.uuid +
+ '.main.flag')
+ self.assertFalse(os.path.exists(flag_path))
+ pre_flag_path = os.path.join(self.test_root, build.uuid +
+ '.pre.flag')
+ self.assertFalse(os.path.exists(pre_flag_path))
+ post_flag_path = os.path.join(self.test_root, build.uuid +
+ '.post.flag')
+ self.assertTrue(os.path.exists(post_flag_path))
+
+
class TestBrokenConfig(ZuulTestCase):
# Test that we get an appropriate syntax error if we start with a
# broken config.
diff --git a/tools/encrypt_secret.py b/tools/encrypt_secret.py
old mode 100644
new mode 100755
index e36b24e..72429e9
--- a/tools/encrypt_secret.py
+++ b/tools/encrypt_secret.py
@@ -13,11 +13,19 @@
# under the License.
import argparse
+import base64
import os
import subprocess
import sys
import tempfile
-import urllib
+
+# We need to import Request and urlopen differently for python 2 and 3
+try:
+ from urllib.request import Request
+ from urllib.request import urlopen
+except ImportError:
+ from urllib2 import Request
+ from urllib2 import urlopen
DESCRIPTION = """Encrypt a secret for Zuul.
@@ -50,9 +58,9 @@
"to standard output.")
args = parser.parse_args()
- req = urllib.request.Request("%s/keys/%s/%s.pub" % (
+ req = Request("%s/keys/%s/%s.pub" % (
args.url, args.source, args.project))
- pubkey = urllib.request.urlopen(req)
+ pubkey = urlopen(req)
if args.infile:
with open(args.infile) as f:
@@ -70,18 +78,18 @@
pubkey_file.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate(plaintext)
+ (stdout, stderr) = p.communicate(plaintext.encode("utf-8"))
if p.returncode != 0:
raise Exception("Return code %s from openssl" % p.returncode)
- ciphertext = stdout.encode('base64')
+ ciphertext = base64.b64encode(stdout)
finally:
os.unlink(pubkey_file.name)
if args.outfile:
- with open(args.outfile, "w") as f:
+ with open(args.outfile, "wb") as f:
f.write(ciphertext)
else:
- print(ciphertext)
+ print(ciphertext.decode("utf-8"))
if __name__ == '__main__':
diff --git a/zuul/ansible/action/normal.py b/zuul/ansible/action/normal.py
index b18cb51..74e732e 100644
--- a/zuul/ansible/action/normal.py
+++ b/zuul/ansible/action/normal.py
@@ -27,7 +27,16 @@
or self._task.delegate_to == 'localhost'
or (self._task.delegate_to
and self._task.delegate_to.startswtih('127.'))):
- return dict(
- failed=True,
- msg="Executing local code is prohibited")
+ if self._task.action == 'stat':
+ paths._fail_if_unsafe(self._task.args['path'])
+ elif self._task.action == 'file':
+ dest = self._task.args.get(
+ 'path', self._task.args.get(
+ 'dest', self._task.args.get(
+ 'name')))
+ paths._fail_if_unsafe(dest)
+ else:
+ return dict(
+ failed=True,
+ msg="Executing local code is prohibited")
return super(ActionModule, self).run(tmp, task_vars)
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index e3d1e14..fb15f1c 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -92,6 +92,8 @@
self._play = None
self._streamers = []
self.configure_logger()
+ self._items_done = False
+ self._deferred_result = None
def configure_logger(self):
# ansible appends timestamp, user and pid to the log lines emitted
@@ -117,7 +119,7 @@
def _read_log(self, host, ip, log_id, task_name, hosts):
self._log("[%s] Starting to log %s for task %s"
- % (host, log_id, task_name), executor=True)
+ % (host, log_id, task_name), job=False, executor=True)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
@@ -170,6 +172,9 @@
ip = play_vars[host].get(
'ansible_host', play_vars[host].get(
'ansible_inventory_host'))
+ if ip in ('localhost', '127.0.0.1'):
+ # Don't try to stream from localhost
+ continue
streamer = threading.Thread(
target=self._read_log, args=(
host, ip, log_id, task_name, hosts))
@@ -187,34 +192,50 @@
msg = "[Zuul] Log Stream did not terminate"
self._log(msg, job=True, executor=True)
- def _process_result_for_localhost(self, result):
+ def _process_result_for_localhost(self, result, is_task=True):
+ result_dict = dict(result._result)
+ localhost_names = ('localhost', '127.0.0.1')
is_localhost = False
- delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ delegated_vars = result_dict.get('_ansible_delegated_vars', None)
if delegated_vars:
delegated_host = delegated_vars['ansible_host']
- if delegated_host in ('localhost', '127.0.0.1'):
+ if delegated_host in localhost_names:
+ is_localhost = True
+ else:
+ task_host = result._host.get_name()
+ task_hostvars = result._task._variable_manager._hostvars[task_host]
+ if task_hostvars['ansible_host'] in localhost_names:
is_localhost = True
- if not is_localhost:
+ if not is_localhost and is_task:
self._stop_streamers()
if result._task.action in ('command', 'shell'):
- stdout_lines = zuul_filter_result(result._result)
+ stdout_lines = zuul_filter_result(result_dict)
if is_localhost:
for line in stdout_lines:
- ts, ln = (x.strip() for x in line.split(' | ', 1))
- self._log("localhost | %s " % ln, ts=ts)
+ hostname = self._get_hostname(result)
+ self._log("%s | %s " % (hostname, line.strip()))
def v2_runner_on_failed(self, result, ignore_errors=False):
- self._process_result_for_localhost(result)
- self._handle_exception(result._result)
+ result_dict = dict(result._result)
- if result._task.loop and 'results' in result._result:
- self._process_items(result)
+ self._handle_exception(result_dict)
+
+ if result_dict.get('msg') == 'All items completed':
+ result_dict['status'] = 'ERROR'
+ self._deferred_result = result_dict
+ return
+
+ self._process_result_for_localhost(result)
+
+ if result._task.loop and 'results' in result_dict:
+ # items have their own events
+ pass
else:
self._log_message(
result=result,
msg="Results: => {results}".format(
- results=self._dump_results(result._result)),
+ results=self._dump_results(result_dict)),
status='ERROR')
if ignore_errors:
self._log_message(result, "Ignoring Errors", status="ERROR")
@@ -224,35 +245,107 @@
and self._last_task_banner != result._task._uuid):
self._print_task_banner(result._task)
- self._clean_results(result._result, result._task.action)
- self._process_result_for_localhost(result)
-
- if result._task.action in ('include', 'include_role'):
+ if result._task.action in ('include', 'include_role', 'setup'):
return
- if result._result.get('changed', False):
+ result_dict = dict(result._result)
+
+ self._clean_results(result_dict, result._task.action)
+
+ if result_dict.get('changed', False):
status = 'changed'
else:
status = 'ok'
- if result._task.loop and 'results' in result._result:
- self._process_items(result)
+ if (result_dict.get('msg') == 'All items completed'
+ and not self._items_done):
+ result_dict['status'] = status
+ self._deferred_result = result_dict
+ return
- self._handle_warnings(result._result)
+ if not result._task.loop:
+ self._process_result_for_localhost(result)
+ else:
+ self._items_done = False
- if result._task.loop and 'results' in result._result:
- self._process_items(result)
+ self._handle_warnings(result_dict)
+
+ if result._task.loop and 'results' in result_dict:
+ # items have their own events
+ pass
+
elif result._task.action not in ('command', 'shell'):
self._log_message(
result=result,
msg="Results: => {results}".format(
- results=self._dump_results(result._result)),
+ results=self._dump_results(result_dict)),
status=status)
+ elif 'results' in result_dict:
+ for res in result_dict['results']:
+ self._log_message(
+ result,
+ "Runtime: {delta} Start: {start} End: {end}".format(**res))
+ elif result_dict.get('msg') == 'All items completed':
+ self._log_message(result, result_dict['msg'])
else:
self._log_message(
result,
"Runtime: {delta} Start: {start} End: {end}".format(
- **result._result))
+ **result_dict))
+
+ def v2_runner_item_on_ok(self, result):
+ result_dict = dict(result._result)
+ self._process_result_for_localhost(result, is_task=False)
+
+ if result_dict.get('changed', False):
+ status = 'changed'
+ else:
+ status = 'ok'
+
+ if result._task.action not in ('command', 'shell'):
+ self._log_message(
+ result=result,
+ msg="Item: {item} => {results}".format(
+ item=result_dict['item'],
+ results=self._dump_results(result_dict)),
+ status=status)
+ else:
+ self._log_message(
+ result,
+ "Item: {item} Runtime: {delta}"
+ " Start: {start} End: {end}".format(**result_dict))
+
+ if self._deferred_result:
+ self._process_deferred(result)
+
+ def v2_runner_item_on_failed(self, result):
+ result_dict = dict(result._result)
+ self._process_result_for_localhost(result, is_task=False)
+
+ if result._task.action not in ('command', 'shell'):
+ self._log_message(
+ result=result,
+ msg="Item: {item} => {results}".format(
+ item=result_dict['item'],
+ results=self._dump_results(result_dict)),
+ status='ERROR')
+ else:
+ self._log_message(
+ result,
+ "Item: {item} Runtime: {delta}"
+ " Start: {start} End: {end}".format(**result_dict))
+
+ if self._deferred_result:
+ self._process_deferred(result)
+
+ def _process_deferred(self, result):
+ self._items_done = True
+ result_dict = self._deferred_result
+ self._deferred_result = None
+
+ self._log_message(
+ result, "All items complete",
+ status=result_dict['status'])
def _print_task_banner(self, task):
@@ -260,6 +353,10 @@
args = ''
task_args = task.args.copy()
+ if task.loop:
+ task_type = 'LOOP'
+ else:
+ task_type = 'TASK'
is_shell = task_args.pop('_uses_shell', False)
if is_shell and task_name == 'command':
task_name = 'shell'
@@ -273,7 +370,8 @@
args = u', '.join(u'%s=%s' % a for a in task_args.items())
args = u' %s' % args
- msg = "TASK [{task}{args}]".format(
+ msg = "{task_type} [{task}{args}]".format(
+ task_type=task_type,
task=task_name,
args=args)
self._log(msg)
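The callback changes above stash the task-level "All items completed" summary and only emit it once the per-item events have been logged, using _deferred_result and _items_done as the hand-off. A simplified, self-contained sketch of that hand-off pattern (the class, method names, and item counting here are illustrative, not the plugin's real event API):

    class LoopResultLogger(object):
        def __init__(self, expected_items):
            self.expected_items = expected_items
            self.items_seen = 0
            self.deferred_result = None

        def on_task_summary(self, result_dict, status):
            # Stash the "All items completed" summary instead of logging it now.
            result_dict['status'] = status
            self.deferred_result = result_dict

        def on_item(self, item, status):
            print("ITEM %s => %s" % (item, status))
            self.items_seen += 1
            if self.deferred_result and self.items_seen == self.expected_items:
                summary, self.deferred_result = self.deferred_result, None
                print("All items complete, status: %s" % summary['status'])

    logger = LoopResultLogger(expected_items=2)
    logger.on_task_summary({'msg': 'All items completed'}, 'ok')
    logger.on_item('a', 'ok')
    logger.on_item('b', 'ok')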
diff --git a/zuul/ansible/library/command.py b/zuul/ansible/library/command.py
index 99392cc..00020c7 100644
--- a/zuul/ansible/library/command.py
+++ b/zuul/ansible/library/command.py
@@ -148,7 +148,7 @@
self.logfile_name = LOG_STREAM_FILE.format(log_uuid=log_uuid)
def __enter__(self):
- self.logfile = open(self.logfile_name, 'a', 0)
+ self.logfile = open(self.logfile_name, 'ab', buffering=0)
return self
def __exit__(self, etype, value, tb):
@@ -161,7 +161,7 @@
# consistent.
ts = datetime.datetime.now()
outln = '%s | %s' % (ts, ln)
- self.logfile.write(outln)
+ self.logfile.write(outln.encode('utf-8'))
def follow(fd, log_uuid):
@@ -172,8 +172,8 @@
if not line:
break
_log_lines.append(line)
- if not line.endswith('\n'):
- line += '\n'
+ if not line.endswith(b'\n'):
+ line += b'\n'
newline_warning = True
console.addLine(line)
if newline_warning:
@@ -396,6 +396,7 @@
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
except Exception:
+ e = get_exception()
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
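The command module changes above move the streamed console log to unbuffered binary writes, encoding each timestamped line as UTF-8 so Python 3 does not mix bytes and str. A rough standalone equivalent of that writer (the class name and path are assumptions):

    import datetime

    class ConsoleLog(object):
        """Append timestamped lines to a log file as unbuffered UTF-8 bytes."""

        def __init__(self, path):
            # Unbuffered binary append, matching the open() call in the diff.
            self.logfile = open(path, 'ab', buffering=0)

        def add_line(self, line):
            ts = datetime.datetime.now()
            out = '%s | %s' % (ts, line)
            self.logfile.write(out.encode('utf-8'))

        def close(self):
            self.logfile.close()

    log = ConsoleLog('/tmp/console.log')
    log.add_line('hello world\n')
    log.close()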
diff --git a/zuul/ansible/library/zuul_console.py b/zuul/ansible/library/zuul_console.py
index 42f41f0..ac85dec 100644
--- a/zuul/ansible/library/zuul_console.py
+++ b/zuul/ansible/library/zuul_console.py
@@ -59,7 +59,7 @@
class Console(object):
def __init__(self, path):
self.path = path
- self.file = open(path)
+ self.file = open(path, 'rb')
self.stat = os.stat(path)
self.size = self.stat.st_size
diff --git a/zuul/cmd/executor.py b/zuul/cmd/executor.py
index 57ecfa3..6a1a214 100755
--- a/zuul/cmd/executor.py
+++ b/zuul/cmd/executor.py
@@ -40,9 +40,6 @@
# Similar situation with gear and statsd.
-DEFAULT_FINGER_PORT = 79
-
-
class Executor(zuul.cmd.ZuulApp):
def parse_arguments(self):
@@ -64,7 +61,7 @@
self.args = parser.parse_args()
def send_command(self, cmd):
- state_dir = get_default(self.config, 'zuul', 'state_dir',
+ state_dir = get_default(self.config, 'executor', 'state_dir',
'/var/lib/zuul', expand_user=True)
path = os.path.join(state_dir, 'executor.socket')
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -114,9 +111,9 @@
self.user = get_default(self.config, 'executor', 'user', 'zuul')
- if self.config.has_option('zuul', 'jobroot_dir'):
+ if self.config.has_option('executor', 'jobroot_dir'):
self.jobroot_dir = os.path.expanduser(
- self.config.get('zuul', 'jobroot_dir'))
+ self.config.get('executor', 'jobroot_dir'))
if not os.path.isdir(self.jobroot_dir):
print("Invalid jobroot_dir: {jobroot_dir}".format(
jobroot_dir=self.jobroot_dir))
@@ -127,8 +124,10 @@
self.setup_logging('executor', 'log_config')
self.log = logging.getLogger("zuul.Executor")
- self.finger_port = int(get_default(self.config, 'executor',
- 'finger_port', DEFAULT_FINGER_PORT))
+ self.finger_port = int(
+ get_default(self.config, 'executor', 'finger_port',
+ zuul.executor.server.DEFAULT_FINGER_PORT)
+ )
self.start_log_streamer()
self.change_privs()
@@ -136,7 +135,8 @@
ExecutorServer = zuul.executor.server.ExecutorServer
self.executor = ExecutorServer(self.config, self.connections,
jobdir_root=self.jobroot_dir,
- keep_jobdir=self.args.keep_jobdir)
+ keep_jobdir=self.args.keep_jobdir,
+ log_streaming_port=self.finger_port)
self.executor.start()
signal.signal(signal.SIGUSR2, zuul.cmd.stack_dump_handler)
diff --git a/zuul/cmd/merger.py b/zuul/cmd/merger.py
index 97f208c..c5cfd6c 100755
--- a/zuul/cmd/merger.py
+++ b/zuul/cmd/merger.py
@@ -80,7 +80,7 @@
server.read_config()
server.configure_connections(source_only=True)
- state_dir = get_default(server.config, 'zuul', 'state_dir',
+ state_dir = get_default(server.config, 'merger', 'state_dir',
'/var/lib/zuul', expand_user=True)
test_fn = os.path.join(state_dir, 'test')
try:
diff --git a/zuul/cmd/scheduler.py b/zuul/cmd/scheduler.py
index b32deaf..b7b12fe 100755
--- a/zuul/cmd/scheduler.py
+++ b/zuul/cmd/scheduler.py
@@ -59,7 +59,7 @@
signal.signal(signal.SIGHUP, signal.SIG_IGN)
self.log.debug("Reconfiguration triggered")
self.read_config()
- self.setup_logging('zuul', 'log_config')
+ self.setup_logging('scheduler', 'log_config')
try:
self.sched.reconfigure(self.config)
except Exception:
@@ -140,7 +140,7 @@
self.config.getboolean('gearman_server', 'start')):
self.start_gear_server()
- self.setup_logging('zuul', 'log_config')
+ self.setup_logging('scheduler', 'log_config')
self.log = logging.getLogger("zuul.Scheduler")
self.sched = zuul.scheduler.Scheduler(self.config)
@@ -150,13 +150,12 @@
nodepool = zuul.nodepool.Nodepool(self.sched)
zookeeper = zuul.zk.ZooKeeper()
- zookeeper_hosts = get_default(self.config, 'zuul', 'zookeeper_hosts',
- '127.0.0.1:2181')
+ zookeeper_hosts = get_default(self.config, 'zookeeper',
+ 'hosts', '127.0.0.1:2181')
zookeeper.connect(zookeeper_hosts)
- cache_expiry = get_default(self.config, 'zuul', 'status_expiry', 1)
-
+ cache_expiry = get_default(self.config, 'webapp', 'status_expiry', 1)
listen_address = get_default(self.config, 'webapp', 'listen_address',
'0.0.0.0')
port = get_default(self.config, 'webapp', 'port', 8001)
@@ -208,7 +207,7 @@
if scheduler.args.validate:
sys.exit(scheduler.test_config())
- pid_fn = get_default(scheduler.config, 'zuul', 'pidfile',
+ pid_fn = get_default(scheduler.config, 'scheduler', 'pidfile',
'/var/run/zuul-scheduler/zuul-scheduler.pid',
expand_user=True)
pid = pid_file_module.TimeoutPIDLockFile(pid_fn, 10)
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 84227f8..4246206 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -1134,7 +1134,7 @@
job = merger.getFiles(
project.source.connection.connection_name,
project.name, branch,
- files=['.zuul.yaml'])
+ files=['zuul.yaml', '.zuul.yaml'])
job.source_context = model.SourceContext(
project, branch, '', False)
jobs.append(job)
@@ -1324,15 +1324,16 @@
def _loadDynamicProjectData(self, config, project, files, trusted):
if trusted:
branches = ['master']
- fn = 'zuul.yaml'
else:
branches = project.source.getProjectBranches(project)
- fn = '.zuul.yaml'
for branch in branches:
incdata = None
- data = files.getFile(project.source.connection.connection_name,
- project.name, branch, fn)
+ for fn in ['zuul.yaml', '.zuul.yaml']:
+ data = files.getFile(project.source.connection.connection_name,
+ project.name, branch, fn)
+ if data:
+ break
if data:
source_context = model.SourceContext(project, branch,
fn, trusted)
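Both configloader hunks above teach the loader to accept either zuul.yaml or the hidden .zuul.yaml, taking the first candidate that returns data on a given branch. A hedged sketch of that lookup order (FakeFiles stands in for the merger's files object; only getFile's call shape is taken from the diff):

    def find_config_file(files, connection_name, project_name, branch):
        # Prefer zuul.yaml, fall back to .zuul.yaml, mirroring the loop
        # added to _loadDynamicProjectData.
        for fn in ('zuul.yaml', '.zuul.yaml'):
            data = files.getFile(connection_name, project_name, branch, fn)
            if data:
                return fn, data
        return None, None

    class FakeFiles(object):
        def getFile(self, connection, project, branch, fn):
            return '- pipeline: {name: check}' if fn == '.zuul.yaml' else None

    print(find_config_file(FakeFiles(), 'gerrit', 'org/project', 'master'))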
diff --git a/zuul/driver/bubblewrap/__init__.py b/zuul/driver/bubblewrap/__init__.py
index 9e9a26e..5ec2448 100644
--- a/zuul/driver/bubblewrap/__init__.py
+++ b/zuul/driver/bubblewrap/__init__.py
@@ -70,35 +70,11 @@
name = 'bubblewrap'
log = logging.getLogger("zuul.BubblewrapDriver")
- bwrap_command = [
- 'bwrap',
- '--dir', '/tmp',
- '--tmpfs', '/tmp',
- '--dir', '/var',
- '--dir', '/var/tmp',
- '--dir', '/run/user/{uid}',
- '--ro-bind', '/usr', '/usr',
- '--ro-bind', '/lib', '/lib',
- '--ro-bind', '/lib64', '/lib64',
- '--ro-bind', '/bin', '/bin',
- '--ro-bind', '/sbin', '/sbin',
- '--ro-bind', '/etc/resolv.conf', '/etc/resolv.conf',
- '--ro-bind', '{ssh_auth_sock}', '{ssh_auth_sock}',
- '--dir', '{work_dir}',
- '--bind', '{work_dir}', '{work_dir}',
- '--dev', '/dev',
- '--dir', '{user_home}',
- '--chdir', '{work_dir}',
- '--unshare-all',
- '--share-net',
- '--die-with-parent',
- '--uid', '{uid}',
- '--gid', '{gid}',
- '--file', '{uid_fd}', '/etc/passwd',
- '--file', '{gid_fd}', '/etc/group',
- ]
mounts_map = {'rw': [], 'ro': []}
+ def __init__(self):
+ self.bwrap_command = self._bwrap_command()
+
def reconfigure(self, tenant):
pass
@@ -128,7 +104,9 @@
# Need users and groups
uid = os.getuid()
- passwd = pwd.getpwuid(uid)
+ passwd = list(pwd.getpwuid(uid))
+ # Replace our user's actual home directory with the work dir.
+ passwd = passwd[:5] + [kwargs['work_dir']] + passwd[6:]
passwd_bytes = b':'.join(
['{}'.format(x).encode('utf8') for x in passwd])
(passwd_r, passwd_w) = os.pipe()
@@ -150,7 +128,6 @@
kwargs['gid'] = gid
kwargs['uid_fd'] = passwd_r
kwargs['gid_fd'] = group_r
- kwargs['user_home'] = passwd.pw_dir
command = [x.format(**kwargs) for x in bwrap_command]
self.log.debug("Bubblewrap command: %s",
@@ -160,6 +137,38 @@
return wrapped_popen
+ def _bwrap_command(self):
+ bwrap_command = [
+ 'bwrap',
+ '--dir', '/tmp',
+ '--tmpfs', '/tmp',
+ '--dir', '/var',
+ '--dir', '/var/tmp',
+ '--dir', '/run/user/{uid}',
+ '--ro-bind', '/usr', '/usr',
+ '--ro-bind', '/lib', '/lib',
+ '--ro-bind', '/bin', '/bin',
+ '--ro-bind', '/sbin', '/sbin',
+ '--ro-bind', '/etc/resolv.conf', '/etc/resolv.conf',
+ '--ro-bind', '{ssh_auth_sock}', '{ssh_auth_sock}',
+ '--dir', '{work_dir}',
+ '--bind', '{work_dir}', '{work_dir}',
+ '--dev', '/dev',
+ '--chdir', '{work_dir}',
+ '--unshare-all',
+ '--share-net',
+ '--die-with-parent',
+ '--uid', '{uid}',
+ '--gid', '{gid}',
+ '--file', '{uid_fd}', '/etc/passwd',
+ '--file', '{gid_fd}', '/etc/group',
+ ]
+
+ if os.path.isdir('/lib64'):
+ bwrap_command.extend(['--ro-bind', '/lib64', '/lib64'])
+
+ return bwrap_command
+
def main(args=None):
logging.basicConfig(level=logging.DEBUG)
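Moving the argument list into _bwrap_command() lets the driver add the /lib64 read-only bind only on platforms that actually have that directory, and the passwd rewrite above points the sandboxed user's home at the work dir instead of binding the real home. A cut-down sketch of the conditional command construction (only a handful of the real flags are shown):

    import os

    def build_bwrap_command(work_dir):
        cmd = [
            'bwrap',
            '--ro-bind', '/usr', '/usr',
            '--ro-bind', '/lib', '/lib',
            '--bind', work_dir, work_dir,
            '--chdir', work_dir,
            '--unshare-all',
            '--share-net',
            '--die-with-parent',
        ]
        # Bind /lib64 only where it exists, as the new _bwrap_command() does.
        if os.path.isdir('/lib64'):
            cmd.extend(['--ro-bind', '/lib64', '/lib64'])
        return cmd

    print(' '.join(build_bwrap_command('/tmp/work')))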
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 39a81bc..924a42f 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -237,6 +237,7 @@
class GerritConnection(BaseConnection):
driver_name = 'gerrit'
log = logging.getLogger("zuul.GerritConnection")
+ iolog = logging.getLogger("zuul.GerritConnection.io")
depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
re.MULTILINE | re.IGNORECASE)
replication_timeout = 300
@@ -631,8 +632,8 @@
data = json.loads(lines[0])
if not data:
return False
- self.log.debug("Received data from Gerrit query: \n%s" %
- (pprint.pformat(data)))
+ self.iolog.debug("Received data from Gerrit query: \n%s" %
+ (pprint.pformat(data)))
return data
def simpleQuery(self, query):
@@ -662,8 +663,8 @@
if not data:
return False, more_changes
- self.log.debug("Received data from Gerrit query: \n%s" %
- (pprint.pformat(data)))
+ self.iolog.debug("Received data from Gerrit query: \n%s" %
+ (pprint.pformat(data)))
return data, more_changes
# gerrit returns 500 results by default, so implement paging
@@ -717,14 +718,17 @@
stdin.write(stdin_data)
out = stdout.read().decode('utf-8')
- self.log.debug("SSH received stdout:\n%s" % out)
+ self.iolog.debug("SSH received stdout:\n%s" % out)
ret = stdout.channel.recv_exit_status()
self.log.debug("SSH exit status: %s" % ret)
err = stderr.read().decode('utf-8')
- self.log.debug("SSH received stderr:\n%s" % err)
+ if err.strip():
+ self.log.debug("SSH received stderr:\n%s" % err)
+
if ret:
+ self.log.debug("SSH received stdout:\n%s" % out)
raise Exception("Gerrit error executing %s" % command)
return (out, err)
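The Gerrit connection now sends its bulky query and SSH payload dumps to a dedicated child logger, zuul.GerritConnection.io, so operators can silence the I/O noise without losing normal connection logging. A short illustration of that split (the handler setup here is an assumption; only the logger names come from the diff):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("zuul.GerritConnection")
    iolog = logging.getLogger("zuul.GerritConnection.io")

    # Silence only the payload dumps; the parent logger is unaffected.
    iolog.setLevel(logging.INFO)

    log.debug("SSH exit status: %s", 0)                      # still emitted
    iolog.debug("SSH received stdout:\n%s", "lots of text")  # filtered out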
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 7a3491e..1a9e37b 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -18,6 +18,7 @@
import hmac
import hashlib
import time
+import re
import cachecontrol
from cachecontrol.cache import DictCache
@@ -72,7 +73,12 @@
self._validate_signature(request)
- self.__dispatch_event(request)
+ try:
+ self.__dispatch_event(request)
+ except webob.exc.HTTPNotFound:
+ raise
+ except:
+ self.log.exception("Exception handling Github event:")
def __dispatch_event(self, request):
try:
@@ -88,7 +94,8 @@
except AttributeError:
message = "Unhandled X-Github-Event: {0}".format(event)
self.log.debug(message)
- raise webob.exc.HTTPBadRequest(message)
+ # Returns empty 200 on unhandled events
+ raise webob.exc.HTTPOk()
try:
json_body = request.json_body
@@ -113,6 +120,8 @@
try:
event = method(json_body)
+ except webob.exc.HTTPNotFound:
+ raise
except:
self.log.exception('Exception when handling event:')
event = None
@@ -172,6 +181,8 @@
elif action == 'unlabeled':
event.action = 'unlabeled'
event.label = body['label']['name']
+ elif action == 'edited':
+ event.action = 'edited'
else:
return None
@@ -213,6 +224,14 @@
event.action = body.get('action')
return event
+ def _event_ping(self, body):
+ project_name = body['repository']['full_name']
+ if not self.connection.getProject(project_name):
+ self.log.warning("Ping received for unknown project %s" %
+ project_name)
+ raise webob.exc.HTTPNotFound("Sorry, this project is not "
+ "registered")
+
def _event_status(self, body):
action = body.get('action')
if action == 'pending':
@@ -334,9 +353,9 @@
self._change_cache = {}
self.projects = {}
self.git_ssh_key = self.connection_config.get('sshkey')
- self.git_host = self.connection_config.get('git_host', 'github.com')
+ self.server = self.connection_config.get('server', 'github.com')
self.canonical_hostname = self.connection_config.get(
- 'canonical_hostname', self.git_host)
+ 'canonical_hostname', self.server)
self.source = driver.getSource(self)
self._github = None
@@ -353,6 +372,12 @@
DictCache(),
cache_etags=True)
+ # The regex is based on the connection host. We do not yet support
+ # cross-connection dependency gathering
+ self.depends_on_re = re.compile(
+ r"^Depends-On: https://%s/.+/.+/pull/[0-9]+$" % self.server,
+ re.MULTILINE | re.IGNORECASE)
+
def onLoad(self):
webhook_listener = GithubWebhookListener(self)
self.registerHttpHandler(self.payload_path,
@@ -363,8 +388,8 @@
self.unregisterHttpHandler(self.payload_path)
def _createGithubClient(self):
- if self.git_host != 'github.com':
- url = 'https://%s/' % self.git_host
+ if self.server != 'github.com':
+ url = 'https://%s/' % self.server
github = github3.GitHubEnterprise(url)
else:
github = github3.GitHub()
@@ -427,7 +452,7 @@
data = {'iat': now, 'exp': expiry, 'iss': self.app_id}
app_token = jwt.encode(data,
self.app_key,
- algorithm='RS256')
+ algorithm='RS256').decode('utf-8')
url = ACCESS_TOKEN_URL % installation_id
headers = {'Accept': PREVIEW_JSON_ACCEPT,
@@ -476,8 +501,6 @@
if event.change_number:
change = self._getChange(project, event.change_number,
event.patch_number, refresh=refresh)
- change.refspec = event.refspec
- change.branch = event.branch
change.url = event.change_url
change.updated_at = self._ghTimestampToDate(event.updated_at)
change.source_event = event
@@ -495,8 +518,9 @@
change = Ref(project)
return change
- def _getChange(self, project, number, patchset, refresh=False):
- key = '%s/%s/%s' % (project.name, number, patchset)
+ def _getChange(self, project, number, patchset=None, refresh=False,
+ history=None):
+ key = (project.name, number, patchset)
change = self._change_cache.get(key)
if change and not refresh:
return change
@@ -507,41 +531,142 @@
change.patchset = patchset
self._change_cache[key] = change
try:
- self._updateChange(change)
+ self._updateChange(change, history)
except Exception:
if key in self._change_cache:
del self._change_cache[key]
raise
return change
- def _updateChange(self, change):
+ def _getDependsOnFromPR(self, body):
+ prs = []
+ seen = set()
+
+ for match in self.depends_on_re.findall(body):
+ if match in seen:
+ self.log.debug("Ignoring duplicate Depends-On: %s" % (match,))
+ continue
+ seen.add(match)
+ # Get the github url
+ url = match.rsplit()[-1]
+ # break it into the parts we need
+ _, org, proj, _, num = url.rsplit('/', 4)
+ # Get a pull object so we can get the head sha
+ pull = self.getPull('%s/%s' % (org, proj), int(num))
+ prs.append(pull)
+
+ return prs
+
+ def _getNeededByFromPR(self, change):
+ prs = []
+ seen = set()
+ # This shouldn't return duplicate issues, but code as if it could
+
+ # This leaves off the protocol, but looks for the specific GitHub
+ # hostname, the org/project, and the pull request number.
+ pattern = 'Depends-On %s/%s/pull/%s' % (self.server,
+ change.project.name,
+ change.number)
+ query = '%s type:pr is:open in:body' % pattern
+ github = self.getGithubClient()
+ for issue in github.search_issues(query=query):
+ pr = issue.issue.pull_request().as_dict()
+ if not pr.get('url'):
+ continue
+ if issue in seen:
+ continue
+ # the issue provides no good description of the project :\
+ org, proj, _, num = pr.get('url').split('/')[-4:]
+ self.log.debug("Found PR %s/%s/%s needs %s/%s" %
+ (org, proj, num, change.project.name,
+ change.number))
+ prs.append(pr)
+ seen.add(issue)
+
+ log_rate_limit(self.log, github)
+ return prs
+
+ def _updateChange(self, change, history=None):
+
+ # If this change is already in the history, we have a cyclic
+ # dependency loop and we do not need to update again, since it
+ # was done in a previous frame.
+ if history and (change.project.name, change.number) in history:
+ return change
+
self.log.info("Updating %s" % (change,))
change.pr = self.getPull(change.project.name, change.number)
+ change.refspec = "refs/pull/%s/head" % change.number
+ change.branch = change.pr.get('base').get('ref')
change.files = change.pr.get('files')
change.title = change.pr.get('title')
change.open = change.pr.get('state') == 'open'
+ change.is_merged = change.pr.get('merged')
change.status = self._get_statuses(change.project,
change.patchset)
change.reviews = self.getPullReviews(change.project,
change.number)
change.labels = change.pr.get('labels')
+ change.body = change.pr.get('body')
+ # ensure body is at least an empty string
+ if not change.body:
+ change.body = ''
+
+ if history is None:
+ history = []
+ else:
+ history = history[:]
+ history.append((change.project.name, change.number))
+
+ needs_changes = []
+
+ # Get all the PRs this may depend on
+ for pr in self._getDependsOnFromPR(change.body):
+ proj = pr.get('base').get('repo').get('full_name')
+ pull = pr.get('number')
+ self.log.debug("Updating %s: Getting dependent "
+ "pull request %s/%s" %
+ (change, proj, pull))
+ project = self.source.getProject(proj)
+ dep = self._getChange(project, pull,
+ patchset=pr.get('head').get('sha'),
+ history=history)
+ if (not dep.is_merged) and dep not in needs_changes:
+ needs_changes.append(dep)
+
+ change.needs_changes = needs_changes
+
+ needed_by_changes = []
+ for pr in self._getNeededByFromPR(change):
+ proj = pr.get('base').get('repo').get('full_name')
+ pull = pr.get('number')
+ self.log.debug("Updating %s: Getting needed "
+ "pull request %s/%s" %
+ (change, proj, pull))
+ project = self.source.getProject(proj)
+ dep = self._getChange(project, pull,
+ patchset=pr.get('head').get('sha'),
+ history=history)
+ if not dep.is_merged:
+ needed_by_changes.append(dep)
+ change.needed_by_changes = needed_by_changes
return change
def getGitUrl(self, project):
if self.git_ssh_key:
- return 'ssh://git@%s/%s.git' % (self.git_host, project)
+ return 'ssh://git@%s/%s.git' % (self.server, project)
if self.app_id:
installation_key = self._get_installation_key(project)
return 'https://x-access-token:%s@%s/%s' % (installation_key,
- self.git_host,
+ self.server,
project)
- return 'https://%s/%s' % (self.git_host, project)
+ return 'https://%s/%s' % (self.server, project)
def getGitwebUrl(self, project, sha=None):
- url = 'https://%s/%s' % (self.git_host, project)
+ url = 'https://%s/%s' % (self.server, project)
if sha is not None:
url += '/commit/%s' % sha
return url
@@ -654,8 +779,19 @@
# if there are multiple reviews per user, keep the newest
# note that this breaks the ability to set the 'older-than'
# option on a review requirement.
+ # BUT do not keep the latest if it's a 'commented' type and the
+ # previous review was 'approved' or 'changes_requested', as
+ # the GitHub model does not change the vote if a comment is
+ # added after the fact. THANKS GITHUB!
if review['grantedOn'] > reviews[user]['grantedOn']:
- reviews[user] = review
+ if (review['type'] == 'commented' and reviews[user]['type']
+ in ('approved', 'changes_requested')):
+ self.log.debug("Discarding comment review %s due to "
+ "an existing vote %s" % (review,
+ reviews[user]))
+ pass
+ else:
+ reviews[user] = review
return reviews.values()
@@ -676,7 +812,7 @@
return GithubUser(self.getGithubClient(), login)
def getUserUri(self, login):
- return 'https://%s/%s' % (self.git_host, login)
+ return 'https://%s/%s' % (self.server, login)
def getRepoPermission(self, project, login):
github = self.getGithubClient(project)
@@ -686,7 +822,8 @@
headers = {'Accept': 'application/vnd.github.korra-preview'}
# Create a repo object
- repository = github.repository(owner, project)
+ repository = github.repository(owner, proj)
+
# Build up a URL
url = repository._build_url('collaborators', login, 'permission',
base_url=repository._api)
diff --git a/zuul/driver/github/githubreporter.py b/zuul/driver/github/githubreporter.py
index 37cbe61..ea41ccd 100644
--- a/zuul/driver/github/githubreporter.py
+++ b/zuul/driver/github/githubreporter.py
@@ -43,10 +43,13 @@
"""Report on an event."""
# order is important for github branch protection.
# A status should be set before a merge attempt
- if (self._commit_status is not None and
- hasattr(item.change, 'patchset') and
- item.change.patchset is not None):
- self.setCommitStatus(item)
+ if self._commit_status is not None:
+ if (hasattr(item.change, 'patchset') and
+ item.change.patchset is not None):
+ self.setCommitStatus(item)
+ elif (hasattr(item.change, 'newrev') and
+ item.change.newrev is not None):
+ self.setCommitStatus(item)
# Comments, labels, and merges can only be performed on pull requests.
# If the change is not a pull request (e.g. a push) skip them.
if hasattr(item.change, 'number'):
@@ -71,7 +74,10 @@
def setCommitStatus(self, item):
project = item.change.project.name
- sha = item.change.patchset
+ if hasattr(item.change, 'patchset'):
+ sha = item.change.patchset
+ elif hasattr(item.change, 'newrev'):
+ sha = item.change.newrev
context = '%s/%s' % (item.pipeline.layout.tenant.name,
item.pipeline.name)
state = self._commit_status
@@ -79,8 +85,8 @@
url_pattern = self.config.get('status-url')
if not url_pattern:
sched_config = self.connection.sched.config
- if sched_config.has_option('zuul', 'status_url'):
- url_pattern = sched_config.get('zuul', 'status_url')
+ if sched_config.has_option('webapp', 'status_url'):
+ url_pattern = sched_config.get('webapp', 'status_url')
url = item.formatUrlPattern(url_pattern) if url_pattern else ''
description = ''
@@ -139,7 +145,7 @@
if change.title:
message += change.title
- account = change.source_event.account
+ account = getattr(change.source_event, 'account', None)
if not account:
return message
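With the reporter change above, commit statuses are also set for push events: pull requests carry the head sha in patchset, while pushed refs carry it in newrev. A minimal sketch of that selection (the helper name and the Push stand-in are assumptions):

    def status_sha(change):
        # Pull requests: head sha lives in 'patchset'; pushes: in 'newrev'.
        if getattr(change, 'patchset', None) is not None:
            return change.patchset
        if getattr(change, 'newrev', None) is not None:
            return change.newrev
        return None

    class Push(object):
        newrev = 'abc123'

    print(status_sha(Push()))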
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index 889e01f..3612eae 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -321,9 +321,9 @@
make_project_dict(project,
job_project.override_branch))
projects.add(project)
- for item in all_items:
- if item.change.project not in projects:
- project = item.change.project
+ for i in all_items:
+ if i.change.project not in projects:
+ project = i.change.project
params['projects'].append(make_project_dict(project))
projects.add(project)
@@ -337,7 +337,7 @@
gearman_job = gear.TextJob('executor:execute', json.dumps(params),
unique=uuid)
build.__gearman_job = gearman_job
- build.__gearman_manager = None
+ build.__gearman_worker = None
self.builds[uuid] = build
if pipeline.precedence == zuul.model.PRECEDENCE_NORMAL:
@@ -442,7 +442,7 @@
if not started:
self.log.info("Build %s started" % job)
- build.__gearman_manager = data.get('manager')
+ build.__gearman_worker = data.get('worker_name')
self.sched.onBuildStarted(build)
else:
self.log.error("Unable to find build %s" % job.unique)
@@ -471,12 +471,12 @@
return False
def cancelRunningBuild(self, build):
- if not build.__gearman_manager:
+ if not build.__gearman_worker:
self.log.error("Build %s has no manager while canceling" %
(build,))
stop_uuid = str(uuid4().hex)
data = dict(uuid=build.__gearman_job.unique)
- stop_job = gear.TextJob("executor:stop:%s" % build.__gearman_manager,
+ stop_job = gear.TextJob("executor:stop:%s" % build.__gearman_worker,
json.dumps(data), unique=stop_uuid)
self.meta_jobs[stop_uuid] = stop_job
self.log.debug("Submitting stop job: %s", stop_job)
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 3a4ddaf..6c390db 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -13,6 +13,7 @@
# under the License.
import collections
+import datetime
import json
import logging
import os
@@ -34,8 +35,10 @@
import zuul.ansible
from zuul.lib import commandsocket
+BUFFER_LINES_FOR_SYNTAX = 200
COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
- 'unverbose']
+ 'unverbose', 'keep', 'nokeep']
+DEFAULT_FINGER_PORT = 79
class Watchdog(object):
@@ -357,13 +360,14 @@
log = logging.getLogger("zuul.ExecutorServer")
def __init__(self, config, connections={}, jobdir_root=None,
- keep_jobdir=False):
+ keep_jobdir=False, log_streaming_port=DEFAULT_FINGER_PORT):
self.config = config
self.keep_jobdir = keep_jobdir
self.jobdir_root = jobdir_root
# TODOv3(mordred): make the executor name more unique --
# perhaps hostname+pid.
self.hostname = socket.gethostname()
+ self.log_streaming_port = log_streaming_port
self.zuul_url = config.get('merger', 'zuul_url')
self.merger_lock = threading.Lock()
self.verbose = False
@@ -374,6 +378,8 @@
graceful=self.graceful,
verbose=self.verboseOn,
unverbose=self.verboseOff,
+ keep=self.keep,
+ nokeep=self.nokeep,
)
self.merge_root = get_default(self.config, 'executor', 'git_dir',
@@ -394,7 +400,7 @@
self.merger = self._getMerger(self.merge_root)
self.update_queue = DeduplicateQueue()
- state_dir = get_default(self.config, 'zuul', 'state_dir',
+ state_dir = get_default(self.config, 'executor', 'state_dir',
'/var/lib/zuul', expand_user=True)
path = os.path.join(state_dir, 'executor.socket')
self.command_socket = commandsocket.CommandSocket(path)
@@ -510,6 +516,12 @@
def verboseOff(self):
self.verbose = False
+ def keep(self):
+ self.keep_jobdir = True
+
+ def nokeep(self):
+ self.keep_jobdir = False
+
def join(self):
self.update_thread.join()
self.merger_thread.join()
@@ -727,12 +739,11 @@
self.log.exception("Error stopping SSH agent:")
def _execute(self):
- self.log.debug("Job %s: beginning" % (self.job.unique,))
- self.log.debug("Job %s: args: %s" % (self.job.unique,
- self.job.arguments,))
- self.log.debug("Job %s: job root at %s" %
- (self.job.unique, self.jobdir.root))
args = json.loads(self.job.arguments)
+ self.log.debug("Beginning job %s for ref %s" %
+ (self.job.name, args['vars']['zuul']['ref']))
+ self.log.debug("Args: %s" % (self.job.arguments,))
+ self.log.debug("Job root: %s" % (self.jobdir.root,))
tasks = []
projects = set()
@@ -800,12 +811,24 @@
self.prepareAnsibleFiles(args)
data = {
- 'manager': self.executor_server.hostname,
- 'url': 'finger://{server}/{unique}'.format(
- unique=self.job.unique,
- server=self.executor_server.hostname),
- 'worker_name': 'My Worker',
+ # TODO(mordred) worker_name is needed as a unique name for the
+ # client to use for cancelling jobs on an executor. It's defaulting
+ # to the hostname for now, but in the future we should allow
+ # setting a per-executor override so that one can run more than
+ # one executor on a host.
+ 'worker_name': self.executor_server.hostname,
+ 'worker_hostname': self.executor_server.hostname,
+ 'worker_log_port': self.executor_server.log_streaming_port
}
+ if self.executor_server.log_streaming_port != DEFAULT_FINGER_PORT:
+ data['url'] = "finger://{hostname}:{port}/{uuid}".format(
+ hostname=data['worker_hostname'],
+ port=data['worker_log_port'],
+ uuid=self.job.unique)
+ else:
+ data['url'] = 'finger://{hostname}/{uuid}'.format(
+ hostname=data['worker_hostname'],
+ uuid=self.job.unique)
self.job.sendWorkData(json.dumps(data))
self.job.sendWorkStatus(0, 100)
@@ -858,6 +881,7 @@
def runPlaybooks(self, args):
result = None
+ pre_failed = False
for playbook in self.jobdir.pre_playbooks:
# TODOv3(pabelanger): Implement pre-run timeout setting.
pre_status, pre_code = self.runAnsiblePlaybook(
@@ -865,31 +889,37 @@
if pre_status != self.RESULT_NORMAL or pre_code != 0:
# These should really never fail, so return None and have
# zuul try again
+ pre_failed = True
+ success = False
+ break
+
+ if not pre_failed:
+ job_status, job_code = self.runAnsiblePlaybook(
+ self.jobdir.playbook, args['timeout'])
+ if job_status == self.RESULT_TIMED_OUT:
+ return 'TIMED_OUT'
+ if job_status == self.RESULT_ABORTED:
+ return 'ABORTED'
+ if job_status != self.RESULT_NORMAL:
+ # The result of the job is indeterminate. Zuul will
+ # run it again.
return result
- job_status, job_code = self.runAnsiblePlaybook(
- self.jobdir.playbook, args['timeout'])
- if job_status == self.RESULT_TIMED_OUT:
- return 'TIMED_OUT'
- if job_status == self.RESULT_ABORTED:
- return 'ABORTED'
- if job_status != self.RESULT_NORMAL:
- # The result of the job is indeterminate. Zuul will
- # run it again.
- return result
-
- success = (job_code == 0)
- if success:
- result = 'SUCCESS'
- else:
- result = 'FAILURE'
+ success = (job_code == 0)
+ if success:
+ result = 'SUCCESS'
+ else:
+ result = 'FAILURE'
for playbook in self.jobdir.post_playbooks:
# TODOv3(pabelanger): Implement post-run timeout setting.
post_status, post_code = self.runAnsiblePlaybook(
playbook, args['timeout'], success)
if post_status != self.RESULT_NORMAL or post_code != 0:
- result = 'POST_FAILURE'
+ # If we encountered a pre-failure, that takes
+ # precedence over the post result.
+ if not pre_failed:
+ result = 'POST_FAILURE'
return result
def getHostList(self, args):
@@ -1262,7 +1292,7 @@
'%s_ro_dirs' % opt_prefix)
rw_dirs = get_default(self.executor_server.config, 'executor',
'%s_rw_dirs' % opt_prefix)
- state_dir = get_default(self.executor_server.config, 'zuul',
+ state_dir = get_default(self.executor_server.config, 'executor',
'state_dir', '/var/lib/zuul', expand_user=True)
ro_dirs = ro_dirs.split(":") if ro_dirs else []
rw_dirs = rw_dirs.split(":") if rw_dirs else []
@@ -1274,6 +1304,9 @@
ssh_auth_sock=env_copy.get('SSH_AUTH_SOCK'))
env_copy['ANSIBLE_CONFIG'] = config_file
+ # NOTE(pabelanger): Default HOME variable to jobdir.work_root, as it is
+ # possible we don't bind mount current zuul user home directory.
+ env_copy['HOME'] = self.jobdir.work_root
with self.proc_lock:
if self.aborted:
@@ -1289,13 +1322,16 @@
env=env_copy,
)
+ syntax_buffer = []
ret = None
if timeout:
watchdog = Watchdog(timeout, self._ansibleTimeout,
("Ansible timeout exceeded",))
watchdog.start()
try:
- for line in iter(self.proc.stdout.readline, b''):
+ for idx, line in enumerate(iter(self.proc.stdout.readline, b'')):
+ if idx < BUFFER_LINES_FOR_SYNTAX:
+ syntax_buffer.append(line)
line = line[:1024].rstrip()
self.log.debug("Ansible output: %s" % (line,))
self.log.debug("Ansible output terminated")
@@ -1318,6 +1354,18 @@
elif ret == -9:
# Received abort request.
return (self.RESULT_ABORTED, None)
+ elif ret == 4:
+ # Ansible could not parse the yaml.
+ self.log.debug("Ansible parse error")
+ # TODO(mordred) If/when we rework use of logger in ansible-playbook
+ # we'll want to change how this works to use that as well. For now,
+ # this is what we need to do.
+ with open(self.jobdir.job_output_file, 'a') as job_output:
+ job_output.write("{now} | ANSIBLE PARSE ERROR\n".format(
+ now=datetime.datetime.now()))
+ for line in syntax_buffer:
+ job_output.write("{now} | {line}\n".format(
+ now=datetime.datetime.now(), line=line))
return (self.RESULT_NORMAL, ret)
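runPlaybooks() above now remembers a pre-playbook failure: the main playbook is skipped, the post playbooks still run, and a post failure is not allowed to mask the pre failure, so the result stays None and the scheduler can retry the build. A compressed sketch of that control flow with playbook execution stubbed out (the boolean arguments are stand-ins for the real status/code pairs):

    def run_playbooks(pre_ok, job_code, post_ok):
        result = None
        pre_failed = not pre_ok

        if not pre_failed:
            result = 'SUCCESS' if job_code == 0 else 'FAILURE'

        # Post playbooks always run, but a post failure must not hide a
        # pre failure: the result stays None so Zuul retries the build.
        if not post_ok and not pre_failed:
            result = 'POST_FAILURE'
        return result

    print(run_playbooks(pre_ok=True, job_code=0, post_ok=True))    # SUCCESS
    print(run_playbooks(pre_ok=True, job_code=1, post_ok=False))   # POST_FAILURE
    print(run_playbooks(pre_ok=False, job_code=0, post_ok=False))  # None -> retry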
diff --git a/zuul/lib/log_streamer.py b/zuul/lib/log_streamer.py
index c76b057..67c733e 100644
--- a/zuul/lib/log_streamer.py
+++ b/zuul/lib/log_streamer.py
@@ -30,7 +30,9 @@
def __init__(self, path):
self.path = path
- self.file = open(path)
+ # The logs are written as binary encoded utf-8, which is what we
+ # send over the wire.
+ self.file = open(path, 'rb')
self.stat = os.stat(path)
self.size = self.stat.st_size
@@ -127,7 +129,7 @@
chunk = log.file.read(4096)
if not chunk:
break
- self.request.send(chunk.encode('utf-8'))
+ self.request.send(chunk)
return log
def follow_log(self, log):
@@ -136,7 +138,7 @@
while True:
chunk = log.file.read(4096)
if chunk:
- self.request.send(chunk.encode('utf-8'))
+ self.request.send(chunk)
else:
break
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 20bc459..01429ce 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -401,7 +401,8 @@
self.log.exception("Exception while canceling build %s "
"for change %s" % (build, item.change))
finally:
- old_build_set.layout.tenant.semaphore_handler.release(
+ tenant = old_build_set.item.pipeline.layout.tenant
+ tenant.semaphore_handler.release(
old_build_set.item, build.job)
if not was_running:
diff --git a/zuul/model.py b/zuul/model.py
index c24244d..436a9c8 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -1105,11 +1105,13 @@
def __init__(self):
self.name = "Unknown"
self.hostname = None
+ self.log_port = None
def updateFromData(self, data):
"""Update worker information if contained in the WORK_DATA response."""
self.name = data.get('worker_name', self.name)
self.hostname = data.get('worker_hostname', self.hostname)
+ self.log_port = data.get('worker_log_port', self.log_port)
def __repr__(self):
return '<Worker %s>' % self.name
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index 0ac5766..95b9208 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -14,6 +14,7 @@
import abc
import logging
+from zuul.lib.config import get_default
class BaseReporter(object, metaclass=abc.ABCMeta):
@@ -69,10 +70,8 @@
return ret
def _formatItemReportStart(self, item, with_jobs=True):
- status_url = ''
- if self.connection.sched.config.has_option('zuul', 'status_url'):
- status_url = self.connection.sched.config.get('zuul',
- 'status_url')
+ status_url = get_default(self.connection.sched.config,
+ 'webapp', 'status_url', '')
return item.pipeline.start_message.format(pipeline=item.pipeline,
status_url=status_url)
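This hunk, like many others in the change, swaps has_option/get pairs on the old [zuul] section for get_default() calls against per-component sections ([scheduler], [executor], [merger], [webapp], [zookeeper]). A rough sketch of what get_default is assumed to do, for readers following the config migration (the real helper lives in zuul.lib.config and may differ in detail):

    import configparser
    import os

    def get_default(config, section, option, default=None, expand_user=False):
        # Fall back to the supplied default when the option is not set.
        if config.has_option(section, option):
            value = config.get(section, option)
        else:
            value = default
        if expand_user and value:
            value = os.path.expanduser(value)
        return value

    config = configparser.ConfigParser()
    config.read_string("[webapp]\nstatus_url = https://zuul.example.org/status\n")
    print(get_default(config, 'webapp', 'status_url', ''))
    print(get_default(config, 'scheduler', 'state_dir', '/var/lib/zuul'))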
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index c762309..fe6a673 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -49,9 +49,9 @@
def wait(self, timeout=None):
self._wait_event.wait(timeout)
if self._exc_info:
- # http://python3porting.com/differences.html#raise
- e, v, t = self._exc_info
- raise e(v).with_traceback(t)
+ # sys.exc_info returns (type, value, traceback)
+ type_, exception_instance, traceback = self._exc_info
+ raise exception_instance.with_traceback(traceback)
return self._wait_event.is_set()
@@ -372,12 +372,12 @@
self.log.debug("Waiting for exit")
def _get_queue_pickle_file(self):
- state_dir = get_default(self.config, 'zuul', 'state_dir',
+ state_dir = get_default(self.config, 'scheduler', 'state_dir',
'/var/lib/zuul', expand_user=True)
return os.path.join(state_dir, 'queue.pickle')
def _get_time_database_dir(self):
- state_dir = get_default(self.config, 'zuul', 'state_dir',
+ state_dir = get_default(self.config, 'scheduler', 'state_dir',
'/var/lib/zuul', expand_user=True)
d = os.path.join(state_dir, 'times')
if not os.path.exists(d):
@@ -385,7 +385,7 @@
return d
def _get_project_key_dir(self):
- state_dir = get_default(self.config, 'zuul', 'state_dir',
+ state_dir = get_default(self.config, 'scheduler', 'state_dir',
'/var/lib/zuul', expand_user=True)
key_dir = os.path.join(state_dir, 'keys')
if not os.path.exists(key_dir):
@@ -451,7 +451,7 @@
self.log.debug("Performing reconfiguration")
loader = configloader.ConfigLoader()
abide = loader.loadConfig(
- self.config.get('zuul', 'tenant_config'),
+ self.config.get('scheduler', 'tenant_config'),
self._get_project_key_dir(),
self, self.merger, self.connections)
for tenant in abide.tenants.values():
@@ -468,7 +468,7 @@
self.log.debug("Performing tenant reconfiguration")
loader = configloader.ConfigLoader()
abide = loader.reloadTenant(
- self.config.get('zuul', 'tenant_config'),
+ self.config.get('scheduler', 'tenant_config'),
self._get_project_key_dir(),
self, self.merger, self.connections,
self.abide, event.tenant)
@@ -547,6 +547,8 @@
else:
items_to_remove.append(item)
for item in items_to_remove:
+ self.log.warning(
+ "Removing item %s during reconfiguration" % (item,))
for build in item.current_build_set.getBuilds():
builds_to_cancel.append(build)
for build in builds_to_cancel: