Merge "Call parameter_function prior to creating swift params"
diff --git a/.gitignore b/.gitignore
index 9703f16..b59cb77 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+*.sw?
 *.egg
 *.egg-info
 *.pyc
diff --git a/.testr.conf b/.testr.conf
index 5433c07..222ce97 100644
--- a/.testr.conf
+++ b/.testr.conf
@@ -1,4 +1,4 @@
 [DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} ${PYTHON:-python} -m subunit.run discover -t ./ tests $LISTOPT $IDOPTION
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} OS_LOG_DEFAULTS=${OS_LOG_DEFAULTS:-""} ${PYTHON:-python} -m subunit.run discover -t ./ tests $LISTOPT $IDOPTION
 test_id_option=--load-list $IDFILE
 test_list_option=--list
diff --git a/doc/source/cloner.rst b/doc/source/cloner.rst
index 2ddf0b5..70577cc 100644
--- a/doc/source/cloner.rst
+++ b/doc/source/cloner.rst
@@ -61,6 +61,23 @@
 
 .. program-output:: zuul-cloner --help
 
+
+Ref lookup order
+''''''''''''''''
+
+The Zuul cloner will attempt to look up references in this order:
+
+ 1) Zuul reference for the indicated branch
+ 2) Zuul reference for the master branch
+ 3) The tip of the indicated branch
+ 4) The tip of the master branch
+
+The "indicated branch" is one of the following:
+
+ A) The project-specific override branch (from project_branches arg)
+ B) The user-specified branch (from the branch arg)
+ C) ZUUL_BRANCH (from the zuul_branch arg)
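+
+For example, a minimal sketch (the URL and project name are
+hypothetical), assuming a job triggered on ``stable/foo``::
+
+  zuul-cloner --branch stable/foo git://git.example.org org/project
+
+Here the cloner first looks for a Zuul reference on ``stable/foo``,
+then a Zuul reference on ``master``, then falls back to the tip of
+``stable/foo``, and finally the tip of ``master``.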
+
 Clone order
 -----------
 
@@ -87,3 +104,7 @@
 The URL of origin remote of the resulting clone will be reset to use
 the ``git_base_url`` and then the remote will be updated so that the
 repository has all the information in the upstream repository.
+
+The default for ``--cache-dir`` is taken from the environment variable
+``ZUUL_CACHE_DIR``. A value provided explicitly on the command line
+overrides the environment variable setting.
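+
+For example (hypothetical paths)::
+
+  export ZUUL_CACHE_DIR=/var/cache/zuul
+  zuul-cloner git://git.example.org org/project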
diff --git a/doc/source/connections.rst b/doc/source/connections.rst
new file mode 100644
index 0000000..f0820a6
--- /dev/null
+++ b/doc/source/connections.rst
@@ -0,0 +1,79 @@
+:title: Connections
+
+.. _connections:
+
+Connections
+===========
+
+Zuul coordinates talking to multiple different systems via the concept
+of connections. A connection is listed in the :ref:`zuulconf` file and is
+then referred to from the :ref:`layoutyaml`. This makes it possible to
+receive events from Gerrit via one connection and post results via
+another connection that may report back as a different user.
+
+Gerrit
+------
+
+Create a connection with gerrit.
+
+**driver=gerrit**
+
+**server**
+  FQDN of Gerrit server.
+  ``server=review.example.com``
+
+**port**
+  Optional: Gerrit server port.
+  ``port=29418``
+
+**baseurl**
+  Optional: path to Gerrit web interface. Defaults to ``https://<value
+  of server>/``. ``baseurl=https://review.example.com/review_site/``
+
+**user**
+  User name to use when logging into above server via ssh.
+  ``user=zuul``
+
+**sshkey**
+  Path to SSH key to use when logging into above server.
+  ``sshkey=/home/zuul/.ssh/id_rsa``
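+
+A minimal example section in ``zuul.conf`` (the connection name,
+hostname and paths are examples)::
+
+  [connection my_gerrit]
+  driver=gerrit
+  server=review.example.com
+  port=29418
+  user=zuul
+  sshkey=/home/zuul/.ssh/id_rsa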
+
+
+Gerrit Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+Zuul will need access to a Gerrit user.
+
+Create an SSH keypair for Zuul to use if there isn't one already, and
+create a Gerrit user with that key::
+
+  cat ~/id_rsa.pub | ssh -p29418 gerrit.example.com gerrit create-account --ssh-key - --full-name Jenkins jenkins
+
+Give that user whatever permissions will be needed on the projects you
+want Zuul to gate.  For instance, you may want to grant ``Verified
++/-1`` and ``Submit`` to the user.  Additional categories or values may
+be added to Gerrit.  Zuul is very flexible and can take advantage of
+those.
+
+SMTP
+----
+
+**driver=smtp**
+
+**server**
+  SMTP server hostname or address to use.
+  ``server=localhost``
+
+**port**
+  Optional: SMTP server port.
+  ``port=25``
+
+**default_from**
+  Who the email should appear to be sent from when emailing the report.
+  This can be overridden by individual pipelines.
+  ``default_from=zuul@example.com``
+
+**default_to**
+  Who the report should be emailed to by default.
+  This can be overridden by individual pipelines.
+  ``default_to=you@example.com``
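+
+A sketch of a corresponding ``zuul.conf`` section (the connection name
+and addresses are examples)::
+
+  [connection outgoing_smtp]
+  driver=smtp
+  server=localhost
+  port=25
+  default_from=zuul@example.com
+  default_to=you@example.com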
diff --git a/doc/source/gating.rst b/doc/source/gating.rst
index 43a5928..c10ba83 100644
--- a/doc/source/gating.rst
+++ b/doc/source/gating.rst
@@ -212,19 +212,20 @@
   }
 
 
-Cross projects dependencies
----------------------------
+Cross Project Testing
+---------------------
 
 When your projects are closely coupled together, you want to make sure
 changes entering the gate are going to be tested with the version of
 other projects currently enqueued in the gate (since they will
 eventually be merged and might introduce breaking features).
 
-Such dependencies can be defined in Zuul configuration by registering a job
-in a DependentPipeline of several projects. Whenever a change enters such a
-pipeline, it will create references for the other projects as well.  As an
-example, given a main project ``acme`` and a plugin ``plugin`` you can
-define a job ``acme-tests`` which should be run for both projects:
+Such relationships can be defined in Zuul configuration by registering
+a job in a DependentPipeline of several projects. Whenever a change
+enters such a pipeline, it will create references for the other
+projects as well.  As an example, given a main project ``acme`` and a
+plugin ``plugin`` you can define a job ``acme-tests`` which should be
+run for both projects:
 
 .. code-block:: yaml
 
@@ -280,3 +281,191 @@
 When your job fetches several repositories without changes ahead in the
 queue, they may not have a Z reference in which case you can just check
 out the branch.
+
+
+Cross Repository Dependencies
+-----------------------------
+
+Zuul permits users to specify dependencies across repositories.  Using
+a special footer in Git commit messages, users may specify that a
+change depends on another change in any repository known to Zuul.
+
+Zuul's cross-repository dependencies (CRD) behave like a directed
+acyclic graph (DAG), like git itself, to indicate a one-way dependency
+relationship between changes in different git repositories.  Change A
+may depend on B, but B may not depend on A.
+
+To use them, include ``Depends-On: <gerrit-change-id>`` in the footer of
+a commit message.  Use the full Change-Id ('I' followed by 40 characters).
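+
+For example, a commit message footer might look like this (the
+Change-Ids shown are made-up placeholders)::
+
+  Add feature X
+
+  This change depends on a new API in another repository.
+
+  Depends-On: I0123456789abcdef0123456789abcdef01234567
+  Change-Id: I89abcdef0123456789abcdef0123456789abcdef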
+
+
+Dependent Pipeline
+~~~~~~~~~~~~~~~~~~
+
+When Zuul sees CRD changes, it serializes them in the usual manner when
+enqueuing them into a pipeline.  This means that if change A depends on
+B, then when they are added to a dependent pipeline, B will appear first
+and A will follow:
+
+.. blockdiag::
+  :align: center
+
+  blockdiag crd {
+    orientation = portrait
+    span_width = 30
+    class greendot [
+        label = "",
+        shape = circle,
+        color = green,
+        width = 20, height = 20
+    ]
+
+    A_status [ class = greendot ]
+    B_status [ class = greendot ]
+    B_status -- A_status
+
+    'Change B\nChange-Id: Iabc' <- 'Change A\nDepends-On: Iabc'
+  }
+
+If tests for B fail, both B and A will be removed from the pipeline, and
+it will not be possible for A to merge until B does.
+
+
+.. note::
+
+   If changes with CRD do not share a change queue then Zuul is unable
+   to enqueue them together, and the first will be required to merge
+   before the second is enqueued.
+
+Independent Pipeline
+~~~~~~~~~~~~~~~~~~~~
+
+When changes are enqueued into an independent pipeline, all of the
+related dependencies (both normal git-dependencies that come from parent
+commits as well as CRD changes) appear in a dependency graph, as in a
+dependent pipeline. This means that even in an independent pipeline,
+your change will be tested with its dependencies.  So changes that were
+previously unable to be fully tested until a related change landed in a
+different repository may now be tested together from the start.
+
+All of the changes are still independent (so you will note that the
+whole pipeline does not share a graph as in a dependent pipeline), but
+for each change tested, all of its dependencies are visually connected
+to it, and they are used to construct the git references that Zuul uses
+when testing.
+
+When looking at this graph on the status page, you will note that the
+dependencies show up as grey dots, while the actual change tested shows
+up as red or green (depending on the job results):
+
+.. blockdiag::
+  :align: center
+
+  blockdiag crdgrey {
+    orientation = portrait
+    span_width = 30
+    class dot [
+        label = "",
+        shape = circle,
+        width = 20, height = 20
+    ]
+
+    A_status [class = "dot", color = green]
+    B_status [class = "dot", color = grey]
+    B_status -- A_status
+
+    "Change B" <- "Change A\nDepends-On: B"
+  }
+
+This is to indicate that the grey changes are only there to establish
+dependencies.  Even if one of the dependencies is also being tested, it
+will show up as a grey dot when used as a dependency, but separately and
+additionally will appear as its own red or green dot for its test.
+
+
+Multiple Changes
+~~~~~~~~~~~~~~~~
+
+A Gerrit change ID may refer to multiple changes (on multiple branches
+of the same project, or even multiple projects).  In these cases, Zuul
+will treat all of the changes with that change ID as dependencies.  So
+if a change in project A Depends-On a change ID that has changes on two
+branches of project B, then when testing the change to project A, both
+project B changes will be applied, and when deciding whether the
+project A change can merge, both changes must merge ahead of it.
+
+.. blockdiag::
+  :align: center
+
+  blockdiag crdmultirepos {
+    orientation = portrait
+    span_width = 30
+    class greendot [
+        label = "",
+        shape = circle,
+        color = green,
+        width = 20, height = 20
+    ]
+
+    B_stable_status [ class = "greendot" ]
+    B_master_status [ class = "greendot" ]
+    A_status [ class = "greendot" ]
+    B_stable_status -- B_master_status -- A_status
+
+    A [ label = "Repo A\nDepends-On: I123" ]
+    group {
+        orientation = portrait
+        label = "Dependencies"
+        color = "lightgray"
+
+        B_stable [ label = "Repo B\nChange-Id: I123\nBranch: stable" ]
+        B_master [ label = "Repo B\nChange-Id: I123\nBranch: master" ]
+    }
+    B_master <- A
+    B_stable <- A
+
+  }
+
+A change may depend on more than one Gerrit change ID as well.  So it
+is possible for a change in project A to depend on a change in project
+B and a change in project C.  Simply add more ``Depends-On:`` lines to
+the commit message footer.
+
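+For example (shortened placeholder IDs matching the diagram below)::
+
+  Depends-On: I123
+  Depends-On: Iabc
+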
+.. blockdiag::
+  :align: center
+
+  blockdiag crdmultichanges {
+    orientation = portrait
+    span_width = 30
+    class greendot [
+        label = "",
+        shape = circle,
+        color = green,
+        width = 20, height = 20
+    ]
+
+    C_status [ class = "greendot" ]
+    B_status [ class = "greendot" ]
+    A_status [ class = "greendot" ]
+    C_status -- B_status -- A_status
+
+    A [ label = "Repo A\nDepends-On: I123\nDepends-On: Iabc" ]
+    group {
+        orientation = portrait
+        label = "Dependencies"
+        color = "lightgray"
+
+        B [ label = "Repo B\nChange-Id: I123" ]
+        C [ label = "Repo C\nChange-Id: Iabc" ]
+    }
+    B, C <- A
+  }
+
+Cycles
+~~~~~~
+
+If a cycle is created by use of CRD, Zuul will abort its work very
+early.  There will be no message in Gerrit and no changes that are part
+of the cycle will be enqueued into any pipeline.  This is to protect
+Zuul from infinite loops.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index abe8089..3c793da 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -13,15 +13,17 @@
 .. toctree::
    :maxdepth: 2
 
+   quick-start
    gating
+   connections
    triggers
-   merger
-   launchers
    reporters
    zuul
-   client
+   merger
    cloner
+   launchers
    statsd
+   client
 
 Indices and tables
 ==================
diff --git a/doc/source/launchers.rst b/doc/source/launchers.rst
index b95354f..f368cb9 100644
--- a/doc/source/launchers.rst
+++ b/doc/source/launchers.rst
@@ -6,7 +6,7 @@
    https://wiki.jenkins-ci.org/display/JENKINS/Gearman+Plugin
 
 .. _`Turbo-Hipster`:
-   http://git.openstack.org/cgit/stackforge/turbo-hipster/
+   https://git.openstack.org/cgit/openstack/turbo-hipster/
 
 .. _`Turbo-Hipster Documentation`:
    http://turbo-hipster.rtfd.org/
@@ -66,6 +66,11 @@
 **LOG_PATH**
   zuul also suggests a unique path for logs to the worker. This is
   "BASE_LOG_PATH/pipeline-name/job-name/uuid"
+**ZUUL_VOTING**
+  Whether Zuul considers this job voting or not.  Note that if Zuul is
+  reconfigured during the run, the voting status of a job may change
+  and this value will be out of date.  Values are '1' if voting, '0'
+  otherwise.
 
 Change related parameters
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -234,7 +239,7 @@
 instead.  As an example, the OpenStack project uses the following
 script to prepare the workspace for its integration testing:
 
-  https://github.com/openstack-infra/devstack-gate/blob/master/devstack-vm-gate-wrap.sh
+  https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh
 
 Turbo Hipster Worker
 ~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/merger.rst b/doc/source/merger.rst
index e01bc8c..82e204b 100644
--- a/doc/source/merger.rst
+++ b/doc/source/merger.rst
@@ -58,3 +58,17 @@
 depending on what the state of Zuul's repository is when the clone
 happens).  They are, however, suitable for automated systems that
 respond to Zuul triggers.
+
+Clearing old references
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The references created under refs/zuul are not garbage collected. Since
+``git fetch`` sends them all to Gerrit to sync the repositories, the time
+spent on merge operations will slowly grow over time and eventually
+become noticeable.
+
+To clean them up, you can use the ``tools/zuul-clear-refs.py`` script on
+each repository. It will delete Zuul references that point to commits
+whose commit date is older than a given number of days (default: 360)::
+
+  ./tools/zuul-clear-refs.py /path/to/zuul/git/repo
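+
+For example, a hypothetical weekly cron entry on the merger host
+(paths are illustrative)::
+
+  0 4 * * 7  zuul  /path/to/zuul/tools/zuul-clear-refs.py /path/to/zuul/git/org/project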
diff --git a/doc/source/quick-start.rst b/doc/source/quick-start.rst
new file mode 100644
index 0000000..82779c6
--- /dev/null
+++ b/doc/source/quick-start.rst
@@ -0,0 +1,162 @@
+Quick Start Guide
+=================
+
+System Requirements
+-------------------
+
+For most deployments zuul only needs 1-2GB of memory. OpenStack uses a
+30GB setup.
+
+Install Zuul
+------------
+
+You can get zuul from pypi via::
+
+    pip install zuul
+
+Zuul Components
+---------------
+
+Zuul provides the following components:
+
+    - **zuul-server**: scheduler daemon which communicates with Gerrit and
+      Gearman. Handles receiving events, launching jobs, collecting results
+      and posting reports.
+    - **zuul-merger**: speculative-merger which communicates with Gearman.
+      Prepares Git repositories for jobs to test against. This additionally
+      requires a web server hosting the Git repositories which can be cloned
+      by the jobs.
+    - **zuul-cloner**: client-side script used to set up the job
+      workspace. It is used to clone the repositories prepared by the
+      zuul-merger described previously.
+    - **gearmand**: optional builtin gearman daemon provided by zuul-server
+
+External components:
+
+    - Jenkins Gearman plugin: Used by Jenkins to connect to Gearman
+
+Zuul Communication
+------------------
+
+All the Zuul components communicate with each other using Gearman, in
+addition to the following communication channels:
+
+zuul-server:
+
+    - Gerrit
+    - Gearman Daemon
+
+zuul-merger:
+
+    - Gerrit
+    - Gearman Daemon
+
+zuul-cloner:
+
+    - http hosted zuul-merger git repos
+
+Jenkins:
+
+    - Gearman Daemon via Jenkins Gearman Plugin
+
+Zuul Setup
+----------
+
+At a minimum we need to provide **zuul.conf** and **layout.yaml**, placed
+in the /etc/zuul/ directory. You will also need a zuul user and an SSH
+key for the zuul user in Gerrit. The following example uses the builtin
+gearmand service in zuul.
+
+**zuul.conf**::
+
+    [zuul]
+    layout_config=/etc/zuul/layout.yaml
+
+    [merger]
+    git_dir=/git
+    zuul_url=http://zuul.example.com/p
+
+    [gearman_server]
+    start=true
+
+    [gearman]
+    server=127.0.0.1
+
+    [connection gerrit]
+    driver=gerrit
+    server=git.example.com
+    port=29418
+    baseurl=https://git.example.com/gerrit/
+    user=zuul
+    sshkey=/home/zuul/.ssh/id_rsa
+
+See :doc:`zuul` for more details.
+
+The following sets up a basic timer triggered job using zuul.
+
+**layout.yaml**::
+
+    pipelines:
+      - name: periodic
+        source: gerrit
+        manager: IndependentPipelineManager
+        trigger:
+          timer:
+            - time: '0 * * * *'
+
+    projects:
+      - name: aproject
+        periodic:
+          - aproject-periodic-build
+
+Starting Zuul
+-------------
+
+You can run zuul-server with the **-d** option to make it not daemonize.
+It's a good idea at first to confirm there are no issues with your
+configuration.
+
+Simply run::
+
+    zuul-server
+
+Once running, you should have two zuul-server processes::
+
+    zuul     12102     1  0 Jan21 ?        00:15:45 /home/zuul/zuulvenv/bin/python /home/zuul/zuulvenv/bin/zuul-server -d
+    zuul     12107 12102  0 Jan21 ?        00:00:01 /home/zuul/zuulvenv/bin/python /home/zuul/zuulvenv/bin/zuul-server -d
+
+Note: In this example zuul was installed in a virtualenv.
+
+The second zuul-server process is gearmand, if you are using the builtin
+gearmand server; otherwise there will be only one process.
+
+Note that Zuul won't actually process your job queue unless a
+zuul-merger process is also running.
+
+Simply run::
+
+    zuul-merger
+
+Zuul should now be able to process your periodic job as configured above once
+the Jenkins side of things is configured.
+
+Jenkins Setup
+-------------
+
+Install the Jenkins Gearman Plugin via the Jenkins plugin management
+interface. Then navigate to **Manage > Configuration > Gearman** and set
+up the hostname/IP and port of the Gearman server for Jenkins to connect
+to.
+
+At this point gearman should be running your Jenkins jobs.
+
+Troubleshooting
+---------------
+
+To check Gearman function registration (jobs), you can use telnet to
+connect to gearman and verify that Jenkins has registered your
+configured jobs::
+
+    telnet <gearman_ip> 4730
+
+Useful commands are **workers** and **status** which you can run by just
+typing those commands once connected to gearman. Every job in your Jenkins
+master must appear when you run **workers** for Zuul to be able to run jobs
+against your Jenkins instance.
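+
+For example, after connecting you might see output like the following
+for the periodic job defined earlier (a sketch; the exact function
+names and counts depend on your Jenkins configuration)::
+
+    status
+    build:aproject-periodic-build	0	0	1
+    .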
diff --git a/doc/source/reporters.rst b/doc/source/reporters.rst
index f94a439..97bed4a 100644
--- a/doc/source/reporters.rst
+++ b/doc/source/reporters.rst
@@ -25,17 +25,17 @@
 --verified 1`` and ``submit: true`` becomes ``gerrit review
 --submit``.
 
-Gerrit Configuration
-~~~~~~~~~~~~~~~~~~~~
-
-The configuration for posting back to Gerrit is shared with the Gerrit
-trigger in zuul.conf as described in :ref:`zuulconf`.
+A :ref:`connection` that uses the gerrit driver must be supplied to the
+reporter.
 
 SMTP
 ----
 
 A simple email reporter is also available.
 
+A :ref:`connection` that uses the smtp driver must be supplied to the
+reporter.
+
 SMTP Configuration
 ~~~~~~~~~~~~~~~~~~
 
@@ -48,13 +48,15 @@
   pipelines:
     - name: post-merge
       manager: IndependentPipelineManager
+      source: my_gerrit
       trigger:
-        - event: change-merged
+        my_gerrit:
+          - event: change-merged
       success:
-        smtp:
+        outgoing_smtp:
           to: you@example.com
       failure:
-        smtp:
+        internal_smtp:
           to: you@example.com
           from: alternative@example.com
           subject: Change {change} failed
diff --git a/doc/source/statsd.rst b/doc/source/statsd.rst
index f789d61..b3bf99f 100644
--- a/doc/source/statsd.rst
+++ b/doc/source/statsd.rst
@@ -31,7 +31,7 @@
 
 The metrics are emitted by the Zuul scheduler (`zuul/scheduler.py`):
 
-**gerrit.events.<type> (counters)**
+**gerrit.event.<type> (counters)**
   Gerrit emits different kind of message over its `stream-events` interface. As
   a convenience, Zuul emits metrics to statsd which save you from having to use
   a different daemon to measure Gerrit events.
@@ -52,6 +52,18 @@
   Refer to your Gerrit installation documentation for an exhaustive list of
   Gerrit event types.
 
+**zuul.node_type.**
+  Holds metrics specific to build nodes per label. The hierarchy is:
+
+    #. **<build node label>** each of the labels associated with a build in
+           Jenkins. It contains:
+
+      #. **job.<jobname>** subtree detailing per-job statistics:
+
+        #. **wait_time** counter and timer of the wait time, i.e. the
+                   difference between the job start time and the launch
+                   time, in milliseconds.
+
+  For example, the wait time for a hypothetical label ``precise`` and job
+  ``myjob`` would be emitted as
+  ``zuul.node_type.precise.job.myjob.wait_time``.
+
 **zuul.pipeline.**
   Holds metrics specific to jobs. The hierarchy is:
 
@@ -75,10 +87,13 @@
                known by Zuul (which includes build time and Zuul overhead).
       #. **total_changes** counter of the number of change proceeding since
                Zuul started.
+      #. **wait_time** counter and timer of the wait time, i.e. the
+               difference between the job start time and the launch time,
+               in milliseconds.
 
   Additionally, the `zuul.pipeline.<pipeline name>` hierarchy contains
-  `current_changes` and `resident_time` metrics for each projects. The slash
-  separator used in Gerrit name being replaced by dots.
+  `current_changes` (gauge), `resident_time` (timing) and `total_changes`
+  (counter) metrics for each project. The slash separator used in Gerrit
+  names is replaced by dots.
 
   As an example, given a job named `myjob` triggered by the `gate` pipeline
   which took 40 seconds to build, the Zuul scheduler will emit the following
diff --git a/doc/source/triggers.rst b/doc/source/triggers.rst
index 5b745e6..263f280 100644
--- a/doc/source/triggers.rst
+++ b/doc/source/triggers.rst
@@ -15,36 +15,143 @@
 stream-events`` command over an SSH connection.  It also reports back
 to Gerrit using SSH.
 
-Gerrit Configuration
-~~~~~~~~~~~~~~~~~~~~
-
-Zuul will need access to a Gerrit user.  Consider naming the user
-*Jenkins* so that developers see that feedback from changes is from
-Jenkins (Zuul attempts to stay out of the way of developers, most
-shouldn't even need to know it's there).
-
-Create an SSH keypair for Zuul to use if there isn't one already, and
-create a Gerrit user with that key::
-
-  cat ~/id_rsa.pub | ssh -p29418 gerrit.example.com gerrit create-account --ssh-key - --full-name Jenkins jenkins
-
-Give that user whatever permissions will be needed on the projects you
-want Zuul to gate.  For instance, you may want to grant ``Verified
-+/-1`` and ``Submit`` to the user.  Additional categories or values may
-be added to Gerrit.  Zuul is very flexible and can take advantage of
-those.
-
 If using Gerrit 2.7 or later, make sure the user is a member of a group
 that is granted the ``Stream Events`` permission, otherwise it will not
 be able to invoke the ``gerrit stream-events`` command over SSH.
 
+A connection configured with the gerrit driver can take multiple events
+with the following options.
+
+  **event**
+  The event name from gerrit.  Examples: ``patchset-created``,
+  ``comment-added``, ``ref-updated``.  This field is treated as a
+  regular expression.
+
+  **branch**
+  The branch associated with the event.  Example: ``master``.  This
+  field is treated as a regular expression, and multiple branches may
+  be listed.
+
+  **ref**
+  On ref-updated events, the branch parameter is not used, instead the
+  ref is provided.  Currently Gerrit has the somewhat idiosyncratic
+  behavior of specifying bare refs for branch names (e.g., ``master``),
+  but full ref names for other kinds of refs (e.g., ``refs/tags/foo``).
+  Zuul matches what you put here exactly against what Gerrit
+  provides.  This field is treated as a regular expression, and
+  multiple refs may be listed.
+
+  **ignore-deletes**
+  When a branch is deleted, a ref-updated event is emitted with a newrev
+  of all zeros. The ``ignore-deletes`` field is a boolean value that
+  determines whether these deletion events are ignored. The default is
+  True, meaning branch deletions will not trigger ref-updated events.
+
+  **approval**
+  This is only used for ``comment-added`` events.  It only matches if
+  the event has a matching approval associated with it.  Example:
+  ``code-review: 2`` matches a ``+2`` vote on the code review category.
+  Multiple approvals may be listed.
+
+  **email**
+  This is used for any event.  It takes a regex applied to the performer's
+  email, i.e. the Gerrit account email address.  If you want to specify
+  several email filters, you must use a YAML list.  Make sure to use
+  non-greedy matchers and to escape dots!
+  Example: ``email: ^.*?@example\.org$``.
+
+  **email_filter** (deprecated)
+  A deprecated alternate spelling of *email*.  Only one of *email* or
+  *email_filter* should be used.
+
+  **username**
+  This is used for any event.  It takes a regex applied to the performer's
+  username, i.e. the Gerrit account name.  If you want to specify several
+  username filters, you must use a YAML list.  Make sure to use non-greedy
+  matchers and to escape dots!
+  Example: ``username: ^jenkins$``.
+
+  **username_filter** (deprecated)
+  A deprecated alternate spelling of *username*.  Only one of *username* or
+  *username_filter* should be used.
+
+  **comment**
+  This is only used for ``comment-added`` events.  It accepts a list of
+  regexes that are searched for in the comment string. If any of these
+  regexes matches a portion of the comment string the trigger is
+  matched. ``comment: retrigger`` will match when comments
+  containing 'retrigger' somewhere in the comment text are added to a
+  change.
+
+  **comment_filter** (deprecated)
+  A deprecated alternate spelling of *comment*.  Only one of *comment* or
+  *comment_filter* should be used.
+
+  *require-approval*
+  This may be used for any event.  It requires that a certain kind
+  of approval be present for the current patchset of the change (the
+  approval could be added by the event in question).  It follows the
+  same syntax as the :ref:`"approval" pipeline requirement
+  <pipeline-require-approval>`. For each specified criterion there must
+  exist a matching approval.
+
+  *reject-approval*
+  This takes a list of approvals in the same format as
+  *require-approval*, but the change will fail to enter the pipeline
+  if there is a matching approval.
+
+
 Timer
 -----
 
 A simple timer trigger is available as well.  It supports triggering
 jobs in a pipeline based on cron-style time instructions.
 
+Timers don't require a special connection or driver. Instead they can
+be used by listing **timer** as the trigger.
+
+This trigger will run based on a cron-style time specification.
+It will enqueue an event into its pipeline for every project
+defined in the configuration.  Any job associated with the
+pipeline will run in response to that event.
+
+  **time**
+  The time specification in cron syntax.  Only the 5 part syntax is
+  supported, not the symbolic names.  Example: ``0 0 * * *`` runs
+  at midnight.
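+
+A sketch of a pipeline using this trigger, mirroring the quick-start
+example (the source connection name is hypothetical)::
+
+  pipelines:
+    - name: periodic
+      source: my_gerrit
+      manager: IndependentPipelineManager
+      trigger:
+        timer:
+          - time: '0 0 * * *'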
+
 Zuul
 ----
 
 The Zuul trigger generates events based on internal actions in Zuul.
+Multiple events may be listed.
+
+Zuul events don't require a special connection or driver. Instead they
+can be used by listing **zuul** as the trigger.
+
+  **event**
+  The event name.  Currently supported:
+
+    *project-change-merged* when Zuul merges a change to a project,
+    it generates this event for every open change in the project.
+
+    *parent-change-enqueued* when Zuul enqueues a change into any
+    pipeline, it generates this event for every child of that
+    change.
+
+  **pipeline**
+  Only available for ``parent-change-enqueued`` events.  This is the
+  name of the pipeline in which the parent change was enqueued.
+
+  *require-approval*
+  This may be used for any event.  It requires that a certain kind
+  of approval be present for the current patchset of the change (the
+  approval could be added by the event in question).  It follows the
+  same syntax as the :ref:`"approval" pipeline requirement
+  <pipeline-require-approval>`. For each specified criterion there must
+  exist a matching approval.
+
+  *reject-approval*
+  This takes a list of approvals in the same format as
+  *require-approval*, but the change will fail to enter the pipeline
+  if there is a matching approval.
\ No newline at end of file
diff --git a/doc/source/zuul.rst b/doc/source/zuul.rst
index bcdfabb..2285ecb 100644
--- a/doc/source/zuul.rst
+++ b/doc/source/zuul.rst
@@ -10,11 +10,11 @@
 
 **zuul.conf**
   Connection information for Gerrit and Gearman, locations of the
-  other config files.
+  other config files. (required)
 **layout.yaml**
-  Project and pipeline configuration -- what Zuul does.
+  Project and pipeline configuration -- what Zuul does. (required)
 **logging.conf**
-    Python logging config.
+    Python logging config. (optional)
 
 Examples of each of the three files can be found in the etc/ directory
 of the source distribution.
@@ -41,17 +41,28 @@
 gearman
 """""""
 
+Client connection information for gearman. If using Zuul's builtin
+gearmand server, just set **server** to 127.0.0.1.
+
 **server**
   Hostname or IP address of the Gearman server.
-  ``server=gearman.example.com``
+  ``server=gearman.example.com`` (required)
 
 **port**
   Port on which the Gearman server is listening.
-  ``port=4730``
+  ``port=4730`` (optional)
+
+**check_job_registration**
+  Check to see if the job is registered with Gearman or not. When True,
+  a build result of NOT_REGISTERED will be returned if the job is not
+  found.
+  ``check_job_registration=True``
 
 gearman_server
 """"""""""""""
 
+The builtin gearman server. Zuul can fork a gearman process from itself rather
+than connecting to an external one.
+
 **start**
   Whether to start the internal Gearman server (default: False).
   ``start=true``
@@ -64,32 +75,25 @@
   Path to log config file for internal Gearman server.
   ``log_config=/etc/zuul/gearman-logging.yaml``
 
-gerrit
+webapp
 """"""
 
-**server**
-  FQDN of Gerrit server.
-  ``server=review.example.com``
+**listen_address**
+  IP address or domain name on which to listen (default: 0.0.0.0).
+  ``listen_address=127.0.0.1``
 
 **port**
-  Optional: Gerrit server port.
-  ``port=29418``
-
-**baseurl**
-  Optional: path to Gerrit web interface. Defaults to ``https://<value
-  of server>/``. ``baseurl=https://review.example.com/review_site/``
-
-**user**
-  User name to use when logging into above server via ssh.
-  ``user=zuul``
-
-**sshkey**
-  Path to SSH key to use when logging into above server.
-  ``sshkey=/home/zuul/.ssh/id_rsa``
+  Port on which the webapp is listening (default: 8001).
+  ``port=8008``
 
 zuul
 """"
 
+Zuul's main configuration section. At a minimum, zuul must be able to
+find layout.yaml to be useful.
+
+.. note:: Must be provided when running zuul-server
+
 .. _layout_config:
 
 **layout_config**
@@ -141,6 +145,13 @@
 merger
 """"""
 
+The zuul-merger process configuration. Detailed documentation on this process
+can be found on the :doc:`merger` page.
+
+.. note:: Must be provided when running zuul-merger. Both services may share the
+          same configuration (and even host) or otherwise have an individual
+          zuul.conf.
+
 **git_dir**
   Directory that Zuul should clone local git repositories to.
   ``git_dir=/var/lib/zuul/git``
@@ -166,27 +177,6 @@
   Path to PID lock file for the merger process.
   ``pidfile=/var/run/zuul-merger/merger.pid``
 
-smtp
-""""
-
-**server**
-  SMTP server hostname or address to use.
-  ``server=localhost``
-
-**port**
-  Optional: SMTP server port.
-  ``port=25``
-
-**default_from**
-  Who the email should appear to be sent from when emailing the report.
-  This can be overridden by individual pipelines.
-  ``default_from=zuul@example.com``
-
-**default_to**
-  Who the report should be emailed to by default.
-  This can be overridden by individual pipelines.
-  ``default_to=you@example.com``
-
 .. _swift:
 
 swift
@@ -257,6 +247,17 @@
   url and the object path.
   ``For example: http://logs.example.org/server.app?obj=``
 
+.. _connection:
+
+connection ArbitraryName
+""""""""""""""""""""""""
+
+A connection can be listed with any arbitrary name. The required
+parameters depend on what driver you are using and are specified in
+the :ref:`connections` documentation.
+
+.. _layoutyaml:
+
 layout.yaml
 ~~~~~~~~~~~
 
@@ -303,14 +304,16 @@
 
   - name: check
     manager: IndependentPipelineManager
-    source: gerrit
+    source: my_gerrit
     trigger:
-      gerrit:
+      my_gerrit:
         - event: patchset-created
     success:
-      verified: 1
+      my_gerrit:
+        verified: 1
     failure:
-      verified: -1
+      my_gerrit:
+        verified: -1
 
 **name**
   This is used later in the project definition to indicate what jobs
@@ -321,9 +324,11 @@
   description of the pipeline.
 
 **source**
-  A required field that specifies a trigger that provides access to
-  the change objects that this pipeline operates on.  Currently only
-  the value ``gerrit`` is supported.
+  A required field that specifies a connection that provides access to
+  the change objects that this pipeline operates on.  Specify the name
+  of the connection as defined in zuul.conf.  The driver used by the
+  named connection determines the source.  Currently only the ``gerrit``
+  driver is supported.
 
 **success-message**
   An optional field that supplies the introductory text in message
@@ -400,115 +405,11 @@
   At least one trigger source must be supplied for each pipeline.
   Triggers are not exclusive -- matching events may be placed in
   multiple pipelines, and they will behave independently in each of
-  the pipelines they match.  You may select from the following:
+  the pipelines they match.
 
-  **gerrit**
-    This describes what Gerrit events should be placed in the
-    pipeline.  Multiple gerrit triggers may be listed.  Further
-    parameters describe the kind of events that match:
-
-    *event*
-    The event name from gerrit.  Examples: ``patchset-created``,
-    ``comment-added``, ``ref-updated``.  This field is treated as a
-    regular expression.
-
-    *branch*
-    The branch associated with the event.  Example: ``master``.  This
-    field is treated as a regular expression, and multiple branches may
-    be listed.
-
-    *ref*
-    On ref-updated events, the branch parameter is not used, instead the
-    ref is provided.  Currently Gerrit has the somewhat idiosyncratic
-    behavior of specifying bare refs for branch names (e.g., ``master``),
-    but full ref names for other kinds of refs (e.g., ``refs/tags/foo``).
-    Zuul matches what you put here exactly against what Gerrit
-    provides.  This field is treated as a regular expression, and
-    multiple refs may be listed.
-
-    *approval*
-    This is only used for ``comment-added`` events.  It only matches if
-    the event has a matching approval associated with it.  Example:
-    ``code-review: 2`` matches a ``+2`` vote on the code review category.
-    Multiple approvals may be listed.
-
-    *email*
-    This is used for any event.  It takes a regex applied on the performer
-    email, i.e. Gerrit account email address.  If you want to specify
-    several email filters, you must use a YAML list.  Make sure to use non
-    greedy matchers and to escapes dots!
-    Example: ``email: ^.*?@example\.org$``.
-
-    *email_filter* (deprecated)
-    A deprecated alternate spelling of *email*.  Only one of *email* or
-    *email_filter* should be used.
-
-    *username*
-    This is used for any event.  It takes a regex applied on the performer
-    username, i.e. Gerrit account name.  If you want to specify several
-    username filters, you must use a YAML list.  Make sure to use non greedy
-    matchers and to escapes dots!
-    Example: ``username: ^jenkins$``.
-
-    *username_filter* (deprecated)
-    A deprecated alternate spelling of *username*.  Only one of *username* or
-    *username_filter* should be used.
-
-    *comment*
-    This is only used for ``comment-added`` events.  It accepts a list of
-    regexes that are searched for in the comment string. If any of these
-    regexes matches a portion of the comment string the trigger is
-    matched. ``comment: retrigger`` will match when comments
-    containing 'retrigger' somewhere in the comment text are added to a
-    change.
-
-    *comment_filter* (deprecated)
-    A deprecated alternate spelling of *comment*.  Only one of *comment* or
-    *comment_filter* should be used.
-
-    *require-approval*
-    This may be used for any event.  It requires that a certain kind
-    of approval be present for the current patchset of the change (the
-    approval could be added by the event in question).  It follows the
-    same syntax as the :ref:`"approval" pipeline requirement below
-    <pipeline-require-approval>`.
-
-  **timer**
-    This trigger will run based on a cron-style time specification.
-    It will enqueue an event into its pipeline for every project
-    defined in the configuration.  Any job associated with the
-    pipeline will run in response to that event.
-
-    *time*
-    The time specification in cron syntax.  Only the 5 part syntax is
-    supported, not the symbolic names.  Example: ``0 0 * * *`` runs
-    at midnight.
-
-  **zuul**
-    This trigger supplies events generated internally by Zuul.
-    Multiple events may be listed.
-
-    *event*
-    The event name.  Currently supported:
-
-      *project-change-merged* when Zuul merges a change to a project,
-      it generates this event for every open change in the project.
-
-      *parent-change-enqueued* when Zuul enqueues a change into any
-      pipeline, it generates this event for every child of that
-      change.
-
-    *pipeline*
-    Only available for ``parent-change-enqueued`` events.  This is the
-    name of the pipeline in which the parent change was enqueued.
-
-    *require-approval*
-    This may be used for any event.  It requires that a certain kind
-    of approval be present for the current patchset of the change (the
-    approval could be added by the event in question).  It follows the
-    same syntax as the :ref:`"approval" pipeline requirement below
-    <pipeline-require-approval>`.
-
+  Triggers are referenced by their connection name. The driver type of
+  the connection dictates which options are available.
+  See :doc:`triggers`.
 
 **require**
  If this section is present, it establishes pre-requisites for any
@@ -527,11 +428,12 @@
   approval matching all specified requirements.
 
     *username*
-    If present, an approval from this username is required.
+    If present, an approval from this username is required.  It is
+    treated as a regular expression.
 
     *email*
     If present, an approval with this email address is required.  It
-    is treated as a regular expression as above.
+    is treated as a regular expression.
 
     *email-filter* (deprecated)
     A deprecated alternate spelling of *email*.  Only one of *email* or
@@ -566,6 +468,23 @@
   reported by the trigger.  For example, when using the Gerrit
   trigger, status values such as ``NEW`` or ``MERGED`` may be useful.
 
+**reject**
+  If this section is present, it establishes pre-requisites that can
+  block an item from being enqueued. It can be considered a negative
+  version of **require**.
+
+  **approval**
+  This takes a list of approvals. If an approval matches the provided
+  criteria, the change cannot be entered into the pipeline. It follows
+  the same syntax as the :ref:`"require approval" pipeline requirement
+  above <pipeline-require-approval>`.
+
+  Example to reject a change with any negative vote::
+
+    reject:
+      approval:
+        - code-review: [-1, -2]
+
 **dequeue-on-new-patchset**
   Normally, if a new patchset is uploaded to a change that is in a
   pipeline, the existing entry in the pipeline will be removed (with
@@ -589,8 +508,9 @@
   do nothing on success; it will not even report a message to Gerrit.
   If the section is present, the listed reporter plugins will be
   asked to report on the jobs.
-  Each reporter's value dictionary is handled by the reporter. See
-  reporters for more details.
+  The reporters are listed by their connection name. The options
+  available depend on the driver for the supplied connection.
+  See :doc:`reporters` for more details.
 
 **failure**
   Uses the same syntax as **success**, but describes what Zuul should
@@ -607,6 +527,18 @@
   do when a change is added to the pipeline manager.  This can be used,
   for example, to reset the value of the Verified review category.
 
+**disabled**
+  Uses the same syntax as **success**, but describes what Zuul should
+  do when a pipeline is disabled.
+  See ``disable-after-consecutive-failures``.
+
+**disable-after-consecutive-failures**
+  If set, a pipeline can enter a ``disabled`` state if too many changes
+  in a row fail. When this value is exceeded, the pipeline will stop
+  reporting to any of the ``success``, ``failure`` or ``merge-failure``
+  reporters and instead only report to the ``disabled`` reporters.
+  (No ``start`` reports are made when a pipeline is disabled.)
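+
+  A sketch (pipeline abbreviated; the reporter connection name is
+  hypothetical)::
+
+    pipelines:
+      - name: gate
+        disable-after-consecutive-failures: 10
+        disabled:
+          smtp:
+            to: admins@example.com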
+
 **precedence**
   Indicates how the build scheduler should prioritize jobs for
   different pipelines.  Each pipeline may have one precedence, jobs
@@ -665,12 +597,13 @@
   - name: check
     manager: IndependentPipelineManager
     trigger:
-      - event: patchset-created
+      my_gerrit:
+        - event: patchset-created
     success:
-      gerrit:
+      my_gerrit:
         verified: 1
     failure:
-      gerrit:
+      my_gerrit:
         verified: -1
 
 This will trigger jobs each time a new patchset (or change) is
@@ -680,15 +613,16 @@
   - name: gate
     manager: DependentPipelineManager
     trigger:
-      - event: comment-added
-        approval:
-          - approved: 1
+      my_gerrit:
+        - event: comment-added
+          approval:
+            - approved: 1
     success:
-      gerrit:
+      my_gerrit:
         verified: 2
         submit: true
     failure:
-      gerrit:
+      my_gerrit:
         verified: -2
 
 This will trigger jobs whenever a reviewer leaves a vote of ``1`` in the
@@ -701,8 +635,9 @@
   - name: post
     manager: IndependentPipelineManager
     trigger:
-      - event: ref-updated
-        ref: ^(?!refs/).*$
+      my_gerrit:
+        - event: ref-updated
+          ref: ^(?!refs/).*$
 
 This will trigger jobs whenever a change is merged to a named branch
 (e.g., ``master``).  No output will be reported to Gerrit.  This is
@@ -711,7 +646,8 @@
   - name: silent
     manager: IndependentPipelineManager
     trigger:
-      - event: patchset-created
+      my_gerrit:
+        - event: patchset-created
 
 This also triggers jobs when changes are uploaded to Gerrit, but no
 results are reported to Gerrit.  This is useful for jobs that are in
@@ -721,12 +657,13 @@
     - name: post-merge
       manager: IndependentPipelineManager
       trigger:
-        - event: change-merged
+        my_gerrit:
+          - event: change-merged
       success:
-        gerrit:
+        my_gerrit:
           force-message: True
       failure:
-        gerrit:
+        my_gerrit:
           force-message: True
 
 The ``change-merged`` events happen when a change has been merged in the git
@@ -802,6 +739,11 @@
   would largely defeat the parallelization of dependent change testing
   that is the main feature of Zuul.  Default: ``false``.
 
+**mutex (optional)**
+  This is a string that names a mutex that should be observed by this
+  job.  Only one build of any job that references the same named mutex
+  will be enqueued at a time.  This applies across all pipelines.
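+
+  For example, a sketch with hypothetical job names::
+
+    jobs:
+      - name: deploy-to-staging
+        mutex: staging-environment
+      - name: integration-test-staging
+        mutex: staging-environment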
+
 **branch (optional)**
   This job should only be run on matching branches.  This field is
   treated as a regular expression and multiple branches may be
@@ -852,12 +794,21 @@
     expressions.
 
     The pattern for '/COMMIT_MSG' is always matched on and does not
-    have to be included.
+    have to be included. The exception is merge commits (with no
+    modified files); in this case '/COMMIT_MSG' is not matched and the
+    job is not skipped. For merge commits it is assumed that the list of
+    modified files is not predictable and CI should be run.
 
 **voting (optional)**
  Boolean value (``true`` or ``false``) that indicates whether
  a job is voting or not.  Default: ``true``.
 
+**tags (optional)**
+  A list of arbitrary strings which will be associated with the job.
+  Can be used by the parameter-function to alter behavior based on
+  their presence on a job.  If the job name is a regular expression,
+  tags will accumulate on jobs that match.
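+
+  For example (hypothetical job pattern and tags)::
+
+    jobs:
+      - name: ^.*-database-migration$
+        tags:
+          - mysql
+          - postgresql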
+
 **parameter-function (optional)**
   Specifies a function that should be applied to the parameters before
   the job is launched.  The function should be defined in a python file
@@ -1084,9 +1035,8 @@
 
 If you send signal 1 (SIGHUP) to the zuul-server process, Zuul will
 stop executing new jobs, wait until all executing jobs are finished,
-reload its configuration, and resume.  Any values in any of the
-configuration files may be changed, except the location of Zuul's PID
-file (a change to that will be ignored until Zuul is restarted).
+reload its layout.yaml, and resume. Changes to any connections or to
+the PID file location will be ignored until Zuul is restarted.
 
 If you send a SIGUSR1 to the zuul-server process, Zuul will stop
 executing new jobs, wait until all executing jobs are finished,
diff --git a/etc/layout.yaml-sample b/etc/layout.yaml-sample
index 30a3352..53f6ba1 100644
--- a/etc/layout.yaml-sample
+++ b/etc/layout.yaml-sample
@@ -30,6 +30,7 @@
       gerrit:
         - event: ref-updated
           ref: ^(?!refs/).*$
+          ignore-deletes: False
 
   - name: gate
     manager: DependentPipelineManager
diff --git a/etc/status/public_html/jquery.zuul.js b/etc/status/public_html/jquery.zuul.js
index 0ca2718..9df44ce 100644
--- a/etc/status/public_html/jquery.zuul.js
+++ b/etc/status/public_html/jquery.zuul.js
@@ -145,6 +145,9 @@
                     case 'unstable':
                         $status.addClass('label-warning');
                         break;
+                    case 'skipped':
+                        $status.addClass('label-info');
+                        break;
                     case 'in progress':
                     case 'queued':
                     case 'lost':
@@ -487,10 +490,12 @@
                 $header_div.append($heading);
 
                 if (typeof pipeline.description === 'string') {
+                    var descr = $('<small />');
+                    $.each( pipeline.description.split(/\r?\n\r?\n/), function(index, descr_part){
+                        descr.append($('<p />').text(descr_part));
+                    });
                     $header_div.append(
-                        $('<p />').append(
-                            $('<small />').text(pipeline.description)
-                        )
+                        $('<p />').append(descr)
                     );
                 }
                 return $header_div;
diff --git a/etc/zuul.conf-sample b/etc/zuul.conf-sample
index ac8021b..d7b8eae 100644
--- a/etc/zuul.conf-sample
+++ b/etc/zuul.conf-sample
@@ -4,12 +4,6 @@
 [gearman_server]
 start=true
 
-[gerrit]
-server=review.example.com
-;baseurl=https://review.example.com/r
-user=jenkins
-sshkey=/home/jenkins/.ssh/id_rsa
-
 [zuul]
 layout_config=/etc/zuul/layout.yaml
 log_config=/etc/zuul/logging.conf
@@ -32,8 +26,20 @@
 region_name=EXP
 logserver_prefix=http://logs.example.org/server.app/
 
-[smtp]
+[webapp]
+listen_address=0.0.0.0
+port=8001
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+;baseurl=https://review.example.com/r
+user=jenkins
+sshkey=/home/jenkins/.ssh/id_rsa
+
+[connection smtp]
+driver=smtp
 server=localhost
 port=25
 default_from=zuul@example.com
-default_to=you@example.com
\ No newline at end of file
+default_to=you@example.com
diff --git a/other-requirements.txt b/other-requirements.txt
new file mode 100644
index 0000000..1ade655
--- /dev/null
+++ b/other-requirements.txt
@@ -0,0 +1,4 @@
+mysql-client [test]
+mysql-server [test]
+postgresql [test]
+postgresql-client [test]
diff --git a/requirements.txt b/requirements.txt
index c682999..77ac0a5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,18 +1,17 @@
-pbr>=0.5.21,<1.0
+pbr>=1.1.0
 
-argparse
 PyYAML>=3.1.0
 Paste
-WebOb>=1.2.3,<1.3
-paramiko>=1.8.0
+WebOb>=1.2.3
+paramiko>=1.8.0,<2.0.0
 GitPython>=0.3.3
 ordereddict
-python-daemon>=2.0.4
+python-daemon>=2.0.4,<2.1.0
 extras
 statsd>=1.0.0,<3.0
 voluptuous>=0.7
 gear>=0.5.7,<1.0.0
-apscheduler>=2.1.1,<3.0
+apscheduler>=3.0
 PrettyTable>=0.6,<0.8
 babel>=1.0
 six>=1.6.0
diff --git a/setup.cfg b/setup.cfg
index 620e1ac..7ddeb84 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -25,6 +25,7 @@
     zuul-merger = zuul.cmd.merger:main
     zuul = zuul.cmd.client:main
     zuul-cloner = zuul.cmd.cloner:main
+    zuul-launcher = zuul.cmd.launcher:main
 
 [build_sphinx]
 source-dir = doc/source
diff --git a/test-requirements.txt b/test-requirements.txt
index c68b2db..88223b0 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,7 @@
 
 coverage>=3.6
 sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
-sphinxcontrib-blockdiag>=0.5.5
+sphinxcontrib-blockdiag>=1.1.0
 discover
 fixtures>=0.3.14
 python-keystoneclient>=0.4.2
@@ -11,3 +11,4 @@
 testrepository>=0.0.17
 testtools>=0.9.32
 sphinxcontrib-programoutput
+mock
diff --git a/tests/base.py b/tests/base.py
index 5ddb160..38d2817 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -22,26 +22,28 @@
 import os
 import pprint
 from six.moves import queue as Queue
+from six.moves import urllib
 import random
 import re
 import select
 import shutil
+from six.moves import reload_module
 import socket
 import string
 import subprocess
 import swiftclient
 import threading
 import time
-import urllib2
 
 import git
 import gear
 import fixtures
-import six.moves.urllib.parse as urlparse
 import statsd
 import testtools
 from git import GitCommandError
 
+import zuul.connection.gerrit
+import zuul.connection.smtp
 import zuul.scheduler
 import zuul.webapp
 import zuul.rpclistener
@@ -52,6 +54,7 @@
 import zuul.merger.server
 import zuul.reporter.gerrit
 import zuul.reporter.smtp
+import zuul.source.gerrit
 import zuul.trigger.gerrit
 import zuul.trigger.timer
 import zuul.trigger.zuultrigger
@@ -378,20 +381,20 @@
         self.reported += 1
 
 
-class FakeGerrit(object):
-    log = logging.getLogger("zuul.test.FakeGerrit")
+class FakeGerritConnection(zuul.connection.gerrit.GerritConnection):
+    log = logging.getLogger("zuul.test.FakeGerritConnection")
 
-    def __init__(self, hostname, username, port=29418, keyfile=None,
-                 changes_dbs={}):
-        self.hostname = hostname
-        self.username = username
-        self.port = port
-        self.keyfile = keyfile
-        self.event_queue = Queue.Queue()
+    def __init__(self, connection_name, connection_config,
+                 changes_db=None, queues_db=None, upstream_root=None):
+        super(FakeGerritConnection, self).__init__(connection_name,
+                                                   connection_config)
+
+        self.event_queue = queues_db
         self.fixture_dir = os.path.join(FIXTURE_DIR, 'gerrit')
         self.change_number = 0
-        self.changes = changes_dbs.get(hostname, {})
+        self.changes = changes_db
         self.queries = []
+        self.upstream_root = upstream_root
 
     def addFakeChange(self, project, branch, subject, status='NEW'):
         self.change_number += 1
@@ -401,15 +404,6 @@
         self.changes[self.change_number] = c
         return c
 
-    def addEvent(self, data):
-        return self.event_queue.put((time.time(), data))
-
-    def getEvent(self):
-        return self.event_queue.get()
-
-    def eventDone(self):
-        self.event_queue.task_done()
-
     def review(self, project, changeid, message, action):
         number, ps = changeid.split(',')
         change = self.changes[int(number)]
@@ -426,11 +420,11 @@
 
         for cat in ['CRVW', 'VRFY', 'APRV']:
             if cat in action:
-                change.addApproval(cat, action[cat], username=self.username)
+                change.addApproval(cat, action[cat], username=self.user)
 
         if 'label' in action:
             parts = action['label'].split('=')
-            change.addApproval(parts[0], parts[2], username=self.username)
+            change.addApproval(parts[0], parts[2], username=self.user)
 
         change.messages.append(message)
 
@@ -463,9 +457,12 @@
             l = [change.query() for change in self.changes.values()]
         return l
 
-    def startWatching(self, *args, **kw):
+    def _start_watcher_thread(self, *args, **kw):
         pass
 
+    def getGitUrl(self, project):
+        return os.path.join(self.upstream_root, project.name)
+
 
 class BuildHistory(object):
     def __init__(self, **kw):
@@ -477,13 +474,12 @@
 
 
 class FakeURLOpener(object):
-    def __init__(self, upstream_root, fake_gerrit, url):
+    def __init__(self, upstream_root, url):
         self.upstream_root = upstream_root
-        self.fake_gerrit = fake_gerrit
         self.url = url
 
     def read(self):
-        res = urlparse.urlparse(self.url)
+        res = urllib.parse.urlparse(self.url)
         path = res.path
         project = '/'.join(path.split('/')[2:-2])
         ret = '001e# service=git-upload-pack\n'
@@ -499,18 +495,6 @@
         return ret
 
 
-class FakeGerritTrigger(zuul.trigger.gerrit.Gerrit):
-    name = 'gerrit'
-
-    def __init__(self, upstream_root, *args):
-        super(FakeGerritTrigger, self).__init__(*args)
-        self.upstream_root = upstream_root
-        self.gerrit_connector.delay = 0.0
-
-    def getGitUrl(self, project):
-        return os.path.join(self.upstream_root, project.name)
-
-
 class FakeStatsd(threading.Thread):
     def __init__(self):
         threading.Thread.__init__(self)
@@ -624,6 +608,8 @@
             result = 'RUN_ERROR'
         else:
             data['result'] = result
+            data['node_labels'] = ['bare-necessities']
+            data['node_name'] = 'foo'
             work_fail = False
 
         changes = None
@@ -634,6 +620,7 @@
             BuildHistory(name=self.name, number=self.number,
                          result=result, changes=changes, node=self.node,
                          uuid=self.unique, description=self.description,
+                         parameters=self.parameters,
                          pipeline=self.parameters['ZUUL_PIPELINE'])
         )
 
@@ -875,6 +862,28 @@
                 format='%(asctime)s %(name)-32s '
                 '%(levelname)-8s %(message)s'))
 
+            # NOTE(notmorgan): Extract logging overrides for specific libraries
+            # from the OS_LOG_DEFAULTS env and create FakeLogger fixtures for
+            # each. This is used to limit the output during test runs from
+            # libraries that zuul depends on such as gear.
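+            # An illustrative value (the module names are examples):
+            #   OS_LOG_DEFAULTS="gear.Server=INFO,gear.Client=INFO"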
+            log_defaults_from_env = os.environ.get('OS_LOG_DEFAULTS')
+
+            if log_defaults_from_env:
+                for default in log_defaults_from_env.split(','):
+                    try:
+                        name, level_str = default.split('=', 1)
+                        level = getattr(logging, level_str, logging.DEBUG)
+                        self.useFixture(fixtures.FakeLogger(
+                            name=name,
+                            level=level,
+                            format='%(asctime)s %(name)-32s '
+                                   '%(levelname)-8s %(message)s'))
+                    except ValueError:
+                        # NOTE(notmorgan): Invalid format of the log default,
+                        # skip and don't try and apply a logger for the
+                        # specified module
+                        pass
+
 
 class ZuulTestCase(BaseTestCase):
 
@@ -889,18 +898,21 @@
         self.test_root = os.path.join(tmp_root, "zuul-test")
         self.upstream_root = os.path.join(self.test_root, "upstream")
         self.git_root = os.path.join(self.test_root, "git")
+        self.state_root = os.path.join(self.test_root, "lib")
 
         if os.path.exists(self.test_root):
             shutil.rmtree(self.test_root)
         os.makedirs(self.test_root)
         os.makedirs(self.upstream_root)
-        os.makedirs(self.git_root)
+        os.makedirs(self.state_root)
 
         # Make per test copy of Configuration.
         self.setup_config()
         self.config.set('zuul', 'layout_config',
-                        os.path.join(FIXTURE_DIR, "layout.yaml"))
+                        os.path.join(FIXTURE_DIR,
+                                     self.config.get('zuul', 'layout_config')))
         self.config.set('merger', 'git_dir', self.git_root)
+        self.config.set('zuul', 'state_dir', self.state_root)
 
         # For each project in config:
         self.init_repo("org/project")
@@ -921,12 +933,14 @@
         self.init_repo("org/no-jobs-project")
 
         self.statsd = FakeStatsd()
-        os.environ['STATSD_HOST'] = 'localhost'
+        # NOTE: use 127.0.0.1 rather than localhost to avoid resolving to an
+        # IPv6 address; see https://github.com/jsocol/pystatsd/issues/61
+        os.environ['STATSD_HOST'] = '127.0.0.1'
         os.environ['STATSD_PORT'] = str(self.statsd.port)
         self.statsd.start()
         # the statsd client object is configured in the statsd module import
-        reload(statsd)
-        reload(zuul.scheduler)
+        reload_module(statsd)
+        reload_module(zuul.scheduler)
 
         self.gearman_server = FakeGearmanServer()
 
@@ -936,76 +950,47 @@
         self.worker.addServer('127.0.0.1', self.gearman_server.port)
         self.gearman_server.worker = self.worker
 
-        self.merge_server = zuul.merger.server.MergeServer(self.config)
-        self.merge_server.start()
+        zuul.source.gerrit.GerritSource.replication_timeout = 1.5
+        zuul.source.gerrit.GerritSource.replication_retry_interval = 0.5
+        zuul.connection.gerrit.GerritEventConnector.delay = 0.0
 
-        self.sched = zuul.scheduler.Scheduler()
+        self.sched = zuul.scheduler.Scheduler(self.config)
 
         self.useFixture(fixtures.MonkeyPatch('swiftclient.client.Connection',
                                              FakeSwiftClientConnection))
         self.swift = zuul.lib.swift.Swift(self.config)
 
+        self.event_queues = [
+            self.sched.result_event_queue,
+            self.sched.trigger_event_queue
+        ]
+
+        self.configure_connections()
+        self.sched.registerConnections(self.connections)
+
         def URLOpenerFactory(*args, **kw):
-            if isinstance(args[0], urllib2.Request):
+            if isinstance(args[0], urllib.request.Request):
                 return old_urlopen(*args, **kw)
-            args = [self.fake_gerrit] + list(args)
             return FakeURLOpener(self.upstream_root, *args, **kw)
 
-        old_urlopen = urllib2.urlopen
-        urllib2.urlopen = URLOpenerFactory
+        old_urlopen = urllib.request.urlopen
+        urllib.request.urlopen = URLOpenerFactory
+
+        self.merge_server = zuul.merger.server.MergeServer(self.config,
+                                                           self.connections)
+        self.merge_server.start()
 
         self.launcher = zuul.launcher.gearman.Gearman(self.config, self.sched,
                                                       self.swift)
         self.merge_client = zuul.merger.client.MergeClient(
             self.config, self.sched)
 
-        self.smtp_messages = []
-
-        def FakeSMTPFactory(*args, **kw):
-            args = [self.smtp_messages] + list(args)
-            return FakeSMTP(*args, **kw)
-
-        # Set a changes database so multiple FakeGerrit's can report back to
-        # a virtual canonical database given by the configured hostname
-        self.gerrit_changes_dbs = {
-            self.config.get('gerrit', 'server'): {}
-        }
-
-        def FakeGerritFactory(*args, **kw):
-            kw['changes_dbs'] = self.gerrit_changes_dbs
-            return FakeGerrit(*args, **kw)
-
-        self.useFixture(fixtures.MonkeyPatch('zuul.lib.gerrit.Gerrit',
-                                             FakeGerritFactory))
-
-        self.useFixture(fixtures.MonkeyPatch('smtplib.SMTP', FakeSMTPFactory))
-
-        self.gerrit = FakeGerritTrigger(
-            self.upstream_root, self.config, self.sched)
-        self.gerrit.replication_timeout = 1.5
-        self.gerrit.replication_retry_interval = 0.5
-        self.fake_gerrit = self.gerrit.gerrit
-        self.fake_gerrit.upstream_root = self.upstream_root
-
-        self.webapp = zuul.webapp.WebApp(self.sched, port=0)
-        self.rpc = zuul.rpclistener.RPCListener(self.config, self.sched)
-
         self.sched.setLauncher(self.launcher)
         self.sched.setMerger(self.merge_client)
-        self.sched.registerTrigger(self.gerrit)
-        self.timer = zuul.trigger.timer.Timer(self.config, self.sched)
-        self.sched.registerTrigger(self.timer)
-        self.zuultrigger = zuul.trigger.zuultrigger.ZuulTrigger(self.config,
-                                                                self.sched)
-        self.sched.registerTrigger(self.zuultrigger)
 
-        self.sched.registerReporter(
-            zuul.reporter.gerrit.Reporter(self.gerrit))
-        self.smtp_reporter = zuul.reporter.smtp.Reporter(
-            self.config.get('smtp', 'default_from'),
-            self.config.get('smtp', 'default_to'),
-            self.config.get('smtp', 'server'))
-        self.sched.registerReporter(self.smtp_reporter)
+        self.webapp = zuul.webapp.WebApp(
+            self.sched, port=0, listen_address='127.0.0.1')
+        self.rpc = zuul.rpclistener.RPCListener(self.config, self.sched)
 
         self.sched.start()
         self.sched.reconfigure(self.config)
@@ -1020,10 +1005,80 @@
         self.addCleanup(self.assertFinalState)
         self.addCleanup(self.shutdown)
 
-    def setup_config(self):
+    def configure_connections(self):
+        # Register connections from the config
+        self.smtp_messages = []
+
+        def FakeSMTPFactory(*args, **kw):
+            args = [self.smtp_messages] + list(args)
+            return FakeSMTP(*args, **kw)
+
+        self.useFixture(fixtures.MonkeyPatch('smtplib.SMTP', FakeSMTPFactory))
+
+        # Set up a changes database so multiple FakeGerrits can report back
+        # to a virtual canonical database keyed by the configured hostname.
+        self.gerrit_changes_dbs = {}
+        self.gerrit_queues_dbs = {}
+        self.connections = {}
+
+        for section_name in self.config.sections():
+            con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
+                                 section_name, re.I)
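+            # Matches section names such as "connection gerrit" or
+            # "connection 'gerrit'" (optional matching quotes around the name).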
+            if not con_match:
+                continue
+            con_name = con_match.group(2)
+            con_config = dict(self.config.items(section_name))
+
+            if 'driver' not in con_config:
+                raise Exception("No driver specified for connection %s."
+                                % con_name)
+
+            con_driver = con_config['driver']
+
+            # TODO(jhesketh): load the required class automatically
+            if con_driver == 'gerrit':
+                if con_config['server'] not in self.gerrit_changes_dbs.keys():
+                    self.gerrit_changes_dbs[con_config['server']] = {}
+                if con_config['server'] not in self.gerrit_queues_dbs.keys():
+                    self.gerrit_queues_dbs[con_config['server']] = \
+                        Queue.Queue()
+                    self.event_queues.append(
+                        self.gerrit_queues_dbs[con_config['server']])
+                self.connections[con_name] = FakeGerritConnection(
+                    con_name, con_config,
+                    changes_db=self.gerrit_changes_dbs[con_config['server']],
+                    queues_db=self.gerrit_queues_dbs[con_config['server']],
+                    upstream_root=self.upstream_root
+                )
+                setattr(self, 'fake_' + con_name, self.connections[con_name])
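+                # Exposes the fake as e.g. self.fake_review_gerrit for tests.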
+            elif con_driver == 'smtp':
+                self.connections[con_name] = \
+                    zuul.connection.smtp.SMTPConnection(con_name, con_config)
+            else:
+                raise Exception("Unknown driver, %s, for connection %s"
+                                % (con_config['driver'], con_name))
+
+        # If legacy [gerrit] or [smtp] sections still exist, load them as
+        # connections named 'gerrit' or 'smtp' respectively.
+
+        if 'gerrit' in self.config.sections():
+            self.gerrit_changes_dbs['gerrit'] = {}
+            self.gerrit_queues_dbs['gerrit'] = Queue.Queue()
+            self.event_queues.append(self.gerrit_queues_dbs['gerrit'])
+            self.connections['gerrit'] = FakeGerritConnection(
+                '_legacy_gerrit', dict(self.config.items('gerrit')),
+                changes_db=self.gerrit_changes_dbs['gerrit'],
+                queues_db=self.gerrit_queues_dbs['gerrit'])
+
+        if 'smtp' in self.config.sections():
+            self.connections['smtp'] = \
+                zuul.connection.smtp.SMTPConnection(
+                    '_legacy_smtp', dict(self.config.items('smtp')))
+
+    def setup_config(self, config_file='zuul.conf'):
         """Per test config object. Override to set different config."""
         self.config = ConfigParser.ConfigParser()
-        self.config.read(os.path.join(FIXTURE_DIR, "zuul.conf"))
+        self.config.read(os.path.join(FIXTURE_DIR, config_file))
 
     def assertFinalState(self):
         # Make sure that git.Repo objects have been garbage collected.
@@ -1046,8 +1101,6 @@
         self.merge_server.join()
         self.merge_client.stop()
         self.worker.shutdown()
-        self.gerrit.stop()
-        self.timer.stop()
         self.sched.stop()
         self.sched.join()
         self.statsd.stop()
@@ -1105,6 +1158,17 @@
         zuul.merger.merger.reset_repo_to_head(repo)
         repo.git.clean('-x', '-f', '-d')
 
+    def create_commit(self, project):
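+        # Append to the project's README on master and return the new sha;
+        # the cloner tests use this to target a specific revision.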
+        path = os.path.join(self.upstream_root, project)
+        repo = git.Repo(path)
+        repo.head.reference = repo.heads['master']
+        file_name = os.path.join(path, 'README')
+        with open(file_name, 'a') as f:
+            f.write('creating fake commit\n')
+        repo.index.add([file_name])
+        commit = repo.index.commit('Creating a fake commit')
+        return commit.hexsha
+
     def ref_has_change(self, ref, change):
         path = os.path.join(self.git_root, change.project)
         repo = git.Repo(path)
@@ -1253,16 +1317,24 @@
                 return False
         return True
 
+    def eventQueuesEmpty(self):
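+        # Generator of per-queue emptiness; consumed with all() in
+        # waitUntilSettled below.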
+        for queue in self.event_queues:
+            yield queue.empty()
+
+    def eventQueuesJoin(self):
+        for queue in self.event_queues:
+            queue.join()
+
     def waitUntilSettled(self):
         self.log.debug("Waiting until settled...")
         start = time.time()
         while True:
             if time.time() - start > 10:
-                print 'queue status:',
-                print self.sched.trigger_event_queue.empty(),
-                print self.sched.result_event_queue.empty(),
-                print self.fake_gerrit.event_queue.empty(),
-                print self.areAllBuildsWaiting()
+                self.log.debug("Queue status:")
+                for queue in self.event_queues:
+                    self.log.debug("  %s: %s" % (queue, queue.empty()))
+                self.log.debug("All builds waiting: %s" %
+                               (self.areAllBuildsWaiting(),))
                 raise Exception("Timeout waiting for Zuul to settle")
             # Make sure no new events show up while we're checking
             self.worker.lock.acquire()
@@ -1270,14 +1342,10 @@
             if self.haveAllBuildsReported():
                 # Join ensures that the queue is empty _and_ events have been
                 # processed
-                self.fake_gerrit.event_queue.join()
-                self.sched.trigger_event_queue.join()
-                self.sched.result_event_queue.join()
+                self.eventQueuesJoin()
                 self.sched.run_handler_lock.acquire()
                 if (not self.merge_client.build_sets and
-                    self.sched.trigger_event_queue.empty() and
-                    self.sched.result_event_queue.empty() and
-                    self.fake_gerrit.event_queue.empty() and
+                    all(self.eventQueuesEmpty()) and
                     self.haveAllBuildsReported() and
                     self.areAllBuildsWaiting()):
                     self.sched.run_handler_lock.release()
@@ -1304,8 +1372,8 @@
         for pipeline in self.sched.layout.pipelines.values():
             for queue in pipeline.queues:
                 if len(queue.queue) != 0:
-                    print 'pipeline %s queue %s contents %s' % (
-                        pipeline.name, queue.name, queue.queue)
+                    print('pipeline %s queue %s contents %s' % (
+                        pipeline.name, queue.name, queue.queue))
                 self.assertEqual(len(queue.queue), 0,
                                  "Pipelines queues should be empty")
 
diff --git a/tests/cmd/__init__.py b/tests/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/cmd/__init__.py
diff --git a/tests/cmd/test_cloner.py b/tests/cmd/test_cloner.py
new file mode 100644
index 0000000..9cbb5b8
--- /dev/null
+++ b/tests/cmd/test_cloner.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+
+import testtools
+import zuul.cmd.cloner
+
+logging.basicConfig(level=logging.DEBUG,
+                    format='%(asctime)s %(name)-32s '
+                    '%(levelname)-8s %(message)s')
+
+
+class TestClonerCmdArguments(testtools.TestCase):
+
+    def setUp(self):
+        super(TestClonerCmdArguments, self).setUp()
+        self.app = zuul.cmd.cloner.Cloner()
+
+    def test_default_cache_dir_empty(self):
+        self.app.parse_arguments(['base', 'repo'])
+        self.assertEqual(None, self.app.args.cache_dir)
+
+    def test_default_cache_dir_environ(self):
+        try:
+            os.environ['ZUUL_CACHE_DIR'] = 'fromenviron'
+            self.app.parse_arguments(['base', 'repo'])
+            self.assertEqual('fromenviron', self.app.args.cache_dir)
+        finally:
+            del os.environ['ZUUL_CACHE_DIR']
+
+    def test_default_cache_dir_override_environ(self):
+        try:
+            os.environ['ZUUL_CACHE_DIR'] = 'fromenviron'
+            self.app.parse_arguments(['--cache-dir', 'argument',
+                                      'base', 'repo'])
+            self.assertEqual('argument', self.app.args.cache_dir)
+        finally:
+            del os.environ['ZUUL_CACHE_DIR']
+
+    def test_default_cache_dir_argument(self):
+        self.app.parse_arguments(['--cache-dir', 'argument',
+                                  'base', 'repo'])
+        self.assertEqual('argument', self.app.args.cache_dir)
diff --git a/tests/fixtures/gerrit/simple_query_pagination_new_1 b/tests/fixtures/gerrit/simple_query_pagination_new_1
new file mode 100644
index 0000000..b3fdd83
--- /dev/null
+++ b/tests/fixtures/gerrit/simple_query_pagination_new_1
@@ -0,0 +1,5 @@
+gerrit query --format json --commit-message --current-patch-set project:openstack-infra/zuul
+{"project":"openstack-infra/zuul","branch":"master","topic":"(detached","id":"I173251c8b1569755124b7cb1a48b6274bf38c94b","number":"202867","subject":"Report the per-job build wait time to graphite","owner":{"name":"Timothy R. Chavez","email":"timothy.chavez@hp.com","username":"timrchavez"},"url":"https://review.openstack.org/202867","commitMessage":"Report the per-job build wait time to graphite\n\nKnowing how long a job waits to build in aggregate can give useful\ninsights into the performance and capacity of the build system. This\nchange also uses the node labels sent back from the gearman worker to\nsubmit metrics within that context.\n\nChange-Id: I173251c8b1569755124b7cb1a48b6274bf38c94b\nDepends-On: Ibca938fcf8a65facd7e39dab4eb994dfc637722a\n","createdOn":1437104683,"lastUpdated":1440760891,"open":true,"status":"NEW"}
+{"project":"openstack-infra/zuul","branch":"master","topic":"ignore-deletes","id":"Iea75d05ddcb49b0bf748b72b9d2d5472d077f0c6","number":"178833","subject":"Add option to ignore ref-updated events emitted by branch deletions","owner":{"name":"K Jonathan Harker","email":"code@gentlydownthe.net","username":"jesusaurus"},"url":"https://review.openstack.org/178833","commitMessage":"Add option to ignore ref-updated events emitted by branch deletions\n\nWhen a branch is deleted, gerrit emits a ref-updated event with a newrev\nvalue of all zeros. This adds a boolean field to optionally not trigger\non these ref-updated events.\n\nChange-Id: Iea75d05ddcb49b0bf748b72b9d2d5472d077f0c6\n","createdOn":1430339761,"lastUpdated":1440735750,"open":true,"status":"NEW"}
+{"project":"openstack-infra/zuul","branch":"master","topic":"undefined-projects","id":"I7912197fb86c1a7becb7f43ca36078101f632715","number":"207094","subject":"Dependencies from undefined projects","owner":{"name":"Evgeny Antyshev","email":"eantyshev@virtuozzo.com","username":"eantyshev"},"url":"https://review.openstack.org/207094","commitMessage":"Dependencies from undefined projects\n\n3rd party CI layout usually has only a few projects defined,\nso it\u0027s possible that some changes depend on projects\nwhich are unknown to Zuul scheduler.\nThese items had None as a \"item.change.project\", which\nis not handled in many places, for ex. in reconfiguration.\n\nThese cases could be handled by defining these projects in layout\nas \"foreign\" projects: no jobs, no other non-standard attributes.\nChanges to those projects are also dropped, unless\nthey came as dependencies.\n\nChange-Id: I7912197fb86c1a7becb7f43ca36078101f632715\n","createdOn":1438183395,"lastUpdated":1440667433,"open":true,"status":"NEW"}
+{"type":"stats","rowCount":3,"runTimeMilliseconds":12,"moreChanges":true}
\ No newline at end of file
diff --git a/tests/fixtures/gerrit/simple_query_pagination_new_2 b/tests/fixtures/gerrit/simple_query_pagination_new_2
new file mode 100644
index 0000000..9fd8d54
--- /dev/null
+++ b/tests/fixtures/gerrit/simple_query_pagination_new_2
@@ -0,0 +1,4 @@
+gerrit query --format json --commit-message --current-patch-set project:openstack-infra/zuul -S 3
+{"project":"openstack-infra/zuul","branch":"master","topic":"github-integration","id":"I95f41088ea160d4e33a507c4a413e3fa7f08906b","number":"192457","subject":"(WIP) Fix job hierarchy bug.","owner":{"name":"Wayne Warren","email":"waynr+launchpad@sdf.org","username":"waynr"},"url":"https://review.openstack.org/192457","commitMessage":"(WIP) Fix job hierarchy bug.\n\nJobTree.addJob may return \u0027None\u0027, this prevents that from happening.\n\nChange-Id: I95f41088ea160d4e33a507c4a413e3fa7f08906b\n","createdOn":1434498278,"lastUpdated":1440608984,"sortKey":"003763050002efc9","open":true,"status":"NEW"}
+{"project":"openstack-infra/zuul","branch":"master","topic":"github-integration","id":"Ic5887d00ff302f67469df5154e9df10b99f1cfcd","number":"215642","subject":"(WIP) Allow using webapp from connections","owner":{"name":"Jan Hruban","email":"jan.hruban@gooddata.com","username":"hrubi"},"url":"https://review.openstack.org/215642","commitMessage":"(WIP) Allow using webapp from connections\n\nAllow connections to register their own handlers for HTTP URIs inside\nthe zuul\u0027s webapp HTTP server. That way, connections can listen for\nevents comming through HTTP.\n\nChange-Id: Ic5887d00ff302f67469df5154e9df10b99f1cfcd\n","createdOn":1440165019,"lastUpdated":1440602591,"sortKey":"0037629b00034a5a","open":true,"status":"NEW"}
+{"type":"stats","rowCount":2,"runTimeMilliseconds":12,"moreChanges":false}
\ No newline at end of file
diff --git a/tests/fixtures/gerrit/simple_query_pagination_old_1 b/tests/fixtures/gerrit/simple_query_pagination_old_1
new file mode 100644
index 0000000..8ff1710
--- /dev/null
+++ b/tests/fixtures/gerrit/simple_query_pagination_old_1
@@ -0,0 +1,5 @@
+gerrit query --format json --commit-message --current-patch-set project:openstack-infra/zuul
+{"project":"openstack-infra/zuul","branch":"master","topic":"(detached","id":"I173251c8b1569755124b7cb1a48b6274bf38c94b","number":"202867","subject":"Report the per-job build wait time to graphite","owner":{"name":"Timothy R. Chavez","email":"timothy.chavez@hp.com","username":"timrchavez"},"url":"https://review.openstack.org/202867","commitMessage":"Report the per-job build wait time to graphite\n\nKnowing how long a job waits to build in aggregate can give useful\ninsights into the performance and capacity of the build system. This\nchange also uses the node labels sent back from the gearman worker to\nsubmit metrics within that context.\n\nChange-Id: I173251c8b1569755124b7cb1a48b6274bf38c94b\nDepends-On: Ibca938fcf8a65facd7e39dab4eb994dfc637722a\n","createdOn":1437104683,"lastUpdated":1440760891,"sortKey":"00376ce900031873","open":true,"status":"NEW"}
+{"project":"openstack-infra/zuul","branch":"master","topic":"ignore-deletes","id":"Iea75d05ddcb49b0bf748b72b9d2d5472d077f0c6","number":"178833","subject":"Add option to ignore ref-updated events emitted by branch deletions","owner":{"name":"K Jonathan Harker","email":"code@gentlydownthe.net","username":"jesusaurus"},"url":"https://review.openstack.org/178833","commitMessage":"Add option to ignore ref-updated events emitted by branch deletions\n\nWhen a branch is deleted, gerrit emits a ref-updated event with a newrev\nvalue of all zeros. This adds a boolean field to optionally not trigger\non these ref-updated events.\n\nChange-Id: Iea75d05ddcb49b0bf748b72b9d2d5472d077f0c6\n","createdOn":1430339761,"lastUpdated":1440735750,"sortKey":"00376b460002ba91","open":true,"status":"NEW"}
+{"project":"openstack-infra/zuul","branch":"master","topic":"undefined-projects","id":"I7912197fb86c1a7becb7f43ca36078101f632715","number":"207094","subject":"Dependencies from undefined projects","owner":{"name":"Evgeny Antyshev","email":"eantyshev@virtuozzo.com","username":"eantyshev"},"url":"https://review.openstack.org/207094","commitMessage":"Dependencies from undefined projects\n\n3rd party CI layout usually has only a few projects defined,\nso it\u0027s possible that some changes depend on projects\nwhich are unknown to Zuul scheduler.\nThese items had None as a \"item.change.project\", which\nis not handled in many places, for ex. in reconfiguration.\n\nThese cases could be handled by defining these projects in layout\nas \"foreign\" projects: no jobs, no other non-standard attributes.\nChanges to those projects are also dropped, unless\nthey came as dependencies.\n\nChange-Id: I7912197fb86c1a7becb7f43ca36078101f632715\n","createdOn":1438183395,"lastUpdated":1440667433,"sortKey":"003766d3000328f6","open":true,"status":"NEW"}
+{"type":"stats","rowCount":3,"runTimeMilliseconds":12}
\ No newline at end of file
diff --git a/tests/fixtures/gerrit/simple_query_pagination_old_2 b/tests/fixtures/gerrit/simple_query_pagination_old_2
new file mode 100644
index 0000000..c55cd40
--- /dev/null
+++ b/tests/fixtures/gerrit/simple_query_pagination_old_2
@@ -0,0 +1,4 @@
+gerrit query --format json --commit-message --current-patch-set project:openstack-infra/zuul resume_sortkey:'003766d3000328f6'
+{"project":"openstack-infra/zuul","branch":"master","topic":"github-integration","id":"I95f41088ea160d4e33a507c4a413e3fa7f08906b","number":"192457","subject":"(WIP) Fix job hierarchy bug.","owner":{"name":"Wayne Warren","email":"waynr+launchpad@sdf.org","username":"waynr"},"url":"https://review.openstack.org/192457","commitMessage":"(WIP) Fix job hierarchy bug.\n\nJobTree.addJob may return \u0027None\u0027, this prevents that from happening.\n\nChange-Id: I95f41088ea160d4e33a507c4a413e3fa7f08906b\n","createdOn":1434498278,"lastUpdated":1440608984,"sortKey":"003763050002efc9","open":true,"status":"NEW"}
+{"project":"openstack-infra/zuul","branch":"master","topic":"github-integration","id":"Ic5887d00ff302f67469df5154e9df10b99f1cfcd","number":"215642","subject":"(WIP) Allow using webapp from connections","owner":{"name":"Jan Hruban","email":"jan.hruban@gooddata.com","username":"hrubi"},"url":"https://review.openstack.org/215642","commitMessage":"(WIP) Allow using webapp from connections\n\nAllow connections to register their own handlers for HTTP URIs inside\nthe zuul\u0027s webapp HTTP server. That way, connections can listen for\nevents comming through HTTP.\n\nChange-Id: Ic5887d00ff302f67469df5154e9df10b99f1cfcd\n","createdOn":1440165019,"lastUpdated":1440602591,"sortKey":"0037629b00034a5a","open":true,"status":"NEW"}
+{"type":"stats","rowCount":2,"runTimeMilliseconds":12}
\ No newline at end of file
diff --git a/tests/fixtures/gerrit/simple_query_pagination_old_3 b/tests/fixtures/gerrit/simple_query_pagination_old_3
new file mode 100644
index 0000000..b8cdc4a
--- /dev/null
+++ b/tests/fixtures/gerrit/simple_query_pagination_old_3
@@ -0,0 +1,2 @@
+gerrit query --format json --commit-message --current-patch-set project:openstack-infra/zuul resume_sortkey:'0037629b00034a5a'
+{"type":"stats","rowCount":0,"runTimeMilliseconds":12}
\ No newline at end of file
diff --git a/tests/fixtures/layout-connections-multiple-gerrits.yaml b/tests/fixtures/layout-connections-multiple-gerrits.yaml
new file mode 100644
index 0000000..029f42f
--- /dev/null
+++ b/tests/fixtures/layout-connections-multiple-gerrits.yaml
@@ -0,0 +1,37 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    source: review_gerrit
+    trigger:
+      review_gerrit:
+        - event: patchset-created
+    success:
+      review_gerrit:
+        VRFY: 1
+    failure:
+      review_gerrit:
+        VRFY: -1
+
+  - name: another_check
+    manager: IndependentPipelineManager
+    source: another_gerrit
+    trigger:
+      another_gerrit:
+        - event: patchset-created
+    success:
+      another_gerrit:
+        VRFY: 1
+    failure:
+      another_gerrit:
+        VRFY: -1
+
+projects:
+  - name: org/project
+    check:
+      - project-review-gerrit
+    another_check:
+      - project-another-gerrit
+
+  - name: org/project1
+    another_check:
+      - project1-another-gerrit
diff --git a/tests/fixtures/layout-connections-multiple-voters.yaml b/tests/fixtures/layout-connections-multiple-voters.yaml
new file mode 100644
index 0000000..2b8df83
--- /dev/null
+++ b/tests/fixtures/layout-connections-multiple-voters.yaml
@@ -0,0 +1,20 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    source: review_gerrit
+    trigger:
+      review_gerrit:
+        - event: patchset-created
+    success:
+      review_gerrit:
+        VRFY: 1
+    failure:
+      alt_voting_gerrit:
+        VRFY: -1
+
+projects:
+  - name: org/project
+    check:
+      - project-merge:
+        - project-test1
+        - project-test2
diff --git a/tests/fixtures/layout-disable-at.yaml b/tests/fixtures/layout-disable-at.yaml
new file mode 100644
index 0000000..a2b2526
--- /dev/null
+++ b/tests/fixtures/layout-disable-at.yaml
@@ -0,0 +1,21 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+    disabled:
+      smtp:
+        to: you@example.com
+    disable-after-consecutive-failures: 3
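+    # After three consecutive failures, reports go to the 'disabled'
+    # smtp target instead of the normal reporters.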
+
+projects:
+  - name: org/project
+    check:
+      - project-test1
diff --git a/tests/fixtures/layout-dont-ignore-deletes.yaml b/tests/fixtures/layout-dont-ignore-deletes.yaml
new file mode 100644
index 0000000..1cf3c71
--- /dev/null
+++ b/tests/fixtures/layout-dont-ignore-deletes.yaml
@@ -0,0 +1,16 @@
+includes:
+  - python-file: custom_functions.py
+
+pipelines:
+  - name: post
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: ref-updated
+          ref: ^(?!refs/).*$
+          ignore-deletes: False
+
+projects:
+  - name: org/project
+    post:
+      - project-post
diff --git a/tests/fixtures/layout-live-reconfiguration-del-project.yaml b/tests/fixtures/layout-live-reconfiguration-del-project.yaml
new file mode 100644
index 0000000..07ffb2e
--- /dev/null
+++ b/tests/fixtures/layout-live-reconfiguration-del-project.yaml
@@ -0,0 +1,21 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+projects:
+  - name: org/project
+    merge-mode: cherry-pick
+    check:
+      - project-merge:
+        - project-test1
+        - project-test2
+        - project-testfile
diff --git a/tests/fixtures/layout-mutex.yaml b/tests/fixtures/layout-mutex.yaml
new file mode 100644
index 0000000..fcd0529
--- /dev/null
+++ b/tests/fixtures/layout-mutex.yaml
@@ -0,0 +1,25 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+jobs:
+  - name: mutex-one
+    mutex: test-mutex
+  - name: mutex-two
+    mutex: test-mutex
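+  # Both jobs share 'test-mutex', so they should never run concurrently.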
+
+projects:
+  - name: org/project
+    check:
+      - project-test1
+      - mutex-one
+      - mutex-two
diff --git a/tests/fixtures/layout-requirement-reject-username.yaml b/tests/fixtures/layout-requirement-reject-username.yaml
new file mode 100644
index 0000000..9c71045
--- /dev/null
+++ b/tests/fixtures/layout-requirement-reject-username.yaml
@@ -0,0 +1,37 @@
+pipelines:
+  - name: pipeline
+    manager: IndependentPipelineManager
+    reject:
+      approval:
+        - username: 'jenkins'
+    trigger:
+      gerrit:
+        - event: comment-added
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+  - name: trigger
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: comment-added
+          reject-approval:
+            - username: 'jenkins'
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+projects:
+  - name: org/project1
+    pipeline:
+      - project1-pipeline
+  - name: org/project2
+    trigger:
+      - project2-trigger
\ No newline at end of file
diff --git a/tests/fixtures/layout-requirement-reject.yaml b/tests/fixtures/layout-requirement-reject.yaml
new file mode 100644
index 0000000..1f5d714
--- /dev/null
+++ b/tests/fixtures/layout-requirement-reject.yaml
@@ -0,0 +1,44 @@
+pipelines:
+  - name: pipeline
+    manager: IndependentPipelineManager
+    require:
+      approval:
+        - username: jenkins
+          verified: [1, 2]
+    reject:
+      approval:
+        - verified: [-1, -2]
+    trigger:
+      gerrit:
+        - event: comment-added
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+  - name: trigger
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: comment-added
+          require-approval:
+            - username: jenkins
+              verified: [1, 2]
+          reject-approval:
+            - verified: [-1, -2]
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+projects:
+  - name: org/project1
+    pipeline:
+      - project1-pipeline
+  - name: org/project2
+    trigger:
+      - project2-trigger
diff --git a/tests/fixtures/layout-requirement-username.yaml b/tests/fixtures/layout-requirement-username.yaml
index 7a549f0..f9e6477 100644
--- a/tests/fixtures/layout-requirement-username.yaml
+++ b/tests/fixtures/layout-requirement-username.yaml
@@ -3,7 +3,7 @@
     manager: IndependentPipelineManager
     require:
       approval:
-        - username: jenkins
+        - username: ^(jenkins|zuul)$
     trigger:
       gerrit:
         - event: comment-added
diff --git a/tests/fixtures/layout-success-pattern.yaml b/tests/fixtures/layout-success-pattern.yaml
new file mode 100644
index 0000000..cea15f1
--- /dev/null
+++ b/tests/fixtures/layout-success-pattern.yaml
@@ -0,0 +1,21 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      smtp:
+        to: me@example.org
+
+jobs:
+  - name: docs-draft-test
+    success-pattern: http://docs-draft.example.org/{build.parameters[LOG_PATH]}/publish-docs/
+  - name: docs-draft-test2
+    success-pattern: http://docs-draft.example.org/{NOPE}/{build.parameters[BAD]}/publish-docs/
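+    # {NOPE} and {BAD} are not valid substitutions; presumably here to
+    # exercise failure handling when formatting the success URL.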
+
+projects:
+  - name: org/docs
+    check:
+      - docs-draft-test:
+        - docs-draft-test2
diff --git a/tests/fixtures/layout-tags.yaml b/tests/fixtures/layout-tags.yaml
new file mode 100644
index 0000000..d5b8bf9
--- /dev/null
+++ b/tests/fixtures/layout-tags.yaml
@@ -0,0 +1,42 @@
+includes:
+  - python-file: tags_custom_functions.py
+
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+jobs:
+  - name: ^.*$
+    parameter-function: apply_tags
+  - name: ^.*-merge$
+    failure-message: Unable to merge change
+    hold-following-changes: true
+    tags: merge
+  - name: project1-merge
+    tags:
+      - project1
+      - extratag
+
+projects:
+  - name: org/project1
+    check:
+      - project1-merge:
+        - project1-test1
+        - project1-test2
+        - project1-project2-integration
+
+  - name: org/project2
+    check:
+      - project2-merge:
+        - project2-test1
+        - project2-test2
+        - project1-project2-integration
diff --git a/tests/fixtures/layout.yaml b/tests/fixtures/layout.yaml
index 1d23443..2e48ff1 100644
--- a/tests/fixtures/layout.yaml
+++ b/tests/fixtures/layout.yaml
@@ -107,6 +107,7 @@
   - name: ^.*-merge$
     failure-message: Unable to merge change
     hold-following-changes: true
+    tags: merge
   - name: nonvoting-project-test2
     voting: false
   - name: project-testfile
@@ -116,6 +117,14 @@
     parameter-function: select_debian_node
   - name: project1-project2-integration
     queue-name: integration
+  - name: mutex-one
+    mutex: test-mutex
+  - name: mutex-two
+    mutex: test-mutex
+  - name: project1-merge
+    tags:
+      - project1
+      - extratag
 
 project-templates:
   - name: test-one-and-two
diff --git a/tests/fixtures/layouts/bad_gerrit_missing.yaml b/tests/fixtures/layouts/bad_gerrit_missing.yaml
new file mode 100644
index 0000000..8db7248
--- /dev/null
+++ b/tests/fixtures/layouts/bad_gerrit_missing.yaml
@@ -0,0 +1,18 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    trigger:
+      not_gerrit:
+        - event: patchset-created
+    success:
+      review_gerrit:
+        verified: 1
+    failure:
+      review_gerrit:
+        verified: -1
+
+projects:
+  - name: test-org/test
+    check:
+      - test-merge
+      - test-test
diff --git a/tests/fixtures/layouts/bad_merge_failure.yaml b/tests/fixtures/layouts/bad_merge_failure.yaml
index fc6854e..d9b973c 100644
--- a/tests/fixtures/layouts/bad_merge_failure.yaml
+++ b/tests/fixtures/layouts/bad_merge_failure.yaml
@@ -2,13 +2,13 @@
   - name: check
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
     success:
-      gerrit:
+      review_gerrit:
         verified: 1
     failure:
-      gerrit:
+      review_gerrit:
         verified: -1
     # merge-failure-message needs a string.
     merge-failure-message:
@@ -17,20 +17,20 @@
     manager: DependentPipelineManager
     failure-message: Build failed.  For information on how to proceed, see http://wiki.example.org/Test_Failures
     trigger:
-      gerrit:
+      review_gerrit:
         - event: comment-added
           approval:
             - approved: 1
     success:
-      gerrit:
+      review_gerrit:
         verified: 2
         submit: true
     failure:
-      gerrit:
+      review_gerrit:
         verified: -2
     merge-failure:
     start:
-      gerrit:
+      review_gerrit:
         verified: 0
     precedence: high
 
diff --git a/tests/fixtures/layouts/bad_misplaced_ref.yaml b/tests/fixtures/layouts/bad_misplaced_ref.yaml
index f009c39..d8bb6bc 100644
--- a/tests/fixtures/layouts/bad_misplaced_ref.yaml
+++ b/tests/fixtures/layouts/bad_misplaced_ref.yaml
@@ -2,7 +2,7 @@
   - name: 'check'
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
           ref: /some/ref/path
 
diff --git a/tests/fixtures/layouts/bad_pipelines5.yaml b/tests/fixtures/layouts/bad_pipelines5.yaml
index f95a78e..a91ac7a 100644
--- a/tests/fixtures/layouts/bad_pipelines5.yaml
+++ b/tests/fixtures/layouts/bad_pipelines5.yaml
@@ -2,7 +2,7 @@
   - name: check
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         # event is a required item but it is missing.
         - approval:
             - approved: 1
diff --git a/tests/fixtures/layouts/bad_pipelines6.yaml b/tests/fixtures/layouts/bad_pipelines6.yaml
index aa91c77..bf2d538 100644
--- a/tests/fixtures/layouts/bad_pipelines6.yaml
+++ b/tests/fixtures/layouts/bad_pipelines6.yaml
@@ -2,7 +2,7 @@
   - name: check
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: comment-added
           # approved is not a valid entry. Should be approval.
           approved: 1
diff --git a/tests/fixtures/layouts/bad_reject.yaml b/tests/fixtures/layouts/bad_reject.yaml
new file mode 100644
index 0000000..0549875
--- /dev/null
+++ b/tests/fixtures/layouts/bad_reject.yaml
@@ -0,0 +1,21 @@
+# The 'reject' section below contains keys other than 'approval', which is
+# invalid.
+
+pipelines:
+  - name: 'check'
+    manager: IndependentPipelineManager
+    require:
+      open: True
+      current-patchset: True
+      approval:
+        - verified: [1, 2]
+          username: jenkins
+        - workflow: 1
+    reject:
+      # Invalid: 'reject' only accepts 'approval'; 'open' and similar keys
+      # do not belong here.
+      open: True
+      approval:
+        - code-review: [-1, -2]
+          username: core-person
+    trigger:
+      review_gerrit:
+        - event: patchset-created
diff --git a/tests/fixtures/layouts/bad_swift.yaml b/tests/fixtures/layouts/bad_swift.yaml
index e79dca6..f217821 100644
--- a/tests/fixtures/layouts/bad_swift.yaml
+++ b/tests/fixtures/layouts/bad_swift.yaml
@@ -2,13 +2,13 @@
   - name: check
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
     success:
-      gerrit:
+      review_gerrit:
         verified: 1
     failure:
-      gerrit:
+      review_gerrit:
         verified: -1
 
 jobs:
diff --git a/tests/fixtures/layouts/bad_template1.yaml b/tests/fixtures/layouts/bad_template1.yaml
index cab17a1..8868edf 100644
--- a/tests/fixtures/layouts/bad_template1.yaml
+++ b/tests/fixtures/layouts/bad_template1.yaml
@@ -4,7 +4,7 @@
   - name: 'check'
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
 
 project-templates:
diff --git a/tests/fixtures/layouts/bad_template2.yaml b/tests/fixtures/layouts/bad_template2.yaml
index b731543..09a5f91 100644
--- a/tests/fixtures/layouts/bad_template2.yaml
+++ b/tests/fixtures/layouts/bad_template2.yaml
@@ -4,7 +4,7 @@
   - name: 'check'
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
 
 project-templates:
diff --git a/tests/fixtures/layouts/good_connections1.conf b/tests/fixtures/layouts/good_connections1.conf
new file mode 100644
index 0000000..768cbb0
--- /dev/null
+++ b/tests/fixtures/layouts/good_connections1.conf
@@ -0,0 +1,42 @@
+[gearman]
+server=127.0.0.1
+
+[zuul]
+layout_config=layout.yaml
+url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
+job_name_in_report=true
+
+[merger]
+git_dir=/tmp/zuul-test/git
+git_user_email=zuul@example.com
+git_user_name=zuul
+zuul_url=http://zuul.example.com/p
+
+[swift]
+authurl=https://identity.api.example.org/v2.0/
+user=username
+key=password
+tenant_name=" "
+
+default_container=logs
+region_name=EXP
+logserver_prefix=http://logs.example.org/server.app/
+
+[connection review_gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=none
+
+[connection other_gerrit]
+driver=gerrit
+server=review2.example.com
+user=jenkins2
+sshkey=none
+
+[connection my_smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/fixtures/layouts/good_connections1.yaml b/tests/fixtures/layouts/good_connections1.yaml
new file mode 100644
index 0000000..f5f55b1
--- /dev/null
+++ b/tests/fixtures/layouts/good_connections1.yaml
@@ -0,0 +1,18 @@
+pipelines:
+  - name: check
+    manager: IndependentPipelineManager
+    source: review_gerrit
+    trigger:
+      review_gerrit:
+        - event: patchset-created
+    success:
+      review_gerrit:
+        verified: 1
+    failure:
+      other_gerrit:
+        verified: -1
+
+projects:
+  - name: org/project
+    check:
+      - project-check
diff --git a/tests/fixtures/layouts/good_layout.yaml b/tests/fixtures/layouts/good_layout.yaml
index fc2effd..0e21d57 100644
--- a/tests/fixtures/layouts/good_layout.yaml
+++ b/tests/fixtures/layouts/good_layout.yaml
@@ -4,54 +4,74 @@
 pipelines:
   - name: check
     manager: IndependentPipelineManager
+    require:
+      open: True
+      current-patchset: True
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
+        - event: comment-added
+          require-approval:
+            - verified: [-1, -2]
+              username: jenkins
+          approval:
+            - workflow: 1
     success:
-      gerrit:
+      review_gerrit:
         verified: 1
     failure:
-      gerrit:
+      review_gerrit:
         verified: -1
 
   - name: post
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: ref-updated
           ref: ^(?!refs/).*$
+          ignore-deletes: True
 
   - name: gate
     manager: DependentPipelineManager
     success-message: Your change is awesome.
     failure-message: Build failed.  For information on how to proceed, see http://wiki.example.org/Test_Failures
+    require:
+      open: True
+      current-patchset: True
+      approval:
+        - verified: [1, 2]
+          username: jenkins
+        - workflow: 1
+    reject:
+      approval:
+        - code-review: [-1, -2]
     trigger:
-      gerrit:
+      review_gerrit:
         - event: comment-added
           approval:
             - approved: 1
     start:
-      gerrit:
+      review_gerrit:
         verified: 0
     success:
-      gerrit:
+      review_gerrit:
         verified: 2
         code-review: 1
         submit: true
     failure:
-      gerrit:
+      review_gerrit:
         verified: -2
         workinprogress: true
 
   - name: merge-check
     manager: IndependentPipelineManager
-    source: gerrit
+    source: review_gerrit
     ignore-dependencies: true
     trigger:
       zuul:
         - event: project-change-merged
     merge-failure:
-      gerrit:
+      review_gerrit:
         verified: -1
 
 jobs:
diff --git a/tests/fixtures/layouts/good_merge_failure.yaml b/tests/fixtures/layouts/good_merge_failure.yaml
index f69b764..afede3c 100644
--- a/tests/fixtures/layouts/good_merge_failure.yaml
+++ b/tests/fixtures/layouts/good_merge_failure.yaml
@@ -3,47 +3,47 @@
     manager: IndependentPipelineManager
     merge-failure-message: "Could not merge the change. Please rebase..."
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
     success:
-      gerrit:
+      review_gerrit:
         verified: 1
     failure:
-      gerrit:
+      review_gerrit:
         verified: -1
 
   - name: post
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: ref-updated
           ref: ^(?!refs/).*$
     merge-failure:
-      gerrit:
+      review_gerrit:
         verified: -1
 
   - name: gate
     manager: DependentPipelineManager
     failure-message: Build failed.  For information on how to proceed, see http://wiki.example.org/Test_Failures
     trigger:
-      gerrit:
+      review_gerrit:
         - event: comment-added
           approval:
             - approved: 1
     success:
-      gerrit:
+      review_gerrit:
         verified: 2
         submit: true
     failure:
-      gerrit:
+      review_gerrit:
         verified: -2
     merge-failure:
-      gerrit:
+      review_gerrit:
         verified: -1
-      smtp:
+      my_smtp:
         to: you@example.com
     start:
-      gerrit:
+      review_gerrit:
         verified: 0
     precedence: high
 
diff --git a/tests/fixtures/layouts/good_require_approvals.yaml b/tests/fixtures/layouts/good_require_approvals.yaml
index 75bdc8e..d899765 100644
--- a/tests/fixtures/layouts/good_require_approvals.yaml
+++ b/tests/fixtures/layouts/good_require_approvals.yaml
@@ -5,7 +5,7 @@
   - name: check
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: comment-added
           require-approval:
             - username: jenkins
@@ -23,10 +23,10 @@
               username: jenkins
               email: jenkins@example.com
     success:
-      gerrit:
+      review_gerrit:
         verified: 1
     failure:
-      gerrit:
+      review_gerrit:
         verified: -1
 
 projects:
diff --git a/tests/fixtures/layouts/good_swift.yaml b/tests/fixtures/layouts/good_swift.yaml
index 913c268..48ca7f0 100644
--- a/tests/fixtures/layouts/good_swift.yaml
+++ b/tests/fixtures/layouts/good_swift.yaml
@@ -2,13 +2,13 @@
   - name: check
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
     success:
-      gerrit:
+      review_gerrit:
         verified: 1
     failure:
-      gerrit:
+      review_gerrit:
         verified: -1
 
 jobs:
diff --git a/tests/fixtures/layouts/good_template1.yaml b/tests/fixtures/layouts/good_template1.yaml
index ad060a4..1680c7b 100644
--- a/tests/fixtures/layouts/good_template1.yaml
+++ b/tests/fixtures/layouts/good_template1.yaml
@@ -2,7 +2,7 @@
   - name: 'check'
     manager: IndependentPipelineManager
     trigger:
-      gerrit:
+      review_gerrit:
         - event: patchset-created
 
 project-templates:
diff --git a/tests/fixtures/layouts/zuul_default.conf b/tests/fixtures/layouts/zuul_default.conf
new file mode 100644
index 0000000..6440027
--- /dev/null
+++ b/tests/fixtures/layouts/zuul_default.conf
@@ -0,0 +1,36 @@
+[gearman]
+server=127.0.0.1
+
+[zuul]
+layout_config=layout.yaml
+url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
+job_name_in_report=true
+
+[merger]
+git_dir=/tmp/zuul-test/git
+git_user_email=zuul@example.com
+git_user_name=zuul
+zuul_url=http://zuul.example.com/p
+
+[swift]
+authurl=https://identity.api.example.org/v2.0/
+user=username
+key=password
+tenant_name=" "
+
+default_container=logs
+region_name=EXP
+logserver_prefix=http://logs.example.org/server.app/
+
+[connection review_gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=none
+
+[connection my_smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/fixtures/tags_custom_functions.py b/tests/fixtures/tags_custom_functions.py
new file mode 100644
index 0000000..67e7ef1
--- /dev/null
+++ b/tests/fixtures/tags_custom_functions.py
@@ -0,0 +1,2 @@
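+# A layout 'parameter-function': called with the queue item, the job and the
+# params dict, which it mutates in place before job parameters are finalized.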
+def apply_tags(item, job, params):
+    params['BUILD_TAGS'] = ' '.join(sorted(job.tags))
diff --git a/tests/fixtures/zuul-connections-multiple-gerrits.conf b/tests/fixtures/zuul-connections-multiple-gerrits.conf
new file mode 100644
index 0000000..f067e6e
--- /dev/null
+++ b/tests/fixtures/zuul-connections-multiple-gerrits.conf
@@ -0,0 +1,42 @@
+[gearman]
+server=127.0.0.1
+
+[zuul]
+layout_config=layout-connections-multiple-voters.yaml
+url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
+job_name_in_report=true
+
+[merger]
+git_dir=/tmp/zuul-test/git
+git_user_email=zuul@example.com
+git_user_name=zuul
+zuul_url=http://zuul.example.com/p
+
+[swift]
+authurl=https://identity.api.example.org/v2.0/
+user=username
+key=password
+tenant_name=" "
+
+default_container=logs
+region_name=EXP
+logserver_prefix=http://logs.example.org/server.app/
+
+[connection review_gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=none
+
+[connection another_gerrit]
+driver=gerrit
+server=another.example.com
+user=jenkins
+sshkey=none
+
+[connection outgoing_smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/fixtures/zuul-connections-same-gerrit.conf b/tests/fixtures/zuul-connections-same-gerrit.conf
new file mode 100644
index 0000000..af31c8a
--- /dev/null
+++ b/tests/fixtures/zuul-connections-same-gerrit.conf
@@ -0,0 +1,42 @@
+[gearman]
+server=127.0.0.1
+
+[zuul]
+layout_config=layout-connections-multiple-voters.yaml
+url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
+job_name_in_report=true
+
+[merger]
+git_dir=/tmp/zuul-test/git
+git_user_email=zuul@example.com
+git_user_name=zuul
+zuul_url=http://zuul.example.com/p
+
+[swift]
+authurl=https://identity.api.example.org/v2.0/
+user=username
+key=password
+tenant_name=" "
+
+default_container=logs
+region_name=EXP
+logserver_prefix=http://logs.example.org/server.app/
+
+[connection review_gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=none
+
+[connection alt_voting_gerrit]
+driver=gerrit
+server=review.example.com
+user=civoter
+sshkey=none
+
+[connection outgoing_smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/fixtures/zuul.conf b/tests/fixtures/zuul.conf
index ec76cd0..b250c6d 100644
--- a/tests/fixtures/zuul.conf
+++ b/tests/fixtures/zuul.conf
@@ -1,11 +1,6 @@
 [gearman]
 server=127.0.0.1
 
-[gerrit]
-server=review.example.com
-user=jenkins
-sshkey=none
-
 [zuul]
 layout_config=layout.yaml
 url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
@@ -17,12 +12,6 @@
 git_user_name=zuul
 zuul_url=http://zuul.example.com/p
 
-[smtp]
-server=localhost
-port=25
-default_from=zuul@example.com
-default_to=you@example.com
-
 [swift]
 authurl=https://identity.api.example.org/v2.0/
 user=username
@@ -32,3 +21,16 @@
 default_container=logs
 region_name=EXP
 logserver_prefix=http://logs.example.org/server.app/
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=none
+
+[connection smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/test_change_matcher.py b/tests/test_change_matcher.py
index 1f4ab93..0585322 100644
--- a/tests/test_change_matcher.py
+++ b/tests/test_change_matcher.py
@@ -123,13 +123,13 @@
         self._test_matches(False)
 
     def test_matches_returns_false_when_not_all_files_match(self):
-        self._test_matches(False, files=['docs/foo', 'foo/bar'])
+        self._test_matches(False, files=['/COMMIT_MSG', 'docs/foo', 'foo/bar'])
 
-    def test_matches_returns_true_when_commit_message_matches(self):
-        self._test_matches(True, files=['/COMMIT_MSG'])
+    def test_matches_returns_false_when_commit_message_matches(self):
+        self._test_matches(False, files=['/COMMIT_MSG'])
 
     def test_matches_returns_true_when_all_files_match(self):
-        self._test_matches(True, files=['docs/foo'])
+        self._test_matches(True, files=['/COMMIT_MSG', 'docs/foo'])
 
 
 class TestMatchAll(BaseTestMatcher):
diff --git a/tests/test_cloner.py b/tests/test_cloner.py
index 137c157..e3576bd 100644
--- a/tests/test_cloner.py
+++ b/tests/test_cloner.py
@@ -566,3 +566,57 @@
         self.worker.hold_jobs_in_build = False
         self.worker.release()
         self.waitUntilSettled()
+
+    def test_post_checkout(self):
+        project = "org/project"
+        path = os.path.join(self.upstream_root, project)
+        repo = git.Repo(path)
+        repo.head.reference = repo.heads['master']
+        commits = []
+        for i in range(0, 3):
+            commits.append(self.create_commit(project))
+        newRev = commits[1]
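+        # Use a non-tip commit so the test proves the cloner checks out the
+        # requested revision rather than the branch head.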
+
+        cloner = zuul.lib.cloner.Cloner(
+            git_base_url=self.upstream_root,
+            projects=[project],
+            workspace=self.workspace_root,
+            zuul_branch=None,
+            zuul_ref='master',
+            zuul_url=self.git_root,
+            zuul_project=project,
+            zuul_newrev=newRev,
+        )
+        cloner.execute()
+        repos = self.getWorkspaceRepos([project])
+        cloned_sha = repos[project].rev_parse('HEAD').hexsha
+        self.assertEqual(newRev, cloned_sha)
+
+    def test_post_and_master_checkout(self):
+        project = "org/project1"
+        master_project = "org/project2"
+        path = os.path.join(self.upstream_root, project)
+        repo = git.Repo(path)
+        repo.head.reference = repo.heads['master']
+        commits = []
+        for i in range(0, 3):
+            commits.append(self.create_commit(project))
+        newRev = commits[1]
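+        # Again a non-tip commit; the second project has no matching rev and
+        # should end up on master.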
+
+        cloner = zuul.lib.cloner.Cloner(
+            git_base_url=self.upstream_root,
+            projects=[project, master_project],
+            workspace=self.workspace_root,
+            zuul_branch=None,
+            zuul_ref='master',
+            zuul_url=self.git_root,
+            zuul_project=project,
+            zuul_newrev=newRev
+        )
+        cloner.execute()
+        repos = self.getWorkspaceRepos([project, master_project])
+        cloned_sha = repos[project].rev_parse('HEAD').hexsha
+        self.assertEqual(newRev, cloned_sha)
+        self.assertEqual(
+            repos[master_project].rev_parse('HEAD').hexsha,
+            repos[master_project].rev_parse('master').hexsha)
diff --git a/tests/test_connection.py b/tests/test_connection.py
new file mode 100644
index 0000000..c3458ac
--- /dev/null
+++ b/tests/test_connection.py
@@ -0,0 +1,85 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import testtools
+
+import zuul.connection.gerrit
+
+from tests.base import ZuulTestCase
+
+
+class TestGerritConnection(testtools.TestCase):
+    log = logging.getLogger("zuul.test_connection")
+
+    def test_driver_name(self):
+        self.assertEqual('gerrit',
+                         zuul.connection.gerrit.GerritConnection.driver_name)
+
+
+class TestConnections(ZuulTestCase):
+    def setup_config(self, config_file='zuul-connections-same-gerrit.conf'):
+        super(TestConnections, self).setup_config(config_file)
+
+    def test_multiple_connections(self):
+        "Test multiple connections to the one gerrit"
+
+        A = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'A')
+        self.fake_review_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+
+        self.waitUntilSettled()
+
+        self.assertEqual(len(A.patchsets[-1]['approvals']), 1)
+        self.assertEqual(A.patchsets[-1]['approvals'][0]['type'], 'VRFY')
+        self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
+        self.assertEqual(A.patchsets[-1]['approvals'][0]['by']['username'],
+                         'jenkins')
+
+        B = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'B')
+        self.worker.addFailTest('project-test2', B)
+        self.fake_review_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+
+        self.waitUntilSettled()
+
+        self.assertEqual(len(B.patchsets[-1]['approvals']), 1)
+        self.assertEqual(B.patchsets[-1]['approvals'][0]['type'], 'VRFY')
+        self.assertEqual(B.patchsets[-1]['approvals'][0]['value'], '-1')
+        self.assertEqual(B.patchsets[-1]['approvals'][0]['by']['username'],
+                         'civoter')
+
+
+class TestMultipleGerrits(ZuulTestCase):
+    def setup_config(self,
+                     config_file='zuul-connections-multiple-gerrits.conf'):
+        super(TestMultipleGerrits, self).setup_config(config_file)
+        self.config.set(
+            'zuul', 'layout_config',
+            'layout-connections-multiple-gerrits.yaml')
+
+    def test_multiple_project_separate_gerrits(self):
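+        # An event on one gerrit should only trigger jobs configured
+        # for that connection.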
+        self.worker.hold_jobs_in_build = True
+
+        A = self.fake_another_gerrit.addFakeChange(
+            'org/project', 'master', 'A')
+        self.fake_another_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+
+        self.waitUntilSettled()
+
+        self.assertEqual(1, len(self.builds))
+        self.assertEqual('project-another-gerrit', self.builds[0].name)
+        self.assertTrue(self.job_has_changes(self.builds[0], A))
+
+        self.worker.hold_jobs_in_build = False
+        self.worker.release()
+        self.waitUntilSettled()
diff --git a/tests/test_gerrit.py b/tests/test_gerrit.py
new file mode 100644
index 0000000..93ce122
--- /dev/null
+++ b/tests/test_gerrit.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+# Copyright 2015 BMW Car IT GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
+from tests.base import BaseTestCase
+from zuul.connection.gerrit import GerritConnection
+
+FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'fixtures/gerrit')
+
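+# Each fixture file stores the expected ssh command on its first line
+# and the canned query output on the remaining lines.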
+
+def read_fixture(file):
+    with open('%s/%s' % (FIXTURE_DIR, file), 'r') as fixturefile:
+        lines = fixturefile.readlines()
+        command = lines[0].replace('\n', '')
+        value = ''.join(lines[1:])
+        return command, value
+
+
+def read_fixtures(files):
+    calls = []
+    values = []
+    for fixture_file in files:
+        command, value = read_fixture(fixture_file)
+        calls.append(mock.call(command))
+        values.append([value, ''])
+    return calls, values
+
+
+class TestGerrit(BaseTestCase):
+
+    @mock.patch('zuul.connection.gerrit.GerritConnection._ssh')
+    def run_query(self, files, expected_patches, _ssh_mock):
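+        # The mock.patch decorator injects _ssh_mock; callers omit it.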
+        gerrit_config = {
+            'user': 'gerrit',
+            'server': 'localhost',
+        }
+        gerrit = GerritConnection('review_gerrit', gerrit_config)
+
+        calls, values = read_fixtures(files)
+        _ssh_mock.side_effect = values
+
+        result = gerrit.simpleQuery('project:openstack-infra/zuul')
+
+        _ssh_mock.assert_has_calls(calls)
+        self.assertEqual(len(calls), _ssh_mock.call_count,
+                         '_ssh should be called %d times' % len(calls))
+        self.assertIsNotNone(result, 'Query should return a result')
+        self.assertEqual(len(result), expected_patches,
+                         'There must be %d patches.' % expected_patches)
+
+    def test_simple_query_pagination_new(self):
+        files = ['simple_query_pagination_new_1',
+                 'simple_query_pagination_new_2']
+        expected_patches = 5
+        self.run_query(files, expected_patches)
+
+    def test_simple_query_pagination_old(self):
+        files = ['simple_query_pagination_old_1',
+                 'simple_query_pagination_old_2',
+                 'simple_query_pagination_old_3']
+        expected_patches = 5
+        self.run_query(files, expected_patches)
diff --git a/tests/test_layoutvalidator.py b/tests/test_layoutvalidator.py
index 5a8fc46..46a8c7c 100644
--- a/tests/test_layoutvalidator.py
+++ b/tests/test_layoutvalidator.py
@@ -14,6 +14,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from six.moves import configparser as ConfigParser
 import os
 import re
 
@@ -22,6 +23,7 @@
 import yaml
 
 import zuul.layoutvalidator
+import zuul.lib.connections
 
 FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
                            'fixtures')
@@ -31,31 +33,43 @@
 class TestLayoutValidator(testtools.TestCase):
     def test_layouts(self):
         """Test layout file validation"""
-        print
+        print()
         errors = []
         for fn in os.listdir(os.path.join(FIXTURE_DIR, 'layouts')):
             m = LAYOUT_RE.match(fn)
             if not m:
                 continue
-            print fn
+            print(fn)
+
+            # Load a .conf file with the same base name as the layout.
+            config_file = ("%s.conf" %
+                           os.path.join(FIXTURE_DIR, 'layouts',
+                                        fn.split('.yaml')[0]))
+            if not os.path.isfile(config_file):
+                config_file = os.path.join(FIXTURE_DIR, 'layouts',
+                                           'zuul_default.conf')
+            config = ConfigParser.ConfigParser()
+            config.read(config_file)
+            connections = zuul.lib.connections.configure_connections(config)
+
             layout = os.path.join(FIXTURE_DIR, 'layouts', fn)
             data = yaml.load(open(layout))
             validator = zuul.layoutvalidator.LayoutValidator()
             if m.group(1) == 'good':
                 try:
-                    validator.validate(data)
+                    validator.validate(data, connections)
                 except voluptuous.Invalid as e:
                     raise Exception(
                         'Unexpected YAML syntax error in %s:\n  %s' %
                         (fn, str(e)))
             else:
                 try:
-                    validator.validate(data)
+                    validator.validate(data, connections)
                     raise Exception("Expected a YAML syntax error in %s." %
                                     fn)
                 except voluptuous.Invalid as e:
                     error = str(e)
-                    print '  ', error
+                    print('  ', error)
                     if error in errors:
                         raise Exception("Error has already been tested: %s" %
                                         error)
diff --git a/tests/test_model.py b/tests/test_model.py
index 2711618..6ad0750 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -12,6 +12,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import os
+import random
+
+import fixtures
+
 from zuul import change_matcher as cm
 from zuul import model
 
@@ -31,12 +36,12 @@
 
     def test_change_matches_returns_false_for_matched_skip_if(self):
         change = model.Change('project')
-        change.files = ['docs/foo']
+        change.files = ['/COMMIT_MSG', 'docs/foo']
         self.assertFalse(self.job.changeMatches(change))
 
     def test_change_matches_returns_true_for_unmatched_skip_if(self):
         change = model.Change('project')
-        change.files = ['foo']
+        change.files = ['/COMMIT_MSG', 'foo']
         self.assertTrue(self.job.changeMatches(change))
 
     def test_copy_retains_skip_if(self):
@@ -62,3 +67,76 @@
         metajob = model.Job('^job')
         job.copy(metajob)
         self._assert_job_booleans_are_not_none(job)
+
+
+class TestJobTimeData(BaseTestCase):
+    def setUp(self):
+        super(TestJobTimeData, self).setUp()
+        self.tmp_root = self.useFixture(fixtures.TempDir(
+            rootdir=os.environ.get("ZUUL_TEST_ROOT"))
+        ).path
+
+    def test_empty_timedata(self):
+        path = os.path.join(self.tmp_root, 'job-name')
+        self.assertFalse(os.path.exists(path))
+        self.assertFalse(os.path.exists(path + '.tmp'))
+        td = model.JobTimeData(path)
+        self.assertEqual(td.success_times, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+        self.assertEqual(td.failure_times, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+        self.assertEqual(td.results, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+
+    def test_save_reload(self):
+        path = os.path.join(self.tmp_root, 'job-name')
+        self.assertFalse(os.path.exists(path))
+        self.assertFalse(os.path.exists(path + '.tmp'))
+        td = model.JobTimeData(path)
+        self.assertEqual(td.success_times, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+        self.assertEqual(td.failure_times, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+        self.assertEqual(td.results, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+        success_times = []
+        failure_times = []
+        results = []
+        for x in range(10):
+            success_times.append(int(random.random() * 1000))
+            failure_times.append(int(random.random() * 1000))
+            results.append(0)
+            results.append(1)
+        random.shuffle(results)
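+        # JobTimeData keeps a rolling window of the ten most recent
+        # results, so only the last ten should survive below.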
+        s = f = 0
+        for result in results:
+            if result:
+                td.add(failure_times[f], 'FAILURE')
+                f += 1
+            else:
+                td.add(success_times[s], 'SUCCESS')
+                s += 1
+        self.assertEqual(td.success_times, success_times)
+        self.assertEqual(td.failure_times, failure_times)
+        self.assertEqual(td.results, results[10:])
+        td.save()
+        self.assertTrue(os.path.exists(path))
+        self.assertFalse(os.path.exists(path + '.tmp'))
+        td = model.JobTimeData(path)
+        td.load()
+        self.assertEqual(td.success_times, success_times)
+        self.assertEqual(td.failure_times, failure_times)
+        self.assertEqual(td.results, results[10:])
+
+
+class TestTimeDataBase(BaseTestCase):
+    def setUp(self):
+        super(TestTimeDataBase, self).setUp()
+        self.tmp_root = self.useFixture(fixtures.TempDir(
+            rootdir=os.environ.get("ZUUL_TEST_ROOT"))
+        ).path
+        self.db = model.TimeDataBase(self.tmp_root)
+
+    def test_timedatabase(self):
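+        # The estimate should track the average of recorded successes.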
+        self.assertEqual(self.db.getEstimatedTime('job-name'), 0)
+        self.db.update('job-name', 50, 'SUCCESS')
+        self.assertEqual(self.db.getEstimatedTime('job-name'), 50)
+        self.db.update('job-name', 100, 'SUCCESS')
+        self.assertEqual(self.db.getEstimatedTime('job-name'), 75)
+        for x in range(10):
+            self.db.update('job-name', 100, 'SUCCESS')
+        self.assertEqual(self.db.getEstimatedTime('job-name'), 100)
diff --git a/tests/test_reporter.py b/tests/test_reporter.py
new file mode 100644
index 0000000..8d3090a
--- /dev/null
+++ b/tests/test_reporter.py
@@ -0,0 +1,46 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import testtools
+
+import zuul.reporter
+
+
+class TestSMTPReporter(testtools.TestCase):
+    log = logging.getLogger("zuul.test_reporter")
+
+    def setUp(self):
+        super(TestSMTPReporter, self).setUp()
+
+    def test_reporter_abc(self):
+        # We only need to instantiate a class for this
+        reporter = zuul.reporter.smtp.SMTPReporter({})  # noqa
+
+    def test_reporter_name(self):
+        self.assertEqual('smtp', zuul.reporter.smtp.SMTPReporter.name)
+
+
+class TestGerritReporter(testtools.TestCase):
+    log = logging.getLogger("zuul.test_reporter")
+
+    def setUp(self):
+        super(TestGerritReporter, self).setUp()
+
+    def test_reporter_abc(self):
+        # We only need to instantiate a class for this
+        reporter = zuul.reporter.gerrit.GerritReporter(None)  # noqa
+
+    def test_reporter_name(self):
+        self.assertEqual('gerrit', zuul.reporter.gerrit.GerritReporter.name)
diff --git a/tests/test_requirements.py b/tests/test_requirements.py
index 4316925..3ae56ad 100644
--- a/tests/test_requirements.py
+++ b/tests/test_requirements.py
@@ -323,3 +323,105 @@
         self.fake_gerrit.addEvent(B.addApproval('CRVW', 2))
         self.waitUntilSettled()
         self.assertEqual(len(self.history), 1)
+
+    def _test_require_reject_username(self, project, job):
+        "Test negative username's match"
+        # Should only trigger if Jenkins hasn't voted.
+        self.config.set(
+            'zuul', 'layout_config',
+            'tests/fixtures/layout-requirement-reject-username.yaml')
+        self.sched.reconfigure(self.config)
+        self.registerJobs()
+
+        # add in a change with no comments
+        A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 0)
+
+        # add in a comment that will trigger
+        self.fake_gerrit.addEvent(A.addApproval('CRVW', 1,
+                                                username='reviewer'))
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 1)
+        self.assertEqual(self.history[0].name, job)
+
+        # add in a comment from jenkins user which shouldn't trigger
+        self.fake_gerrit.addEvent(A.addApproval('VRFY', 1, username='jenkins'))
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 1)
+
+        # Check future reviews also won't trigger as a 'jenkins' user has
+        # commented previously
+        self.fake_gerrit.addEvent(A.addApproval('CRVW', 1,
+                                                username='reviewer'))
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 1)
+
+    def test_pipeline_reject_username(self):
+        "Test negative pipeline requirement: no comment from jenkins"
+        return self._test_require_reject_username('org/project1',
+                                                  'project1-pipeline')
+
+    def test_trigger_reject_username(self):
+        "Test negative trigger requirement: no comment from jenkins"
+        return self._test_require_reject_username('org/project2',
+                                                  'project2-trigger')
+
+    def _test_require_reject(self, project, job):
+        "Test no approval matches a reject param"
+        self.config.set(
+            'zuul', 'layout_config',
+            'tests/fixtures/layout-requirement-reject.yaml')
+        self.sched.reconfigure(self.config)
+        self.registerJobs()
+
+        A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 0)
+
+        # First positive vote should not queue until jenkins has +1'd
+        comment = A.addApproval('VRFY', 1, username='reviewer_a')
+        self.fake_gerrit.addEvent(comment)
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 0)
+
+        # Jenkins should put in a +1 which will also queue
+        comment = A.addApproval('VRFY', 1, username='jenkins')
+        self.fake_gerrit.addEvent(comment)
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 1)
+        self.assertEqual(self.history[0].name, job)
+
+        # Negative vote should not queue
+        comment = A.addApproval('VRFY', -1, username='reviewer_b')
+        self.fake_gerrit.addEvent(comment)
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 1)
+
+        # Future approvals should do nothing
+        comment = A.addApproval('VRFY', 1, username='reviewer_c')
+        self.fake_gerrit.addEvent(comment)
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 1)
+
+        # Changing the negative vote to positive should queue
+        comment = A.addApproval('VRFY', 1, username='reviewer_b')
+        self.fake_gerrit.addEvent(comment)
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 2)
+        self.assertEqual(self.history[1].name, job)
+
+        # Future approvals should also queue
+        comment = A.addApproval('VRFY', 1, username='reviewer_d')
+        self.fake_gerrit.addEvent(comment)
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 3)
+        self.assertEqual(self.history[2].name, job)
+
+    def test_pipeline_require_reject(self):
+        "Test pipeline requirement: rejections absent"
+        return self._test_require_reject('org/project1', 'project1-pipeline')
+
+    def test_trigger_require_reject(self):
+        "Test trigger requirement: rejections absent"
+        return self._test_require_reject('org/project2', 'project2-trigger')
diff --git a/tests/test_scheduler.py b/tests/test_scheduler.py
index 61a2d09..628775d 100755
--- a/tests/test_scheduler.py
+++ b/tests/test_scheduler.py
@@ -20,11 +20,10 @@
 import re
 import shutil
 import time
-import urllib
-import urllib2
 import yaml
 
 import git
+from six.moves import urllib
 import testtools
 
 import zuul.change_matcher
@@ -34,7 +33,6 @@
 import zuul.reporter.smtp
 
 from tests.base import (
-    BaseTestCase,
     ZuulTestCase,
     repack_repo,
 )
@@ -44,40 +42,6 @@
                     '%(levelname)-8s %(message)s')
 
 
-class TestSchedulerConfigParsing(BaseTestCase):
-
-    def test_parse_skip_if(self):
-        job_yaml = """
-jobs:
-  - name: job_name
-    skip-if:
-      - project: ^project_name$
-        branch: ^stable/icehouse$
-        all-files-match-any:
-          - ^filename$
-      - project: ^project2_name$
-        all-files-match-any:
-          - ^filename2$
-    """.strip()
-        data = yaml.load(job_yaml)
-        config_job = data.get('jobs')[0]
-        sched = zuul.scheduler.Scheduler()
-        cm = zuul.change_matcher
-        expected = cm.MatchAny([
-            cm.MatchAll([
-                cm.ProjectMatcher('^project_name$'),
-                cm.BranchMatcher('^stable/icehouse$'),
-                cm.MatchAllFiles([cm.FileMatcher('^filename$')]),
-            ]),
-            cm.MatchAll([
-                cm.ProjectMatcher('^project2_name$'),
-                cm.MatchAllFiles([cm.FileMatcher('^filename2$')]),
-            ]),
-        ])
-        matcher = sched._parseSkipIf(config_job)
-        self.assertEqual(expected, matcher)
-
-
 class TestScheduler(ZuulTestCase):
 
     def test_jobs_launched(self):
@@ -111,6 +75,9 @@
         self.assertReportedStat(
             'zuul.pipeline.gate.org.project.total_changes', value='1|c')
 
+        for build in self.builds:
+            self.assertEqual(build.parameters['ZUUL_VOTING'], '1')
+
     def test_initial_pipeline_gauges(self):
         "Test that each pipeline reported its length on start"
         pipeline_names = self.sched.layout.pipelines.keys()
@@ -492,6 +459,46 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    def _test_time_database(self, iteration):
+        self.worker.hold_jobs_in_build = True
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        A.addApproval('CRVW', 2)
+        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.waitUntilSettled()
+        time.sleep(2)
+
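+        # Walk the status JSON to find the gate pipeline's
+        # project-merge job.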
+        data = json.loads(self.sched.formatStatusJSON())
+        found_job = None
+        for pipeline in data['pipelines']:
+            if pipeline['name'] != 'gate':
+                continue
+            for queue in pipeline['change_queues']:
+                for head in queue['heads']:
+                    for item in head:
+                        for job in item['jobs']:
+                            if job['name'] == 'project-merge':
+                                found_job = job
+                                break
+
+        self.assertIsNotNone(found_job)
+        if iteration == 1:
+            self.assertIsNotNone(found_job['estimated_time'])
+            self.assertIsNone(found_job['remaining_time'])
+        else:
+            self.assertIsNotNone(found_job['estimated_time'])
+            self.assertTrue(found_job['estimated_time'] >= 2)
+            self.assertIsNotNone(found_job['remaining_time'])
+
+        self.worker.hold_jobs_in_build = False
+        self.worker.release()
+        self.waitUntilSettled()
+
+    def test_time_database(self):
+        "Test the time database"
+
+        self._test_time_database(1)
+        self._test_time_database(2)
+
     def test_two_failed_changes_at_head(self):
         "Test that changes are reparented correctly if 2 fail at head"
 
@@ -597,6 +604,36 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    def test_parse_skip_if(self):
+        job_yaml = """
+jobs:
+  - name: job_name
+    skip-if:
+      - project: ^project_name$
+        branch: ^stable/icehouse$
+        all-files-match-any:
+          - ^filename$
+      - project: ^project2_name$
+        all-files-match-any:
+          - ^filename2$
+    """.strip()
+        data = yaml.load(job_yaml)
+        config_job = data.get('jobs')[0]
+        cm = zuul.change_matcher
+        expected = cm.MatchAny([
+            cm.MatchAll([
+                cm.ProjectMatcher('^project_name$'),
+                cm.BranchMatcher('^stable/icehouse$'),
+                cm.MatchAllFiles([cm.FileMatcher('^filename$')]),
+            ]),
+            cm.MatchAll([
+                cm.ProjectMatcher('^project2_name$'),
+                cm.MatchAllFiles([cm.FileMatcher('^filename2$')]),
+            ]),
+        ])
+        matcher = self.sched._parseSkipIf(config_job)
+        self.assertEqual(expected, matcher)
+
     def test_patch_order(self):
         "Test that dependent patches are tested in the right order"
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -690,8 +727,8 @@
         # triggering events.  Since it will have the changes cached
         # already (without approvals), we need to clear the cache
         # first.
-        source = self.sched.layout.pipelines['gate'].source
-        source.maintainCache([])
+        for connection in self.connections.values():
+            connection.maintainCache([])
 
         self.worker.hold_jobs_in_build = True
         A.addApproval('APRV', 1)
@@ -726,8 +763,8 @@
         self.assertEqual(self.history[6].changes,
                          '1,1 2,1 3,1 4,1 5,1 6,1 7,1')
 
-    def test_trigger_cache(self):
-        "Test that the trigger cache operates correctly"
+    def test_source_cache(self):
+        "Test that the source cache operates correctly"
         self.worker.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -759,9 +796,9 @@
         self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
         self.waitUntilSettled()
 
-        self.log.debug("len %s" % self.gerrit._change_cache.keys())
+        self.log.debug("len %s" % self.fake_gerrit._change_cache.keys())
         # there should still be changes in the cache
-        self.assertNotEqual(len(self.gerrit._change_cache.keys()), 0)
+        self.assertNotEqual(len(self.fake_gerrit._change_cache.keys()), 0)
 
         self.worker.hold_jobs_in_build = False
         self.worker.release()
@@ -788,7 +825,6 @@
         A.addApproval('APRV', 1)
         a = source._getChange(1, 2, refresh=True)
         self.assertTrue(source.canMerge(a, mgr.getSubmitAllowNeeds()))
-        source.maintainCache([])
 
     def test_build_configuration(self):
         "Test that zuul merges the right commits for testing"
@@ -891,6 +927,54 @@
         self.assertEqual(len(self.history), 1)
         self.assertIn('project-post', job_names)
 
+    def test_post_ignore_deletes(self):
+        "Test that deleting refs does not trigger post jobs"
+
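+        # A newRev of all zeroes is how Gerrit indicates a deleted ref.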
+        e = {
+            "type": "ref-updated",
+            "submitter": {
+                "name": "User Name",
+            },
+            "refUpdate": {
+                "oldRev": "90f173846e3af9154517b88543ffbd1691f31366",
+                "newRev": "0000000000000000000000000000000000000000",
+                "refName": "master",
+                "project": "org/project",
+            }
+        }
+        self.fake_gerrit.addEvent(e)
+        self.waitUntilSettled()
+
+        job_names = [x.name for x in self.history]
+        self.assertEqual(len(self.history), 0)
+        self.assertNotIn('project-post', job_names)
+
+    def test_post_ignore_deletes_negative(self):
+        "Test that deleting refs does trigger post jobs"
+
+        self.config.set('zuul', 'layout_config',
+                        'tests/fixtures/layout-dont-ignore-deletes.yaml')
+        self.sched.reconfigure(self.config)
+
+        e = {
+            "type": "ref-updated",
+            "submitter": {
+                "name": "User Name",
+            },
+            "refUpdate": {
+                "oldRev": "90f173846e3af9154517b88543ffbd1691f31366",
+                "newRev": "0000000000000000000000000000000000000000",
+                "refName": "master",
+                "project": "org/project",
+            }
+        }
+        self.fake_gerrit.addEvent(e)
+        self.waitUntilSettled()
+
+        job_names = [x.name for x in self.history]
+        self.assertEqual(len(self.history), 1)
+        self.assertIn('project-post', job_names)
+
     def test_build_configuration_branch(self):
         "Test that the right commits are on alternate branches"
 
@@ -1256,6 +1340,9 @@
             self.getJobFromHistory('nonvoting-project-test2').result,
             'FAILURE')
 
+        for build in self.builds:
+            self.assertEqual(build.parameters['ZUUL_VOTING'], '0')
+
     def test_check_queue_success(self):
         "Test successful check queue jobs."
 
@@ -1396,7 +1483,7 @@
         self.worker.build_history = []
 
         path = os.path.join(self.git_root, "org/project")
-        print repack_repo(path)
+        print(repack_repo(path))
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         A.addApproval('CRVW', 2)
@@ -1415,15 +1502,15 @@
         "Test that the merger works with large changes after a repack"
         # https://bugs.launchpad.net/zuul/+bug/1078946
         # This test assumes the repo is already cloned; make sure it is
-        url = self.sched.triggers['gerrit'].getGitUrl(
+        url = self.fake_gerrit.getGitUrl(
             self.sched.layout.projects['org/project1'])
         self.merge_server.merger.addProject('org/project1', url)
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         A.addPatchset(large=True)
         path = os.path.join(self.upstream_root, "org/project1")
-        print repack_repo(path)
+        print(repack_repo(path))
         path = os.path.join(self.git_root, "org/project1")
-        print repack_repo(path)
+        print(repack_repo(path))
 
         A.addApproval('CRVW', 2)
         self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
@@ -2110,11 +2197,8 @@
 
     def test_test_config(self):
         "Test that we can test the config"
-        sched = zuul.scheduler.Scheduler()
-        sched.registerTrigger(None, 'gerrit')
-        sched.registerTrigger(None, 'timer')
-        sched.registerTrigger(None, 'zuul')
-        sched.testConfig(self.config.get('zuul', 'layout_config'))
+        self.sched.testConfig(self.config.get('zuul', 'layout_config'),
+                              self.connections)
 
     def test_build_description(self):
         "Test that build descriptions update"
@@ -2185,15 +2269,18 @@
         self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
         self.waitUntilSettled()
 
+        self.worker.release('project-merge')
+        self.waitUntilSettled()
+
         port = self.webapp.server.socket.getsockname()[1]
 
-        req = urllib2.Request("http://localhost:%s/status.json" % port)
-        f = urllib2.urlopen(req)
+        req = urllib.request.Request("http://localhost:%s/status.json" % port)
+        f = urllib.request.urlopen(req)
         headers = f.info()
         self.assertIn('Content-Length', headers)
         self.assertIn('Content-Type', headers)
-        self.assertEqual(headers['Content-Type'],
-                         'application/json; charset=UTF-8')
+        self.assertIsNotNone(re.match('^application/json(; charset=UTF-8)?$',
+                                      headers['Content-Type']))
         self.assertIn('Access-Control-Allow-Origin', headers)
         self.assertIn('Cache-Control', headers)
         self.assertIn('Last-Modified', headers)
@@ -2205,7 +2292,7 @@
         self.waitUntilSettled()
 
         data = json.loads(data)
-        status_jobs = set()
+        status_jobs = []
         for p in data['pipelines']:
             for q in p['change_queues']:
                 if p['name'] in ['gate', 'conflict']:
@@ -2217,10 +2304,24 @@
                         self.assertTrue(change['active'])
                         self.assertEqual(change['id'], '1,1')
                         for job in change['jobs']:
-                            status_jobs.add(job['name'])
-        self.assertIn('project-merge', status_jobs)
-        self.assertIn('project-test1', status_jobs)
-        self.assertIn('project-test2', status_jobs)
+                            status_jobs.append(job)
+        self.assertEqual('project-merge', status_jobs[0]['name'])
+        self.assertEqual('https://server/job/project-merge/0/',
+                         status_jobs[0]['url'])
+        self.assertEqual('http://logs.example.com/1/1/gate/project-merge/0',
+                         status_jobs[0]['report_url'])
+
+        self.assertEqual('project-test1', status_jobs[1]['name'])
+        self.assertEqual('https://server/job/project-test1/1/',
+                         status_jobs[1]['url'])
+        self.assertEqual('http://logs.example.com/1/1/gate/project-test1/1',
+                         status_jobs[1]['report_url'])
+
+        self.assertEqual('project-test2', status_jobs[2]['name'])
+        self.assertEqual('https://server/job/project-test2/2/',
+                         status_jobs[2]['url'])
+        self.assertEqual('http://logs.example.com/1/1/gate/project-test2/2',
+                         status_jobs[2]['report_url'])
 
     def test_merging_queues(self):
         "Test that transitively-connected change queues are merged"
@@ -2229,6 +2330,70 @@
         self.sched.reconfigure(self.config)
         self.assertEqual(len(self.sched.layout.pipelines['gate'].queues), 1)
 
+    def test_mutex(self):
+        "Test job mutexes"
+        self.config.set('zuul', 'layout_config',
+                        'tests/fixtures/layout-mutex.yaml')
+        self.sched.reconfigure(self.config)
+
+        self.worker.hold_jobs_in_build = True
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+        self.assertNotIn('test-mutex', self.sched.mutex.mutexes)
+
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertEqual(len(self.builds), 3)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'mutex-one')
+        self.assertEqual(self.builds[2].name, 'project-test1')
+
+        self.worker.release('mutex-one')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 3)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'project-test1')
+        self.assertEqual(self.builds[2].name, 'mutex-two')
+        self.assertIn('test-mutex', self.sched.mutex.mutexes)
+
+        self.worker.release('mutex-two')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 3)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'project-test1')
+        self.assertEqual(self.builds[2].name, 'mutex-one')
+        self.assertIn('test-mutex', self.sched.mutex.mutexes)
+
+        self.worker.release('mutex-one')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 3)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'project-test1')
+        self.assertEqual(self.builds[2].name, 'mutex-two')
+        self.assertIn('test-mutex', self.sched.mutex.mutexes)
+
+        self.worker.release('mutex-two')
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.builds), 2)
+        self.assertEqual(self.builds[0].name, 'project-test1')
+        self.assertEqual(self.builds[1].name, 'project-test1')
+        self.assertNotIn('test-mutex', self.sched.mutex.mutexes)
+
+        self.worker.hold_jobs_in_build = False
+        self.worker.release()
+
+        self.waitUntilSettled()
+        self.assertEqual(len(self.builds), 0)
+
+        self.assertEqual(A.reported, 1)
+        self.assertEqual(B.reported, 1)
+        self.assertNotIn('test-mutex', self.sched.mutex.mutexes)
+
     def test_node_label(self):
         "Test that a job runs on a specific node label"
         self.worker.registerFunction('build:node-project-test1:debian')
@@ -2494,6 +2659,104 @@
         # Ensure the removed job was not included in the report.
         self.assertNotIn('project1-project2-integration', A.messages[0])
 
+    def test_double_live_reconfiguration_shared_queue(self):
+        # This was a real-world regression.  A change is added to
+        # gate; a reconfigure happens, a second change which depends
+        # on the first is added, and a second reconfiguration happens.
+        # Ensure that both changes merge.
+
+        # A failure may indicate incorrect caching or cleaning up of
+        # references during a reconfiguration.
+        self.worker.hold_jobs_in_build = True
+
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+        B.setDependsOn(A, 1)
+        A.addApproval('CRVW', 2)
+        B.addApproval('CRVW', 2)
+
+        # Add the parent change.
+        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.waitUntilSettled()
+        self.worker.release('.*-merge')
+        self.waitUntilSettled()
+
+        # Reconfigure (with only one change in the pipeline).
+        self.sched.reconfigure(self.config)
+        self.waitUntilSettled()
+
+        # Add the child change.
+        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.waitUntilSettled()
+        self.worker.release('.*-merge')
+        self.waitUntilSettled()
+
+        # Reconfigure (with both in the pipeline).
+        self.sched.reconfigure(self.config)
+        self.waitUntilSettled()
+
+        self.worker.hold_jobs_in_build = False
+        self.worker.release()
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.history), 8)
+
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(A.reported, 2)
+        self.assertEqual(B.data['status'], 'MERGED')
+        self.assertEqual(B.reported, 2)
+
+    def test_live_reconfiguration_del_project(self):
+        # Test project deletion from layout
+        # while changes are enqueued
+
+        self.worker.hold_jobs_in_build = True
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+        C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
+
+        # A Depends-On: B
+        A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
+            A.subject, B.data['id'])
+        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.worker.release('.*-merge')
+        self.waitUntilSettled()
+        self.assertEqual(len(self.builds), 5)
+
+        # This layout defines only org/project, not org/project1
+        self.config.set('zuul', 'layout_config',
+                        'tests/fixtures/layout-live-'
+                        'reconfiguration-del-project.yaml')
+        self.sched.reconfigure(self.config)
+        self.waitUntilSettled()
+
+        # Builds for C are aborted; builds for A succeed
+        # and have change B applied ahead
+        job_c = self.getJobFromHistory('project1-test1')
+        self.assertEqual(job_c.changes, '3,1')
+        self.assertEqual(job_c.result, 'ABORTED')
+
+        self.worker.hold_jobs_in_build = False
+        self.worker.release()
+        self.waitUntilSettled()
+
+        self.assertEqual(self.getJobFromHistory('project-test1').changes,
+                         '2,1 1,1')
+
+        self.assertEqual(A.data['status'], 'NEW')
+        self.assertEqual(B.data['status'], 'NEW')
+        self.assertEqual(C.data['status'], 'NEW')
+        self.assertEqual(A.reported, 1)
+        self.assertEqual(B.reported, 0)
+        self.assertEqual(C.reported, 0)
+
+        self.assertEqual(len(self.sched.layout.pipelines['check'].queues), 0)
+        self.assertIn('Build succeeded', A.messages[0])
+
     def test_live_reconfiguration_functions(self):
         "Test live reconfiguration with a custom function"
         self.worker.registerFunction('build:node-project-test1:debian')
@@ -2581,6 +2844,25 @@
         self.assertEqual(B.data['status'], 'MERGED')
         self.assertEqual(B.reported, 2)
 
+    def test_tags(self):
+        "Test job tags"
+        self.config.set('zuul', 'layout_config',
+                        'tests/fixtures/layout-tags.yaml')
+        self.sched.reconfigure(self.config)
+
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
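+        # Each job's tags should arrive as the BUILD_TAGS parameter.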
+        results = {'project1-merge': 'extratag merge project1',
+                   'project2-merge': 'merge'}
+
+        for build in self.history:
+            self.assertEqual(results.get(build.name, ''),
+                             build.parameters.get('BUILD_TAGS'))
+
     def test_timer(self):
         "Test that a periodic job is triggered"
         self.worker.hold_jobs_in_build = True
@@ -2598,7 +2880,8 @@
 
         port = self.webapp.server.socket.getsockname()[1]
 
-        f = urllib.urlopen("http://localhost:%s/status.json" % port)
+        req = urllib.request.Request("http://localhost:%s/status.json" % port)
+        f = urllib.request.urlopen(req)
         data = f.read()
 
         self.worker.hold_jobs_in_build = False
@@ -2640,11 +2923,11 @@
                             'tests/fixtures/layout-idle.yaml')
             self.sched.reconfigure(self.config)
             self.registerJobs()
+            self.waitUntilSettled()
 
             # The pipeline triggers every second, so we should have seen
             # several by now.
             time.sleep(5)
-            self.waitUntilSettled()
 
             # Stop queuing timer triggered jobs so that the assertions
             # below don't race against more jobs being queued.
@@ -2652,6 +2935,7 @@
                             'tests/fixtures/layout-no-timer.yaml')
             self.sched.reconfigure(self.config)
             self.registerJobs()
+            self.waitUntilSettled()
 
             self.assertEqual(len(self.builds), 2)
             self.worker.release('.*')
@@ -3254,24 +3538,24 @@
             len(self.sched.layout.pipelines['gate'].merge_failure_actions), 2)
 
         self.assertTrue(isinstance(
-            self.sched.layout.pipelines['check'].merge_failure_actions[0].
-            reporter, zuul.reporter.gerrit.Reporter))
+            self.sched.layout.pipelines['check'].merge_failure_actions[0],
+            zuul.reporter.gerrit.GerritReporter))
 
         self.assertTrue(
             (
                 isinstance(self.sched.layout.pipelines['gate'].
-                           merge_failure_actions[0].reporter,
-                           zuul.reporter.smtp.Reporter) and
+                           merge_failure_actions[0],
+                           zuul.reporter.smtp.SMTPReporter) and
                 isinstance(self.sched.layout.pipelines['gate'].
-                           merge_failure_actions[1].reporter,
-                           zuul.reporter.gerrit.Reporter)
+                           merge_failure_actions[1],
+                           zuul.reporter.gerrit.GerritReporter)
             ) or (
                 isinstance(self.sched.layout.pipelines['gate'].
-                           merge_failure_actions[0].reporter,
-                           zuul.reporter.gerrit.Reporter) and
+                           merge_failure_actions[0],
+                           zuul.reporter.gerrit.GerritReporter) and
                 isinstance(self.sched.layout.pipelines['gate'].
-                           merge_failure_actions[1].reporter,
-                           zuul.reporter.smtp.Reporter)
+                           merge_failure_actions[1],
+                           zuul.reporter.smtp.SMTPReporter)
             )
         )
 
@@ -3310,6 +3594,31 @@
         self.assertEqual('The merge failed! For more information...',
                          self.smtp_messages[0]['body'])
 
+    def test_default_merge_failure_reports(self):
+        """Check that the default merge failure reports are correct."""
+
+        # A should report success, B should report merge failure.
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        A.addPatchset(['conflict'])
+        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+        B.addPatchset(['conflict'])
+        A.addApproval('CRVW', 2)
+        B.addApproval('CRVW', 2)
+        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.waitUntilSettled()
+
+        self.assertEqual(3, len(self.history))  # only A's jobs ran
+        self.assertEqual(A.reported, 2)
+        self.assertEqual(B.reported, 2)
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(B.data['status'], 'NEW')
+        self.assertIn('Build succeeded', A.messages[1])
+        self.assertIn('Merge Failed', B.messages[1])
+        self.assertIn('automatically merged', B.messages[1])
+        self.assertNotIn('logs.example.com', B.messages[1])
+        self.assertNotIn('SKIPPED', B.messages[1])
+
     def test_swift_instructions(self):
         "Test that the correct swift instructions are sent to the workers"
         self.config.set('zuul', 'layout_config',
@@ -3464,8 +3773,8 @@
         self.assertEqual(A.data['status'], 'NEW')
         self.assertEqual(B.data['status'], 'NEW')
 
-        source = self.sched.layout.pipelines['gate'].source
-        source.maintainCache([])
+        for connection in self.connections.values():
+            connection.maintainCache([])
 
         self.worker.hold_jobs_in_build = True
         B.addApproval('APRV', 1)
@@ -3668,6 +3977,48 @@
         self.assertEqual(A.data['status'], 'NEW')
         self.assertEqual(B.data['status'], 'NEW')
 
+    def test_crd_gate_unknown(self):
+        "Test unknown projects in dependent pipeline"
+        self.init_repo("org/unknown")
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/unknown', 'master', 'B')
+        A.addApproval('CRVW', 2)
+        B.addApproval('CRVW', 2)
+
+        # A Depends-On: B
+        A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
+            A.subject, B.data['id'])
+
+        B.addApproval('APRV', 1)
+        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.waitUntilSettled()
+
+        # Unknown projects cannot share a queue with any other
+        # project since they have no jobs in common (they have no
+        # jobs at all).  Changes which depend on changes to unknown
+        # projects should not be processed in a dependent pipeline.
+        self.assertEqual(A.data['status'], 'NEW')
+        self.assertEqual(B.data['status'], 'NEW')
+        self.assertEqual(A.reported, 0)
+        self.assertEqual(B.reported, 0)
+        self.assertEqual(len(self.history), 0)
+
+        # Simulate change B being gated outside this layout
+        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        B.setMerged()
+        self.waitUntilSettled()
+        self.assertEqual(len(self.history), 0)
+
+        # Now that B is merged, A should be able to be enqueued and
+        # merged.
+        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.waitUntilSettled()
+
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(A.reported, 2)
+        self.assertEqual(B.data['status'], 'MERGED')
+        self.assertEqual(B.reported, 0)
+
     def test_crd_check(self):
         "Test cross-repo dependencies in independent pipelines"
 
@@ -3782,12 +4133,12 @@
         self.assertIn('Build succeeded', A.messages[0])
         self.assertIn('Build succeeded', B.messages[0])
 
-    def test_crd_check_reconfiguration(self):
+    def _test_crd_check_reconfiguration(self, project1, project2):
         "Test cross-repo dependencies re-enqueued in independent pipelines"
 
         self.gearman_server.hold_jobs_in_queue = True
-        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
-        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+        A = self.fake_gerrit.addFakeChange(project1, 'master', 'A')
+        B = self.fake_gerrit.addFakeChange(project2, 'master', 'B')
 
         # A Depends-On: B
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
@@ -3820,6 +4171,17 @@
         self.assertEqual(self.history[0].changes, '2,1 1,1')
         self.assertEqual(len(self.sched.layout.pipelines['check'].queues), 0)
 
+    def test_crd_check_reconfiguration(self):
+        self._test_crd_check_reconfiguration('org/project1', 'org/project2')
+
+    def test_crd_undefined_project(self):
+        """Test that undefined projects in dependencies are handled for
+        independent pipelines"""
+        # Initializing the repo is a hack for the fake gerrit, which
+        # assumes the repository exists when any change is created.
+        self.init_repo("org/unknown")
+        self._test_crd_check_reconfiguration('org/project1', 'org/unknown')
+
     def test_crd_check_ignore_dependencies(self):
         "Test cross-repo dependencies can be ignored"
         self.config.set('zuul', 'layout_config',
@@ -3904,3 +4266,199 @@
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
         self.waitUntilSettled()
         self.assertEqual(self.history[-1].changes, '3,2 2,1 1,2')
+
+    def test_crd_cycle_join(self):
+        "Test an updated change creates a cycle"
+        A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
+
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        # Create B->A
+        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+        B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
+            B.subject, A.data['id'])
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        # Update A to add A->B (a cycle).
+        A.addPatchset()
+        A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
+            A.subject, B.data['id'])
+        # Normally we would submit the patchset-created event for
+        # processing here, however, we have no way of noting whether
+        # the dependency cycle detection correctly raised an
+        # exception, so instead, we reach into the source driver and
+        # call the method that would ultimately be called by the event
+        # processing.
+
+        source = self.sched.layout.pipelines['gate'].source
+        with testtools.ExpectedException(
+            Exception, "Dependency cycle detected"):
+            source._getChange(u'1', u'2', True)
+        self.log.debug("Got expected dependency cycle exception")
+
+        # Now if we update B to remove the depends-on, everything
+        # should be okay.  B; A->B
+
+        B.addPatchset()
+        B.data['commitMessage'] = '%s\n' % (B.subject,)
+        source._getChange(u'1', u'2', True)
+        source._getChange(u'2', u'2', True)
+
+    def test_disable_at(self):
+        "Test a pipeline will only report to the disabled trigger when failing"
+
+        self.config.set('zuul', 'layout_config',
+                        'tests/fixtures/layout-disable-at.yaml')
+        self.sched.reconfigure(self.config)
+
+        self.assertEqual(3, self.sched.layout.pipelines['check'].disable_at)
+        self.assertEqual(
+            0, self.sched.layout.pipelines['check']._consecutive_failures)
+        self.assertFalse(self.sched.layout.pipelines['check']._disabled)
+
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+        C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
+        D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
+        E = self.fake_gerrit.addFakeChange('org/project', 'master', 'E')
+        F = self.fake_gerrit.addFakeChange('org/project', 'master', 'F')
+        G = self.fake_gerrit.addFakeChange('org/project', 'master', 'G')
+        H = self.fake_gerrit.addFakeChange('org/project', 'master', 'H')
+        I = self.fake_gerrit.addFakeChange('org/project', 'master', 'I')
+        J = self.fake_gerrit.addFakeChange('org/project', 'master', 'J')
+        K = self.fake_gerrit.addFakeChange('org/project', 'master', 'K')
+
+        self.worker.addFailTest('project-test1', A)
+        self.worker.addFailTest('project-test1', B)
+        # Let C pass, resetting the counter
+        self.worker.addFailTest('project-test1', D)
+        self.worker.addFailTest('project-test1', E)
+        self.worker.addFailTest('project-test1', F)
+        self.worker.addFailTest('project-test1', G)
+        self.worker.addFailTest('project-test1', H)
+        # I also passes but should only report to the disabled reporters
+        self.worker.addFailTest('project-test1', J)
+        self.worker.addFailTest('project-test1', K)
+
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        self.assertEqual(
+            2, self.sched.layout.pipelines['check']._consecutive_failures)
+        self.assertFalse(self.sched.layout.pipelines['check']._disabled)
+
+        self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        self.assertEqual(
+            0, self.sched.layout.pipelines['check']._consecutive_failures)
+        self.assertFalse(self.sched.layout.pipelines['check']._disabled)
+
+        self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(E.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(F.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        # We should be disabled now
+        self.assertEqual(
+            3, self.sched.layout.pipelines['check']._consecutive_failures)
+        self.assertTrue(self.sched.layout.pipelines['check']._disabled)
+
+        # We need to wait between each of these patches to make sure the
+        # smtp messages come back in the expected order
+        self.fake_gerrit.addEvent(G.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.fake_gerrit.addEvent(H.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.fake_gerrit.addEvent(I.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        # The first 6 changes (ABCDEF) should have reported back to gerrit,
+        # thus leaving a message on each change
+        self.assertEqual(1, len(A.messages))
+        self.assertIn('Build failed.', A.messages[0])
+        self.assertEqual(1, len(B.messages))
+        self.assertIn('Build failed.', B.messages[0])
+        self.assertEqual(1, len(C.messages))
+        self.assertIn('Build succeeded.', C.messages[0])
+        self.assertEqual(1, len(D.messages))
+        self.assertIn('Build failed.', D.messages[0])
+        self.assertEqual(1, len(E.messages))
+        self.assertIn('Build failed.', E.messages[0])
+        self.assertEqual(1, len(F.messages))
+        self.assertIn('Build failed.', F.messages[0])
+
+        # The last 3 (GHI) would have only reported via smtp.
+        self.assertEqual(3, len(self.smtp_messages))
+        self.assertEqual(0, len(G.messages))
+        self.assertIn('Build failed.', self.smtp_messages[0]['body'])
+        self.assertIn('/7/1/check', self.smtp_messages[0]['body'])
+        self.assertEqual(0, len(H.messages))
+        self.assertIn('Build failed.', self.smtp_messages[1]['body'])
+        self.assertIn('/8/1/check', self.smtp_messages[1]['body'])
+        self.assertEqual(0, len(I.messages))
+        self.assertIn('Build succeeded.', self.smtp_messages[2]['body'])
+        self.assertIn('/9/1/check', self.smtp_messages[2]['body'])
+
+        # Now reload the configuration (simulate a HUP) to check that
+        # the pipeline comes out of the disabled state
+        self.sched.reconfigure(self.config)
+
+        self.assertEqual(3, self.sched.layout.pipelines['check'].disable_at)
+        self.assertEqual(
+            0, self.sched.layout.pipelines['check']._consecutive_failures)
+        self.assertFalse(self.sched.layout.pipelines['check']._disabled)
+
+        self.fake_gerrit.addEvent(J.getPatchsetCreatedEvent(1))
+        self.fake_gerrit.addEvent(K.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        self.assertEqual(
+            2, self.sched.layout.pipelines['check']._consecutive_failures)
+        self.assertFalse(self.sched.layout.pipelines['check']._disabled)
+
+        # J and K went back to gerrit
+        self.assertEqual(1, len(J.messages))
+        self.assertIn('Build failed.', J.messages[0])
+        self.assertEqual(1, len(K.messages))
+        self.assertIn('Build failed.', K.messages[0])
+        # No more messages reported via smtp
+        self.assertEqual(3, len(self.smtp_messages))
+
+    def test_success_pattern(self):
+        "Ensure bad build params are ignored"
+
+        # Use the SMTP reporter to grab the result message more easily
+        self.init_repo("org/docs")
+        self.config.set('zuul', 'layout_config',
+                        'tests/fixtures/layout-success-pattern.yaml')
+        self.sched.reconfigure(self.config)
+        self.worker.hold_jobs_in_build = True
+        self.registerJobs()
+
+        A = self.fake_gerrit.addFakeChange('org/docs', 'master', 'A')
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        # Grab the short build id embedded in the report URL
+        self.assertEqual(len(self.builds), 1)
+        uuid = self.builds[0].unique[:7]
+
+        self.worker.hold_jobs_in_build = False
+        self.worker.release()
+        self.waitUntilSettled()
+
+        self.assertEqual(len(self.smtp_messages), 1)
+        body = self.smtp_messages[0]['body'].splitlines()
+        self.assertEqual('Build succeeded.', body[0])
+
+        self.assertIn(
+            '- docs-draft-test http://docs-draft.example.org/1/1/1/check/'
+            'docs-draft-test/%s/publish-docs/' % uuid,
+            body[2])
+        self.assertIn(
+            '- docs-draft-test2 https://server/job/docs-draft-test2/1/',
+            body[3])
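The URLs checked above come from each job's success-pattern, which is
expanded with the build's parameters. A rough sketch of that substitution
(pattern and names assumed for illustration; the real logic lives in zuul's
launcher/reporter code):

    class Build(object):
        def __init__(self, parameters):
            self.parameters = parameters

    # LOG_PATH here stands in for the uuid-bearing path asserted above.
    build = Build({'LOG_PATH': '1/1/1/check/docs-draft-test/abc1234'})
    pattern = ('http://docs-draft.example.org/'
               '{build.parameters[LOG_PATH]}/publish-docs/')
    print(pattern.format(build=build))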
diff --git a/tests/test_source.py b/tests/test_source.py
new file mode 100644
index 0000000..8a3e7d5
--- /dev/null
+++ b/tests/test_source.py
@@ -0,0 +1,25 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import testtools
+
+import zuul.source
+
+
+class TestGerritSource(testtools.TestCase):
+    log = logging.getLogger("zuul.test_source")
+
+    def test_source_name(self):
+        self.assertEqual('gerrit', zuul.source.gerrit.GerritSource.name)
diff --git a/tests/test_trigger.py b/tests/test_trigger.py
new file mode 100644
index 0000000..7eb1b69
--- /dev/null
+++ b/tests/test_trigger.py
@@ -0,0 +1,51 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import testtools
+
+import zuul.trigger
+
+
+class TestGerritTrigger(testtools.TestCase):
+    log = logging.getLogger("zuul.test_trigger")
+
+    def test_trigger_abc(self):
+        # We only need to instantiate a class for this
+        zuul.trigger.gerrit.GerritTrigger({})
+
+    def test_trigger_name(self):
+        self.assertEqual('gerrit', zuul.trigger.gerrit.GerritTrigger.name)
+
+
+class TestTimerTrigger(testtools.TestCase):
+    log = logging.getLogger("zuul.test_trigger")
+
+    def test_trigger_abc(self):
+        # We only need to instantiate a class for this
+        zuul.trigger.timer.TimerTrigger({})
+
+    def test_trigger_name(self):
+        self.assertEqual('timer', zuul.trigger.timer.TimerTrigger.name)
+
+
+class TestZuulTrigger(testtools.TestCase):
+    log = logging.getLogger("zuul.test_trigger")
+
+    def test_trigger_abc(self):
+        # We only need to instantiate a class for this
+        zuul.trigger.zuultrigger.ZuulTrigger({})
+
+    def test_trigger_name(self):
+        self.assertEqual('zuul', zuul.trigger.zuultrigger.ZuulTrigger.name)
diff --git a/tests/test_webapp.py b/tests/test_webapp.py
index b127c51..94f097a 100644
--- a/tests/test_webapp.py
+++ b/tests/test_webapp.py
@@ -16,7 +16,8 @@
 # under the License.
 
 import json
-import urllib2
+
+from six.moves import urllib
 
 from tests.base import ZuulTestCase
 
@@ -44,41 +45,41 @@
     def test_webapp_status(self):
         "Test that we can filter to only certain changes in the webapp."
 
-        req = urllib2.Request(
+        req = urllib.request.Request(
             "http://localhost:%s/status" % self.port)
-        f = urllib2.urlopen(req)
+        f = urllib.request.urlopen(req)
         data = json.loads(f.read())
 
         self.assertIn('pipelines', data)
 
     def test_webapp_status_compat(self):
         # testing compat with status.json
-        req = urllib2.Request(
+        req = urllib.request.Request(
             "http://localhost:%s/status.json" % self.port)
-        f = urllib2.urlopen(req)
+        f = urllib.request.urlopen(req)
         data = json.loads(f.read())
 
         self.assertIn('pipelines', data)
 
     def test_webapp_bad_url(self):
         # do we 404 correctly
-        req = urllib2.Request(
+        req = urllib.request.Request(
             "http://localhost:%s/status/foo" % self.port)
-        self.assertRaises(urllib2.HTTPError, urllib2.urlopen, req)
+        self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, req)
 
     def test_webapp_find_change(self):
         # can we filter by change id
-        req = urllib2.Request(
+        req = urllib.request.Request(
             "http://localhost:%s/status/change/1,1" % self.port)
-        f = urllib2.urlopen(req)
+        f = urllib.request.urlopen(req)
         data = json.loads(f.read())
 
         self.assertEqual(1, len(data), data)
         self.assertEqual("org/project", data[0]['project'])
 
-        req = urllib2.Request(
+        req = urllib.request.Request(
             "http://localhost:%s/status/change/2,1" % self.port)
-        f = urllib2.urlopen(req)
+        f = urllib.request.urlopen(req)
         data = json.loads(f.read())
 
         self.assertEqual(1, len(data), data)
diff --git a/tools/trigger-job.py b/tools/trigger-job.py
index dff4e3f..7123afc 100755
--- a/tools/trigger-job.py
+++ b/tools/trigger-job.py
@@ -68,7 +68,7 @@
     job = gear.Job("build:%s" % args.job,
                    json.dumps(data),
                    unique=data['ZUUL_UUID'])
-    c.submitJob(job)
+    c.submitJob(job, precedence=gear.PRECEDENCE_HIGH)
 
     while not job.complete:
         time.sleep(1)
diff --git a/tools/zuul-changes.py b/tools/zuul-changes.py
index 9dbf504..8b854c7 100755
--- a/tools/zuul-changes.py
+++ b/tools/zuul-changes.py
@@ -35,7 +35,7 @@
                 if not change['live']:
                     continue
                 cid, cps = change['id'].split(',')
-                print (
+                print(
                     "zuul enqueue --trigger gerrit --pipeline %s "
                     "--project %s --change %s,%s" % (
                         options.pipeline_name,
diff --git a/tools/zuul-clear-refs.py b/tools/zuul-clear-refs.py
new file mode 100755
index 0000000..60ce744
--- /dev/null
+++ b/tools/zuul-clear-refs.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# Copyright 2014-2015 Antoine "hashar" Musso
+# Copyright 2014-2015 Wikimedia Foundation Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# pylint: disable=locally-disabled, invalid-name
+
+"""
+Zuul references cleaner.
+
+Clear up references under refs/zuul/ by inspecting the age of the commit the
+reference points to.  If the commit date is older than the number of days
+specified by --until, the reference is deleted from the git repository.
+
+Use --dry-run --verbose to finely inspect the script behavior.
+"""
+
+import argparse
+import git
+import logging
+import time
+import sys
+
+NOW = int(time.time())
+DEFAULT_DAYS = 360
+ZUUL_REF_PREFIX = 'refs/zuul/'
+
+parser = argparse.ArgumentParser(
+    description=__doc__,
+    formatter_class=argparse.RawDescriptionHelpFormatter,
+)
+parser.add_argument('--until', dest='days_ago', default=DEFAULT_DAYS, type=int,
+                    help='references older than this number of days will '
+                         'be deleted. Default: %s' % DEFAULT_DAYS)
+parser.add_argument('-n', '--dry-run', dest='dryrun', action='store_true',
+                    help='do not delete references')
+parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
+                    help='set log level from info to debug')
+parser.add_argument('gitrepo', help='path to a Zuul git repository')
+args = parser.parse_args()
+
+logging.basicConfig()
+log = logging.getLogger('zuul-clear-refs')
+if args.verbose:
+    log.setLevel(logging.DEBUG)
+else:
+    log.setLevel(logging.INFO)
+
+try:
+    repo = git.Repo(args.gitrepo)
+except git.exc.InvalidGitRepositoryError:
+    log.error("Invalid git repo: %s" % args.gitrepo)
+    sys.exit(1)
+
+for ref in repo.references:
+
+    if not ref.path.startswith(ZUUL_REF_PREFIX):
+        continue
+    if type(ref) is not git.refs.reference.Reference:
+        # Paranoia: ignore heads/tags/remotes ..
+        continue
+
+    try:
+        commit_ts = ref.commit.committed_date
+    except LookupError:
+        # GitPython does not properly handle PGP signed tags
+        log.exception("Error in commit: %s, ref: %s. Type: %s",
+                      ref.commit, ref.path, type(ref))
+        continue
+
+    commit_age = int((NOW - commit_ts) / 86400)  # days
+    log.debug(
+        "%s at %s is %3s days old",
+        ref.commit,
+        ref.path,
+        commit_age,
+    )
+    if commit_age > args.days_ago:
+        if args.dryrun:
+            log.info("Would delete old ref: %s (%s)", ref.path, ref.commit)
+        else:
+            log.info("Deleting old ref: %s (%s)", ref.path, ref.commit)
+            ref.delete(repo, ref.path)
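For example (repository path assumed), a dry run listing refs whose commits
are older than 90 days without deleting anything:

    ./tools/zuul-clear-refs.py --dry-run --verbose --until 90 \
        /var/lib/zuul/git/openstack/nova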
diff --git a/tox.ini b/tox.ini
index d716bb7..06ccbcd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,10 +5,12 @@
 
 [testenv]
 # Set STATSD env variables so that statsd code paths are tested.
-setenv = STATSD_HOST=localhost
+setenv = STATSD_HOST=127.0.0.1
          STATSD_PORT=8125
          VIRTUAL_ENV={envdir}
          OS_TEST_TIMEOUT=30
+         OS_LOG_DEFAULTS={env:OS_LOG_DEFAULTS:gear.Server=INFO,gear.Client=INFO}
+passenv = ZUUL_TEST_ROOT
 usedevelop = True
 install_command = pip install {opts} {packages}
 deps = -r{toxinidir}/requirements.txt
@@ -16,10 +18,17 @@
 commands =
   python setup.py testr --slowest --testr-args='{posargs}'
 
-[tox:jenkins]
-downloadcache = ~/cache/pip
+[testenv:bindep]
+# Do not install any requirements. We want this to be fast and work even if
+# system dependencies are missing, since it's used to tell you what system
+# dependencies are missing! This also means that bindep must be installed
+# separately, outside of the requirements files.
+deps = bindep
+commands = bindep test
 
 [testenv:pep8]
+# streamer is python3 only, so we need to run flake8 in python3
+basepython = python3
 commands = flake8 {posargs}
 
 [testenv:cover]
diff --git a/zuul/ansible/__init__.py b/zuul/ansible/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/zuul/ansible/__init__.py
diff --git a/zuul/ansible/library/__init__.py b/zuul/ansible/library/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/zuul/ansible/library/__init__.py
diff --git a/zuul/ansible/library/zuul_console.py b/zuul/ansible/library/zuul_console.py
new file mode 100644
index 0000000..78f3249
--- /dev/null
+++ b/zuul/ansible/library/zuul_console.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 IBM Corp.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import select
+import socket
+import sys
+import threading
+import time
+
+
+def daemonize():
+    # A really basic daemonize method that should work well enough for
+    # now in this circumstance. Based on the public domain code at:
+    # http://web.archive.org/web/20131017130434/http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
+
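+    # First fork: the parent returns True so the calling module can exit,
+    # while the child continues on to become the daemon.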
+    pid = os.fork()
+    if pid > 0:
+        return True
+
+    os.chdir('/')
+    os.setsid()
+    os.umask(0)
+
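+    # Second fork: ensures the daemon is not a session leader and so can
+    # never reacquire a controlling terminal.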
+    pid = os.fork()
+    if pid > 0:
+        sys.exit(0)
+
+    sys.stdout.flush()
+    sys.stderr.flush()
+    i = open('/dev/null', 'r')
+    o = open('/dev/null', 'a+')
+    e = open('/dev/null', 'a+', 0)
+    os.dup2(i.fileno(), sys.stdin.fileno())
+    os.dup2(o.fileno(), sys.stdout.fileno())
+    os.dup2(e.fileno(), sys.stderr.fileno())
+    return False
+
+
+class Console(object):
+    def __init__(self, path):
+        self.path = path
+        self.file = open(path)
+        self.stat = os.stat(path)
+        self.size = self.stat.st_size
+
+
+class Server(object):
+    def __init__(self, path, port):
+        self.path = path
+        s = None
+        for res in socket.getaddrinfo(None, port, socket.AF_UNSPEC,
+                                      socket.SOCK_STREAM, 0,
+                                      socket.AI_PASSIVE):
+            af, socktype, proto, canonname, sa = res
+            try:
+                s = socket.socket(af, socktype, proto)
+                s.setsockopt(socket.SOL_SOCKET,
+                             socket.SO_REUSEADDR, 1)
+            except socket.error:
+                s = None
+                continue
+            try:
+                s.bind(sa)
+                s.listen(1)
+            except socket.error:
+                s.close()
+                s = None
+                continue
+            break
+        if s is None:
+            sys.exit(1)
+        self.socket = s
+
+    def accept(self):
+        conn, addr = self.socket.accept()
+        return conn
+
+    def run(self):
+        while True:
+            conn = self.accept()
+            t = threading.Thread(target=self.handleOneConnection, args=(conn,))
+            t.daemon = True
+            t.start()
+
+    def chunkConsole(self, conn):
+        try:
+            console = Console(self.path)
+        except Exception:
+            return
+        while True:
+            chunk = console.file.read(4096)
+            if not chunk:
+                break
+            conn.send(chunk)
+        return console
+
+    def followConsole(self, console, conn):
+        while True:
+            # As long as we have unread data, keep reading/sending
+            while True:
+                chunk = console.file.read(4096)
+                if chunk:
+                    conn.send(chunk)
+                else:
+                    break
+
+            # At this point, we are waiting for more data to be written
+            time.sleep(0.5)
+
+            # Check to see if the remote end has sent any data; if so,
+            # discard it
+            r, w, e = select.select([conn], [], [conn], 0)
+            if conn in e:
+                return False
+            if conn in r:
+                ret = conn.recv(1024)
+                # Discard anything read; if the input is at EOF, the
+                # remote end has disconnected.
+                if not ret:
+                    return False
+
+            # See if the file has been truncated
+            try:
+                st = os.stat(console.path)
+                if (st.st_ino != console.stat.st_ino or
+                    st.st_size < console.size):
+                    return True
+            except Exception:
+                return True
+            console.size = st.st_size
+
+    def handleOneConnection(self, conn):
+        # FIXME: this won't notice disconnects until it tries to send
+        console = None
+        try:
+            while True:
+                if console is not None:
+                    try:
+                        console.file.close()
+                    except Exception:
+                        pass
+                while True:
+                    console = self.chunkConsole(conn)
+                    if console:
+                        break
+                    time.sleep(0.5)
+                while True:
+                    if self.followConsole(console, conn):
+                        break
+                    else:
+                        return
+        finally:
+            try:
+                conn.close()
+            except Exception:
+                pass
+
+
+def test():
+    s = Server('/tmp/console.html', 8088)
+    s.run()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(default='/tmp/console.html'),
+            port=dict(default=8088, type='int'),
+        )
+    )
+
+    p = module.params
+    path = p['path']
+    port = p['port']
+
+    if daemonize():
+        module.exit_json()
+
+    s = Server(path, port)
+    s.run()
+
+from ansible.module_utils.basic import *  # noqa
+
+if __name__ == '__main__':
+    main()
+# test()
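The server above first sends the existing console file (chunkConsole) and
then follows it for new data, so a client only ever needs to read. A minimal
client sketch under those assumptions (hypothetical helper, not part of this
change):

    import socket
    import sys

    def stream_console(host, port=8088):
        # Read the streamed console log until the server closes the
        # connection.
        conn = socket.create_connection((host, port))
        try:
            while True:
                chunk = conn.recv(4096)
                if not chunk:
                    break
                sys.stdout.write(chunk)
        finally:
            conn.close()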
diff --git a/zuul/ansible/library/zuul_log.py b/zuul/ansible/library/zuul_log.py
new file mode 100644
index 0000000..4b377d9
--- /dev/null
+++ b/zuul/ansible/library/zuul_log.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 IBM Corp.
+# Copyright (c) 2016 Red Hat
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+
+
+class Console(object):
+    def __enter__(self):
+        self.logfile = open('/tmp/console.html', 'a', 0)
+        return self
+
+    def __exit__(self, etype, value, tb):
+        self.logfile.close()
+
+    def addLine(self, ln):
+        ts = datetime.datetime.now()
+        outln = '%s | %s' % (str(ts), ln)
+        self.logfile.write(outln)
+
+
+def log(msg):
+    if not isinstance(msg, list):
+        msg = [msg]
+    with Console() as console:
+        for line in msg:
+            console.addLine("[Zuul] %s\n" % line)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            msg=dict(required=True, type='raw'),
+        )
+    )
+
+    p = module.params
+    log(p['msg'])
+    module.exit_json(changed=True)
+
+from ansible.module_utils.basic import *  # noqa
+
+if __name__ == '__main__':
+    main()
diff --git a/zuul/ansible/library/zuul_runner.py b/zuul/ansible/library/zuul_runner.py
new file mode 100644
index 0000000..7689fb3
--- /dev/null
+++ b/zuul/ansible/library/zuul_runner.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 IBM Corp.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+import getpass
+import os
+import subprocess
+import threading
+
+
+class Console(object):
+    def __enter__(self):
+        self.logfile = open('/tmp/console.html', 'a', 0)
+        return self
+
+    def __exit__(self, etype, value, tb):
+        self.logfile.close()
+
+    def addLine(self, ln):
+        # Note this format with its delimiter is "inspired" by the old
+        # Jenkins format but with microsecond resolution instead of
+        # millisecond.  It is kept so log parsing/formatting remains
+        # consistent.
+        ts = datetime.datetime.now()
+        outln = '%s | %s' % (ts, ln)
+        self.logfile.write(outln)
+
+
+def get_env():
+    env = {}
+    env['HOME'] = os.path.expanduser('~')
+    env['USER'] = getpass.getuser()
+
+    # Known locations for PAM mod_env sources
+    for fn in ['/etc/environment', '/etc/default/locale']:
+        if os.path.exists(fn):
+            with open(fn) as f:
+                for line in f:
+                    if not line:
+                        continue
+                    if line[0] == '#':
+                        continue
+                    if '=' not in line:
+                        continue
+                    k, v = line.strip().split('=', 1)
+                    for q in ["'", '"']:
+                        if v[0] == q:
+                            v = v.strip(q)
+                    env[k] = v
+    return env
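+
+# Example (illustrative): a line like LANG="en_US.UTF-8" in /etc/environment
+# splits into k = 'LANG' and v = '"en_US.UTF-8"', and the quote-stripping
+# loop above reduces v to 'en_US.UTF-8'.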
+
+
+def follow(fd):
+    newline_warning = False
+    with Console() as console:
+        while True:
+            line = fd.readline()
+            if not line:
+                break
+            if not line.endswith('\n'):
+                line += '\n'
+                newline_warning = True
+            console.addLine(line)
+        if newline_warning:
+            console.addLine('[Zuul] No trailing newline\n')
+
+
+def run(cwd, cmd, args):
+    env = get_env()
+    env.update(args)
+    proc = subprocess.Popen(
+        ['/bin/bash', '-l', '-c', cmd],
+        cwd=cwd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        env=env,
+    )
+
+    t = threading.Thread(target=follow, args=(proc.stdout,))
+    t.daemon = True
+    t.start()
+
+    ret = proc.wait()
+    # Give the thread that is writing the console log up to 10 seconds
+    # to catch up and exit.  If it hasn't done so by then, it is very
+    # likely stuck in readline() because it spawned a child that is
+    # holding stdout or stderr open.
+    t.join(10)
+    with Console() as console:
+        if t.isAlive():
+            console.addLine("[Zuul] standard output/error still open "
+                            "after child exited")
+        console.addLine("[Zuul] Task exit code: %s\n" % ret)
+    return ret
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            command=dict(required=True, default=None),
+            cwd=dict(required=True, default=None),
+            parameters=dict(default={}, type='dict')
+        )
+    )
+
+    p = module.params
+    env = p['parameters'].copy()
+    ret = run(p['cwd'], p['command'], env)
+    if ret == 0:
+        module.exit_json(changed=True, rc=ret)
+    else:
+        module.fail_json(msg="Exit code %s" % ret, rc=ret)
+
+from ansible.module_utils.basic import *  # noqa
+
+if __name__ == '__main__':
+    main()
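A sketch of driving run() from above directly (hypothetical values; in
practice Ansible invokes main() with the module parameters):

    ret = run(cwd='/home/zuul/workspace',
              cmd='./run_tests.sh',
              args={'ZUUL_UUID': 'abc123'})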
diff --git a/zuul/ansible/plugins/__init__.py b/zuul/ansible/plugins/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/zuul/ansible/plugins/__init__.py
diff --git a/zuul/ansible/plugins/callback_plugins/__init__.py b/zuul/ansible/plugins/callback_plugins/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/zuul/ansible/plugins/callback_plugins/__init__.py
diff --git a/zuul/ansible/plugins/callback_plugins/timeout.py b/zuul/ansible/plugins/callback_plugins/timeout.py
new file mode 100644
index 0000000..1cfd10d
--- /dev/null
+++ b/zuul/ansible/plugins/callback_plugins/timeout.py
@@ -0,0 +1,52 @@
+# Copyright 2016 IBM Corp.
+#
+# This file is part of Zuul
+#
+# This file is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this file.  If not, see <http://www.gnu.org/licenses/>.
+
+import time
+
+from ansible.executor.task_result import TaskResult
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+    def __init__(self, *args, **kw):
+        super(CallbackModule, self).__init__(*args, **kw)
+        self._elapsed_time = 0.0
+        self._task_start_time = None
+        self._play = None
+
+    def v2_playbook_on_play_start(self, play):
+        self._play = play
+
+    def playbook_on_task_start(self, name, is_conditional):
+        self._task_start_time = time.time()
+
+    def v2_on_any(self, *args, **kw):
+        result = None
+        if args and isinstance(args[0], TaskResult):
+            result = args[0]
+        if not result:
+            return
+
+        if self._task_start_time is not None:
+            task_time = time.time() - self._task_start_time
+            self._elapsed_time += task_time
+        if self._play and result._host:
+            manager = self._play.get_variable_manager()
+            facts = dict(elapsed_time=int(self._elapsed_time))
+
+            manager.set_nonpersistent_facts(result._host, facts)
+        self._task_start_time = None
diff --git a/zuul/change_matcher.py b/zuul/change_matcher.py
index ed380f0..ca2d93f 100644
--- a/zuul/change_matcher.py
+++ b/zuul/change_matcher.py
@@ -101,7 +101,7 @@
         yield self.commit_regex
 
     def matches(self, change):
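+        # change.files always includes the commit message entry, so a
+        # change must list more than one entry to have touched real files.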
-        if not (hasattr(change, 'files') and change.files):
+        if not (hasattr(change, 'files') and len(change.files) > 1):
             return False
         for file_ in change.files:
             matched_file = False
diff --git a/zuul/cmd/__init__.py b/zuul/cmd/__init__.py
index d754815..5ffd431 100644
--- a/zuul/cmd/__init__.py
+++ b/zuul/cmd/__init__.py
@@ -14,8 +14,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import six
 from six.moves import configparser as ConfigParser
-import cStringIO
 import extras
 import logging
 import logging.config
@@ -26,7 +26,9 @@
 
 yappi = extras.try_import('yappi')
 
-# No zuul imports here because they pull in paramiko which must not be
+import zuul.lib.connections
+
+# Do not import modules that will pull in paramiko which must not be
 # imported until after the daemonization.
 # https://github.com/paramiko/paramiko/issues/59
 # Similar situation with gear and statsd.
@@ -45,7 +47,7 @@
             yappi.start()
         else:
             yappi.stop()
-            yappi_out = cStringIO.StringIO()
+            yappi_out = six.BytesIO()
             yappi.get_func_stats().print_all(out=yappi_out)
             yappi.get_thread_stats().print_all(out=yappi_out)
             log.debug(yappi_out.getvalue())
@@ -59,6 +61,7 @@
     def __init__(self):
         self.args = None
         self.config = None
+        self.connections = {}
 
     def _get_version(self):
         from zuul.version import version_info as zuul_version_info
@@ -86,3 +89,7 @@
             logging.config.fileConfig(fp)
         else:
             logging.basicConfig(level=logging.DEBUG)
+
+    def configure_connections(self):
+        self.connections = zuul.lib.connections.configure_connections(
+            self.config)
diff --git a/zuul/cmd/client.py b/zuul/cmd/client.py
index 6e14ff5..1ce2828 100644
--- a/zuul/cmd/client.py
+++ b/zuul/cmd/client.py
@@ -154,7 +154,7 @@
         running_items = client.get_running_jobs()
 
         if len(running_items) == 0:
-            print "No jobs currently running"
+            print("No jobs currently running")
             return True
 
         all_fields = self._show_running_jobs_columns()
@@ -181,7 +181,7 @@
                         v += all_fields[f]['append']
                     values.append(v)
                 table.add_row(values)
-        print table
+        print(table)
         return True
 
     def _epoch_to_relative_time(self, epoch):
@@ -263,6 +263,12 @@
             'number': {
                 'title': 'Number'
             },
+            'node_labels': {
+                'title': 'Node Labels'
+            },
+            'node_name': {
+                'title': 'Node Name'
+            },
             'worker.name': {
                 'title': 'Worker'
             },
@@ -276,7 +282,7 @@
             'worker.fqdn': {
                 'title': 'Worker Domain'
             },
-            'worker.progam': {
+            'worker.program': {
                 'title': 'Worker Program'
             },
             'worker.version': {
diff --git a/zuul/cmd/cloner.py b/zuul/cmd/cloner.py
index e4a0e7b..4f8b9f4 100755
--- a/zuul/cmd/cloner.py
+++ b/zuul/cmd/cloner.py
@@ -27,13 +27,15 @@
     'branch',
     'ref',
     'url',
+    'project',
+    'newrev',
 )
 
 
 class Cloner(zuul.cmd.ZuulApp):
     log = logging.getLogger("zuul.Cloner")
 
-    def parse_arguments(self):
+    def parse_arguments(self, args=sys.argv[1:]):
         """Parse command line arguments and returns argparse structure"""
         parser = argparse.ArgumentParser(
             description='Zuul Project Gating System Cloner.')
@@ -51,8 +53,11 @@
                             version=self._get_version(),
                             help='show zuul version')
         parser.add_argument('--cache-dir', dest='cache_dir',
+                            default=os.environ.get('ZUUL_CACHE_DIR'),
                             help=('a directory that holds cached copies of '
-                                  'repos from which to make an initial clone.'
+                                  'repos from which to make an initial clone. '
+                                  'Can also be set via ZUUL_CACHE_DIR '
+                                  'environment variable.'
                                   ))
         parser.add_argument('git_base_url',
                             help='reference repo to clone from')
@@ -87,7 +92,7 @@
                 default=os.environ.get(env_name)
             )
 
-        args = parser.parse_args()
+        args = parser.parse_args(args)
         # Validate ZUUL_* arguments. If ref is provided then URL is required.
         zuul_args = [zuul_opt for zuul_opt, val in vars(args).items()
                      if zuul_opt.startswith('zuul') and val is not None]
@@ -95,6 +100,10 @@
             parser.error("Specifying a Zuul ref requires a Zuul url. "
                          "Define Zuul arguments either via environment "
                          "variables or using options above.")
+        if 'zuul_newrev' in zuul_args and 'zuul_project' not in zuul_args:
+            parser.error("ZUUL_NEWREV has been specified without "
+                         "ZUUL_PROJECT. Please define a ZUUL_PROJECT or do "
+                         "not set ZUUL_NEWREV.")
 
         self.args = args
 
@@ -142,6 +151,8 @@
             clone_map_file=self.args.clone_map_file,
             project_branches=project_branches,
             cache_dir=self.args.cache_dir,
+            zuul_newrev=self.args.zuul_newrev,
+            zuul_project=self.args.zuul_project,
         )
         cloner.execute()
 
diff --git a/zuul/cmd/launcher.py b/zuul/cmd/launcher.py
new file mode 100644
index 0000000..49643ae
--- /dev/null
+++ b/zuul/cmd/launcher.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright 2013-2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import daemon
+import extras
+
+# As of python-daemon 1.6, pidlockfile is no longer bundled; instead the
+# package depends on lockfile-0.9.1, which uses pidfile.
+pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile'])
+
+import logging
+import os
+import socket
+import sys
+import signal
+
+import zuul.cmd
+import zuul.launcher.ansiblelaunchserver
+
+# No zuul imports that pull in paramiko here; it must not be
+# imported until after the daemonization.
+# https://github.com/paramiko/paramiko/issues/59
+# Similar situation with gear and statsd.
+
+
+class Launcher(zuul.cmd.ZuulApp):
+
+    def parse_arguments(self):
+        parser = argparse.ArgumentParser(description='Zuul launch worker.')
+        parser.add_argument('-c', dest='config',
+                            help='specify the config file')
+        parser.add_argument('-d', dest='nodaemon', action='store_true',
+                            help='do not run as a daemon')
+        parser.add_argument('--version', dest='version', action='version',
+                            version=self._get_version(),
+                            help='show zuul version')
+        parser.add_argument('--keep-jobdir', dest='keep_jobdir',
+                            action='store_true',
+                            help='keep local jobdirs after run completes')
+        parser.add_argument('command',
+                            choices=zuul.launcher.ansiblelaunchserver.COMMANDS,
+                            nargs='?')
+
+        self.args = parser.parse_args()
+
+    def send_command(self, cmd):
+        if self.config.has_option('zuul', 'state_dir'):
+            state_dir = os.path.expanduser(
+                self.config.get('zuul', 'state_dir'))
+        else:
+            state_dir = '/var/lib/zuul'
+        path = os.path.join(state_dir, 'launcher.socket')
+        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        s.connect(path)
+        s.sendall('%s\n' % cmd)
+
+    def exit_handler(self):
+        self.launcher.stop()
+        self.launcher.join()
+
+    def main(self, daemon=True):
+        # See comment at top of file about zuul imports
+
+        self.setup_logging('launcher', 'log_config')
+
+        self.log = logging.getLogger("zuul.Launcher")
+
+        LaunchServer = zuul.launcher.ansiblelaunchserver.LaunchServer
+        self.launcher = LaunchServer(self.config,
+                                     keep_jobdir=self.args.keep_jobdir)
+        self.launcher.start()
+
+        signal.signal(signal.SIGUSR2, zuul.cmd.stack_dump_handler)
+        if daemon:
+            self.launcher.join()
+        else:
+            while True:
+                try:
+                    signal.pause()
+                except KeyboardInterrupt:
+                    print("Ctrl + C: asking launcher to exit nicely...\n")
+                    self.exit_handler()
+                    sys.exit(0)
+
+
+def main():
+    server = Launcher()
+    server.parse_arguments()
+    server.read_config()
+
+    if server.args.command in zuul.launcher.ansiblelaunchserver.COMMANDS:
+        server.send_command(server.args.command)
+        sys.exit(0)
+
+    server.configure_connections()
+
+    if server.config.has_option('launcher', 'pidfile'):
+        pid_fn = os.path.expanduser(server.config.get('launcher', 'pidfile'))
+    else:
+        pid_fn = '/var/run/zuul-launcher/zuul-launcher.pid'
+    pid = pid_file_module.TimeoutPIDLockFile(pid_fn, 10)
+
+    if server.args.nodaemon:
+        server.main(False)
+    else:
+        with daemon.DaemonContext(pidfile=pid):
+            server.main(True)
+
+
+if __name__ == "__main__":
+    sys.path.insert(0, '.')
+    main()
diff --git a/zuul/cmd/merger.py b/zuul/cmd/merger.py
index dc3484a..797a990 100644
--- a/zuul/cmd/merger.py
+++ b/zuul/cmd/merger.py
@@ -58,7 +58,8 @@
 
         self.setup_logging('merger', 'log_config')
 
-        self.merger = zuul.merger.server.MergeServer(self.config)
+        self.merger = zuul.merger.server.MergeServer(self.config,
+                                                     self.connections)
         self.merger.start()
 
         signal.signal(signal.SIGUSR1, self.exit_handler)
@@ -67,7 +68,7 @@
             try:
                 signal.pause()
             except KeyboardInterrupt:
-                print "Ctrl + C: asking merger to exit nicely...\n"
+                print("Ctrl + C: asking merger to exit nicely...\n")
                 self.exit_handler(signal.SIGINT, None)
 
 
@@ -76,6 +77,7 @@
     server.parse_arguments()
 
     server.read_config()
+    server.configure_connections()
 
     if server.config.has_option('zuul', 'state_dir'):
         state_dir = os.path.expanduser(server.config.get('zuul', 'state_dir'))
@@ -87,9 +89,7 @@
         f.close()
         os.unlink(test_fn)
     except Exception:
-        print
-        print "Unable to write to state directory: %s" % state_dir
-        print
+        print("\nUnable to write to state directory: %s\n" % state_dir)
         raise
 
     if server.config.has_option('merger', 'pidfile'):
diff --git a/zuul/cmd/server.py b/zuul/cmd/server.py
index 2d99a1f..0b7538d 100755
--- a/zuul/cmd/server.py
+++ b/zuul/cmd/server.py
@@ -60,6 +60,7 @@
 
     def reconfigure_handler(self, signum, frame):
         signal.signal(signal.SIGHUP, signal.SIG_IGN)
+        self.log.debug("Reconfiguration triggered")
         self.read_config()
         self.setup_logging('zuul', 'log_config')
         try:
@@ -85,14 +86,13 @@
         import zuul.trigger.gerrit
 
         logging.basicConfig(level=logging.DEBUG)
-        self.sched = zuul.scheduler.Scheduler()
-        self.sched.registerReporter(None, 'gerrit')
-        self.sched.registerReporter(None, 'smtp')
-        self.sched.registerTrigger(None, 'gerrit')
-        self.sched.registerTrigger(None, 'timer')
-        self.sched.registerTrigger(None, 'zuul')
+        self.sched = zuul.scheduler.Scheduler(self.config,
+                                              testonly=True)
+        self.configure_connections()
+        self.sched.registerConnections(self.connections, load=False)
         layout = self.sched.testConfig(self.config.get('zuul',
-                                                       'layout_config'))
+                                                       'layout_config'),
+                                       self.connections)
         if not job_list_path:
             return False
 
@@ -108,7 +108,7 @@
                 jobs.add(v)
         for job in sorted(layout.jobs):
             if job not in jobs:
-                print "Job %s not defined" % job
+                print("FAILURE: Job %s not defined" % job)
                 failure = True
         return failure
 
@@ -118,18 +118,18 @@
         if child_pid == 0:
             os.close(pipe_write)
             self.setup_logging('gearman_server', 'log_config')
-            import gear
+            import zuul.lib.gearserver
             statsd_host = os.environ.get('STATSD_HOST')
             statsd_port = int(os.environ.get('STATSD_PORT', 8125))
             if self.config.has_option('gearman_server', 'listen_address'):
                 host = self.config.get('gearman_server', 'listen_address')
             else:
                 host = None
-            gear.Server(4730,
-                        host=host,
-                        statsd_host=statsd_host,
-                        statsd_port=statsd_port,
-                        statsd_prefix='zuul.geard')
+            zuul.lib.gearserver.GearServer(4730,
+                                           host=host,
+                                           statsd_host=statsd_host,
+                                           statsd_port=statsd_port,
+                                           statsd_prefix='zuul.geard')
 
             # Keep running until the parent dies:
             pipe_read = os.fdopen(pipe_read)
@@ -150,11 +150,6 @@
         import zuul.launcher.gearman
         import zuul.merger.client
         import zuul.lib.swift
-        import zuul.reporter.gerrit
-        import zuul.reporter.smtp
-        import zuul.trigger.gerrit
-        import zuul.trigger.timer
-        import zuul.trigger.zuultrigger
         import zuul.webapp
         import zuul.rpclistener
 
@@ -166,44 +161,41 @@
         self.setup_logging('zuul', 'log_config')
         self.log = logging.getLogger("zuul.Server")
 
-        self.sched = zuul.scheduler.Scheduler()
+        self.sched = zuul.scheduler.Scheduler(self.config)
+        # TODO(jhesketh): Move swift into a connection?
         self.swift = zuul.lib.swift.Swift(self.config)
 
         gearman = zuul.launcher.gearman.Gearman(self.config, self.sched,
                                                 self.swift)
         merger = zuul.merger.client.MergeClient(self.config, self.sched)
-        gerrit = zuul.trigger.gerrit.Gerrit(self.config, self.sched)
-        timer = zuul.trigger.timer.Timer(self.config, self.sched)
-        zuultrigger = zuul.trigger.zuultrigger.ZuulTrigger(self.config,
-                                                           self.sched)
+
         if self.config.has_option('zuul', 'status_expiry'):
             cache_expiry = self.config.getint('zuul', 'status_expiry')
         else:
             cache_expiry = 1
-        webapp = zuul.webapp.WebApp(self.sched, cache_expiry=cache_expiry)
-        rpc = zuul.rpclistener.RPCListener(self.config, self.sched)
-        gerrit_reporter = zuul.reporter.gerrit.Reporter(gerrit)
-        smtp_reporter = zuul.reporter.smtp.Reporter(
-            self.config.get('smtp', 'default_from')
-            if self.config.has_option('smtp', 'default_from') else 'zuul',
-            self.config.get('smtp', 'default_to')
-            if self.config.has_option('smtp', 'default_to') else 'zuul',
-            self.config.get('smtp', 'server')
-            if self.config.has_option('smtp', 'server') else 'localhost',
-            self.config.get('smtp', 'port')
-            if self.config.has_option('smtp', 'port') else 25
-        )
 
+        if self.config.has_option('webapp', 'listen_address'):
+            listen_address = self.config.get('webapp', 'listen_address')
+        else:
+            listen_address = '0.0.0.0'
+
+        if self.config.has_option('webapp', 'port'):
+            port = self.config.getint('webapp', 'port')
+        else:
+            port = 8001
+
+        webapp = zuul.webapp.WebApp(
+            self.sched, port=port, cache_expiry=cache_expiry,
+            listen_address=listen_address)
+        rpc = zuul.rpclistener.RPCListener(self.config, self.sched)
+
+        self.configure_connections()
         self.sched.setLauncher(gearman)
         self.sched.setMerger(merger)
-        self.sched.registerTrigger(gerrit)
-        self.sched.registerTrigger(timer)
-        self.sched.registerTrigger(zuultrigger)
-        self.sched.registerReporter(gerrit_reporter)
-        self.sched.registerReporter(smtp_reporter)
 
         self.log.info('Starting scheduler')
         self.sched.start()
+        self.sched.registerConnections(self.connections)
         self.sched.reconfigure(self.config)
         self.sched.resume()
         self.log.info('Starting Webapp')
@@ -218,7 +210,7 @@
             try:
                 signal.pause()
             except KeyboardInterrupt:
-                print "Ctrl + C: asking scheduler to exit nicely...\n"
+                print("Ctrl + C: asking scheduler to exit nicely...\n")
                 self.exit_handler(signal.SIGINT, None)
 
 
diff --git a/zuul/connection/__init__.py b/zuul/connection/__init__.py
new file mode 100644
index 0000000..066b4db
--- /dev/null
+++ b/zuul/connection/__init__.py
@@ -0,0 +1,71 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseConnection(object):
+    """Base class for connections.
+
+    A connection is a shared object that sources, triggers and reporters can
+    use to speak with a remote API without needing to establish a new
+    connection each time or without having to authenticate each time.
+
+    Multiple instances of the same connection may exist with different
+    credentials, for example, allowing different pipelines to operate on
+    different Gerrit installations or to post back as different users.
+
+    Connections can implement their own public methods. Required connection
+    methods are validated by the {trigger, source, reporter} they are loaded
+    into. For example, a trigger will likely require some kind of query method
+    while a reporter may need a review method."""
+
+    def __init__(self, connection_name, connection_config):
+        # connection_name is the name given to this connection in zuul.ini
+        # connection_config is a dictionary of config_section from zuul.ini for
+        # this connection.
+        # __init__ shouldn't make the actual connection in case this connection
+        # isn't used in the layout.
+        self.connection_name = connection_name
+        self.connection_config = connection_config
+
+        # Keep track of the sources, triggers and reporters using this
+        # connection
+        self.attached_to = {
+            'source': [],
+            'trigger': [],
+            'reporter': [],
+        }
+
+    def onLoad(self):
+        pass
+
+    def onStop(self):
+        pass
+
+    def registerScheduler(self, sched):
+        self.sched = sched
+
+    def registerUse(self, what, instance):
+        self.attached_to[what].append(instance)
+
+    def maintainCache(self, relevant):
+        """Make cache contain relevant changes.
+
+        This lets the user supply a list of change objects that are
+        still in use.  Anything in our cache that isn't in the supplied
+        list should be safe to remove from the cache."""
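For illustration, a rough sketch of a connection driver built on this base
class (hypothetical names, not part of this change); real drivers such as
the Gerrit one below follow the same shape:

    from zuul.connection import BaseConnection

    class ExampleConnection(BaseConnection):
        driver_name = 'example'

        def onLoad(self):
            # Defer real network setup to onLoad so that merely being
            # configured in zuul.conf does not open a connection.
            self.client = None

        def query(self, query_string):
            # A trigger attached to this connection might require a
            # method like this to look up changes on the remote system.
            return []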
diff --git a/zuul/connection/gerrit.py b/zuul/connection/gerrit.py
new file mode 100644
index 0000000..62891cd
--- /dev/null
+++ b/zuul/connection/gerrit.py
@@ -0,0 +1,479 @@
+# Copyright 2011 OpenStack, LLC.
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import threading
+import select
+import json
+import time
+from six.moves import queue as Queue
+from six.moves import urllib
+import paramiko
+import logging
+import pprint
+import voluptuous as v
+
+from zuul.connection import BaseConnection
+from zuul.model import TriggerEvent
+
+
+class GerritEventConnector(threading.Thread):
+    """Move events from Gerrit to the scheduler."""
+
+    log = logging.getLogger("zuul.GerritEventConnector")
+    delay = 10.0
+
+    def __init__(self, connection):
+        super(GerritEventConnector, self).__init__()
+        self.daemon = True
+        self.connection = connection
+        self._stopped = False
+
+    def stop(self):
+        self._stopped = True
+        self.connection.addEvent(None)
+
+    def _handleEvent(self):
+        ts, data = self.connection.getEvent()
+        if self._stopped:
+            return
+        # Gerrit can produce inconsistent data immediately after an
+        # event, so ensure that we do not deliver the event to Zuul
+        # until at least a certain amount of time has passed.  Note
+        # that if we receive several events in succession, we will
+        # only need to delay for the first event.  In essence, Zuul
+        # should always be a constant number of seconds behind Gerrit.
+        now = time.time()
+        time.sleep(max((ts + self.delay) - now, 0.0))
+        event = TriggerEvent()
+        event.type = data.get('type')
+        event.trigger_name = 'gerrit'
+        change = data.get('change')
+        if change:
+            event.project_name = change.get('project')
+            event.branch = change.get('branch')
+            event.change_number = change.get('number')
+            event.change_url = change.get('url')
+            patchset = data.get('patchSet')
+            if patchset:
+                event.patch_number = patchset.get('number')
+                event.refspec = patchset.get('ref')
+            event.approvals = data.get('approvals', [])
+            event.comment = data.get('comment')
+        refupdate = data.get('refUpdate')
+        if refupdate:
+            event.project_name = refupdate.get('project')
+            event.ref = refupdate.get('refName')
+            event.oldrev = refupdate.get('oldRev')
+            event.newrev = refupdate.get('newRev')
+        # Map the event types to a field name holding a Gerrit
+        # account attribute. See Gerrit stream-event documentation
+        # in cmd-stream-events.html
+        accountfield_from_type = {
+            'patchset-created': 'uploader',
+            'draft-published': 'uploader',  # Gerrit 2.5/2.6
+            'change-abandoned': 'abandoner',
+            'change-restored': 'restorer',
+            'change-merged': 'submitter',
+            'merge-failed': 'submitter',  # Gerrit 2.5/2.6
+            'comment-added': 'author',
+            'ref-updated': 'submitter',
+            'reviewer-added': 'reviewer',  # Gerrit 2.5/2.6
+        }
+        try:
+            event.account = data.get(accountfield_from_type[event.type])
+        except KeyError:
+            self.log.warning("Received unrecognized event type '%s' "
+                             "from Gerrit. Cannot get account information."
+                             % event.type)
+            event.account = None
+
+        if (event.change_number and
+            self.connection.sched.getProject(event.project_name)):
+            # Call _getChange for the side effect of updating the
+            # cache.  Note that this modifies Change objects outside
+            # the main thread.
+            # NOTE(jhesketh): Ideally we'd just remove the change from the
+            # cache to denote that it needs updating. However the change
+            # object is already used by Item's and hence BuildSet's etc. and
+            # we need to update those objects by reference so that they have
+            # the correct/new information and also avoid hitting gerrit
+            # multiple times.
+            if self.connection.attached_to['source']:
+                self.connection.attached_to['source'][0]._getChange(
+                    event.change_number, event.patch_number, refresh=True)
+                # We only need to do this once since the connection maintains
+                # the cache (which is shared between all the sources)
+                # NOTE(jhesketh): We may couple sources and connections again
+                # at which point this becomes more sensible.
+        self.connection.sched.addEvent(event)
+
+    def run(self):
+        while True:
+            if self._stopped:
+                return
+            try:
+                self._handleEvent()
+            except:
+                self.log.exception("Exception moving Gerrit event:")
+            finally:
+                self.connection.eventDone()
+
+
+class GerritWatcher(threading.Thread):
+    log = logging.getLogger("gerrit.GerritWatcher")
+    poll_timeout = 500
+
+    def __init__(self, gerrit_connection, username, hostname, port=29418,
+                 keyfile=None):
+        threading.Thread.__init__(self)
+        self.username = username
+        self.keyfile = keyfile
+        self.hostname = hostname
+        self.port = port
+        self.gerrit_connection = gerrit_connection
+        self._stopped = False
+
+    def _read(self, fd):
+        l = fd.readline()
+        data = json.loads(l)
+        self.log.debug("Received data from Gerrit event stream: \n%s" %
+                       pprint.pformat(data))
+        self.gerrit_connection.addEvent(data)
+
+    def _listen(self, stdout, stderr):
+        poll = select.poll()
+        poll.register(stdout.channel)
+        while not self._stopped:
+            ret = poll.poll(self.poll_timeout)
+            for (fd, event) in ret:
+                if fd == stdout.channel.fileno():
+                    if event == select.POLLIN:
+                        self._read(stdout)
+                    else:
+                        raise Exception("event on ssh connection")
+
+    def _run(self):
+        try:
+            client = paramiko.SSHClient()
+            client.load_system_host_keys()
+            client.set_missing_host_key_policy(paramiko.WarningPolicy())
+            client.connect(self.hostname,
+                           username=self.username,
+                           port=self.port,
+                           key_filename=self.keyfile)
+
+            stdin, stdout, stderr = client.exec_command("gerrit stream-events")
+
+            self._listen(stdout, stderr)
+
+            if not stdout.channel.exit_status_ready():
+                # The stream-events command is still running but we are done
+                # polling on stdout, most likely due to being asked to stop.
+                # Try to stop the stream-events command by sending Ctrl-C
+                stdin.write("\x03")
+                time.sleep(.2)
+                if not stdout.channel.exit_status_ready():
+                    # We're still not ready to exit; let's force the
+                    # channel closed now.
+                    stdout.channel.close()
+            ret = stdout.channel.recv_exit_status()
+            self.log.debug("SSH exit status: %s" % ret)
+            client.close()
+
+            if ret and ret not in [-1, 130]:
+                raise Exception("Gerrit error executing stream-events")
+        except:
+            self.log.exception("Exception on ssh event stream:")
+            time.sleep(5)
+
+    def run(self):
+        while not self._stopped:
+            self._run()
+
+    def stop(self):
+        self.log.debug("Stopping watcher")
+        self._stopped = True
+
+
+class GerritConnection(BaseConnection):
+    driver_name = 'gerrit'
+    log = logging.getLogger("connection.gerrit")
+
+    def __init__(self, connection_name, connection_config):
+        super(GerritConnection, self).__init__(connection_name,
+                                               connection_config)
+        if 'server' not in self.connection_config:
+            raise Exception('server is required for gerrit connections in '
+                            '%s' % self.connection_name)
+        if 'user' not in self.connection_config:
+            raise Exception('user is required for gerrit connections in '
+                            '%s' % self.connection_name)
+
+        self.user = self.connection_config.get('user')
+        self.server = self.connection_config.get('server')
+        self.port = int(self.connection_config.get('port', 29418))
+        self.keyfile = self.connection_config.get('sshkey', None)
+        self.watcher_thread = None
+        self.event_queue = None
+        self.client = None
+
+        self.baseurl = self.connection_config.get('baseurl',
+                                                  'https://%s' % self.server)
+
+        self._change_cache = {}
+        self.gerrit_event_connector = None
+
+    def getCachedChange(self, key):
+        if key in self._change_cache:
+            return self._change_cache.get(key)
+        return None
+
+    def updateChangeCache(self, key, value):
+        self._change_cache[key] = value
+
+    def deleteCachedChange(self, key):
+        if key in self._change_cache:
+            del self._change_cache[key]
+
+    def maintainCache(self, relevant):
+        # This lets the user supply a list of change objects that are
+        # still in use.  Anything in our cache that isn't in the supplied
+        # list should be safe to remove from the cache.
+        remove = []
+        for key, change in self._change_cache.items():
+            if change not in relevant:
+                remove.append(key)
+        for key in remove:
+            del self._change_cache[key]
+
+    def addEvent(self, data):
+        return self.event_queue.put((time.time(), data))
+
+    def getEvent(self):
+        return self.event_queue.get()
+
+    def eventDone(self):
+        self.event_queue.task_done()
+
+    def review(self, project, change, message, action={}):
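+        # Build a "gerrit review" CLI invocation, e.g. (hypothetical
+        # values):
+        #   gerrit review --project foo/bar --message "Build succeeded"
+        #       --verified 1 1234,5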
+        cmd = 'gerrit review --project %s' % project
+        if message:
+            cmd += ' --message "%s"' % message
+        for key, val in action.items():
+            if val is True:
+                cmd += ' --%s' % key
+            else:
+                cmd += ' --%s %s' % (key, val)
+        cmd += ' %s' % change
+        out, err = self._ssh(cmd)
+        return err
+
+    def query(self, query):
+        args = '--all-approvals --comments --commit-message'
+        args += ' --current-patch-set --dependencies --files'
+        args += ' --patch-sets --submit-records'
+        cmd = 'gerrit query --format json %s %s' % (
+            args, query)
+        out, err = self._ssh(cmd)
+        if not out:
+            return False
+        lines = out.split('\n')
+        if not lines:
+            return False
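+        # Gerrit emits one JSON object per line; the first line is the
+        # change record (a trailing statistics line is ignored here).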
+        data = json.loads(lines[0])
+        if not data:
+            return False
+        self.log.debug("Received data from Gerrit query: \n%s" %
+                       (pprint.pformat(data)))
+        return data
+
+    def simpleQuery(self, query):
+        def _query_chunk(query):
+            args = '--commit-message --current-patch-set'
+
+            cmd = 'gerrit query --format json %s %s' % (
+                args, query)
+            out, err = self._ssh(cmd)
+            if not out:
+                return False, None
+            lines = out.split('\n')
+            if not lines:
+                return False, None
+
+            # filter out blank lines
+            data = [json.loads(line) for line in lines
+                    if line.startswith('{')]
+
+            # check last entry for more changes
+            more_changes = None
+            if 'moreChanges' in data[-1]:
+                more_changes = data[-1]['moreChanges']
+
+            # we have to remove the statistics line
+            del data[-1]
+
+            if not data:
+                return False, more_changes
+            self.log.debug("Received data from Gerrit query: \n%s" %
+                           (pprint.pformat(data)))
+            return data, more_changes
+
+        # gerrit returns 500 results by default, so implement paging
+        # for large projects like nova
+        alldata = []
+        chunk, more_changes = _query_chunk(query)
+        while chunk:
+            alldata.extend(chunk)
+            if more_changes is None:
+                # continue sortKey based (before Gerrit 2.9)
+                resume = "resume_sortkey:'%s'" % chunk[-1]["sortKey"]
+            elif more_changes:
+                # continue moreChanges based (since Gerrit 2.9)
+                resume = "-S %d" % len(alldata)
+            else:
+                # no more changes
+                break
+
+            chunk, more_changes = _query_chunk("%s %s" % (query, resume))
+        return alldata
+
+    def _open(self):
+        client = paramiko.SSHClient()
+        client.load_system_host_keys()
+        client.set_missing_host_key_policy(paramiko.WarningPolicy())
+        client.connect(self.server,
+                       username=self.user,
+                       port=self.port,
+                       key_filename=self.keyfile)
+        self.client = client
+
+    def _ssh(self, command, stdin_data=None):
+        if not self.client:
+            self._open()
+
+        try:
+            self.log.debug("SSH command:\n%s" % command)
+            stdin, stdout, stderr = self.client.exec_command(command)
+        except Exception:
+            self._open()
+            stdin, stdout, stderr = self.client.exec_command(command)
+
+        if stdin_data:
+            stdin.write(stdin_data)
+
+        out = stdout.read()
+        self.log.debug("SSH received stdout:\n%s" % out)
+
+        ret = stdout.channel.recv_exit_status()
+        self.log.debug("SSH exit status: %s" % ret)
+
+        err = stderr.read()
+        self.log.debug("SSH received stderr:\n%s" % err)
+        if ret:
+            raise Exception("Gerrit error executing %s" % command)
+        return (out, err)
+
+    def getInfoRefs(self, project):
+        url = "%s/p/%s/info/refs?service=git-upload-pack" % (
+            self.baseurl, project)
+        try:
+            data = urllib.request.urlopen(url).read()
+        except Exception:
+            self.log.error("Cannot get references from %s" % url)
+            raise  # preserve the urllib error information
+        ret = {}
+        read_headers = False
+        read_advertisement = False
+        if data[4] != '#':
+            raise Exception("Gerrit repository does not support "
+                            "git-upload-pack")
+        i = 0
+        while i < len(data):
+            if len(data) - i < 4:
+                raise Exception("Invalid length in info/refs")
+            plen = int(data[i:i + 4], 16)
+            i += 4
+            # It's the length of the packet, including the 4 bytes of the
+            # length itself, unless it's null, in which case the length is
+            # not included.
+            if plen > 0:
+                plen -= 4
+            if len(data) - i < plen:
+                raise Exception("Invalid data in info/refs")
+            line = data[i:i + plen]
+            i += plen
+            if not read_headers:
+                if plen == 0:
+                    read_headers = True
+                continue
+            if not read_advertisement:
+                read_advertisement = True
+                continue
+            if plen == 0:
+                # The terminating null
+                continue
+            line = line.strip()
+            revision, ref = line.split()
+            ret[ref] = revision
+        return ret
+
+    def getGitUrl(self, project):
+        url = 'ssh://%s@%s:%s/%s' % (self.user, self.server, self.port,
+                                     project.name)
+        return url
+
+    def getGitwebUrl(self, project, sha=None):
+        url = '%s/gitweb?p=%s.git' % (self.baseurl, project)
+        if sha:
+            url += ';a=commitdiff;h=' + sha
+        return url
+
+    def onLoad(self):
+        self.log.debug("Starting Gerrit Conncetion/Watchers")
+        self._start_watcher_thread()
+        self._start_event_connector()
+
+    def onStop(self):
+        self.log.debug("Stopping Gerrit Conncetion/Watchers")
+        self._stop_watcher_thread()
+        self._stop_event_connector()
+
+    def _stop_watcher_thread(self):
+        if self.watcher_thread:
+            self.watcher_thread.stop()
+            self.watcher_thread.join()
+
+    def _start_watcher_thread(self):
+        self.event_queue = Queue.Queue()
+        self.watcher_thread = GerritWatcher(
+            self,
+            self.user,
+            self.server,
+            self.port,
+            keyfile=self.keyfile)
+        self.watcher_thread.start()
+
+    def _stop_event_connector(self):
+        if self.gerrit_event_connector:
+            self.gerrit_event_connector.stop()
+            self.gerrit_event_connector.join()
+
+    def _start_event_connector(self):
+        self.gerrit_event_connector = GerritEventConnector(self)
+        self.gerrit_event_connector.start()
+
+
+def getSchema():
+    gerrit_connection = v.Any(str, v.Schema({}, extra=True))
+    return gerrit_connection
diff --git a/zuul/connection/smtp.py b/zuul/connection/smtp.py
new file mode 100644
index 0000000..d3eccff
--- /dev/null
+++ b/zuul/connection/smtp.py
@@ -0,0 +1,63 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import voluptuous as v
+import smtplib
+
+from email.mime.text import MIMEText
+
+from zuul.connection import BaseConnection
+
+
+class SMTPConnection(BaseConnection):
+    driver_name = 'smtp'
+    log = logging.getLogger("connection.smtp")
+
+    def __init__(self, connection_name, connection_config):
+
+        super(SMTPConnection, self).__init__(connection_name,
+                                             connection_config)
+
+        self.smtp_server = self.connection_config.get(
+            'server', 'localhost')
+        self.smtp_port = int(self.connection_config.get('port', 25))
+        self.smtp_default_from = self.connection_config.get(
+            'default_from', 'zuul')
+        self.smtp_default_to = self.connection_config.get(
+            'default_to', 'zuul')
+
+    def sendMail(self, subject, message, from_email=None, to_email=None):
+        # Create a text/plain email message
+        from_email = from_email \
+            if from_email is not None else self.smtp_default_from
+        to_email = to_email if to_email is not None else self.smtp_default_to
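+        # to_email may be a single address or a comma-separated list; it
+        # is split into individual recipients when the mail is sent.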
+
+        msg = MIMEText(message)
+        msg['Subject'] = subject
+        msg['From'] = from_email
+        msg['To'] = to_email
+
+        try:
+            s = smtplib.SMTP(self.smtp_server, self.smtp_port)
+            s.sendmail(from_email, to_email.split(','), msg.as_string())
+            s.quit()
+        except Exception:
+            return "Could not send email via SMTP"
+        return
+
+
+def getSchema():
+    smtp_connection = v.Any(str, v.Schema({}, extra=True))
+    return smtp_connection
diff --git a/zuul/exceptions.py b/zuul/exceptions.py
new file mode 100644
index 0000000..40a1e40
--- /dev/null
+++ b/zuul/exceptions.py
@@ -0,0 +1,35 @@
+# Copyright 2015 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class ChangeNotFound(Exception):
+    def __init__(self, number, ps):
+        self.number = number
+        self.ps = ps
+        self.change = "%s,%s" % (str(number), str(ps))
+        message = "Change %s not found" % self.change
+        super(ChangeNotFound, self).__init__(message)
+
+
+class RevNotFound(Exception):
+    def __init__(self, project, rev):
+        self.project = project
+        self.revision = rev
+        message = ("Failed to checkout project '%s' at revision '%s'"
+                   % (self.project, self.revision))
+        super(RevNotFound, self).__init__(message)
+
+
+class MergeFailure(Exception):
+    pass
diff --git a/zuul/launcher/ansiblelaunchserver.py b/zuul/launcher/ansiblelaunchserver.py
new file mode 100644
index 0000000..95fc2fa
--- /dev/null
+++ b/zuul/launcher/ansiblelaunchserver.py
@@ -0,0 +1,1384 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import logging
+import os
+import re
+import shutil
+import signal
+import socket
+import subprocess
+import tempfile
+import threading
+import time
+import traceback
+import Queue
+import uuid
+
+import gear
+import yaml
+import jenkins_jobs.builder
+import jenkins_jobs.formatter
+import zmq
+
+import zuul.ansible.library
+import zuul.ansible.plugins.callback_plugins
+from zuul.lib import commandsocket
+
+ANSIBLE_WATCHDOG_GRACE = 5 * 60
+ANSIBLE_DEFAULT_TIMEOUT = 2 * 60 * 60
+ANSIBLE_DEFAULT_POST_TIMEOUT = 10 * 60
+
+
+COMMANDS = ['reconfigure', 'stop', 'pause', 'unpause', 'release', 'graceful',
+            'verbose', 'unverbose']
+
+
+def boolify(x):
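+    # bool("0") would be True, so numeric strings are converted through
+    # int() to get the intended truth value.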
+    if isinstance(x, str):
+        return bool(int(x))
+    return bool(x)
+
+
+class LaunchGearWorker(gear.Worker):
+    def __init__(self, *args, **kw):
+        self.__launch_server = kw.pop('launch_server')
+        super(LaunchGearWorker, self).__init__(*args, **kw)
+
+    def handleNoop(self, packet):
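+        # Delay the noop response in proportion to the number of workers
+        # already running here, so that less-loaded launch servers tend
+        # to pick up new jobs first.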
+        workers = len(self.__launch_server.node_workers)
+        delay = (workers ** 2) / 1000.0
+        time.sleep(delay)
+        return super(LaunchGearWorker, self).handleNoop(packet)
+
+
+class NodeGearWorker(gear.Worker):
+    MASS_DO = 101
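+    # MASS_DO (101) is a protocol extension supported by Zuul's gear
+    # server that registers many functions in a single packet instead
+    # of one CAN_DO per function.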
+
+    def sendMassDo(self, functions):
+        data = b'\x00'.join([gear.convert_to_bytes(x) for x in functions])
+        self.broadcast_lock.acquire()
+        try:
+            p = gear.Packet(gear.constants.REQ, self.MASS_DO, data)
+            self.broadcast(p)
+        finally:
+            self.broadcast_lock.release()
+
+
+class Watchdog(object):
+    def __init__(self, timeout, function, args):
+        self.timeout = timeout
+        self.function = function
+        self.args = args
+        self.thread = threading.Thread(target=self._run)
+        self.thread.daemon = True
+
+    def _run(self):
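+        # Wake every 10 seconds, so expiry is detected with roughly
+        # 10-second granularity.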
+        while self._running and time.time() < self.end:
+            time.sleep(10)
+        if self._running:
+            self.function(*self.args)
+
+    def start(self):
+        self._running = True
+        self.end = time.time() + self.timeout
+        self.thread.start()
+
+    def stop(self):
+        self._running = False
+
+
+class JobDir(object):
+    def __init__(self, keep=False):
+        self.keep = keep
+        self.root = tempfile.mkdtemp()
+        self.ansible_root = os.path.join(self.root, 'ansible')
+        os.makedirs(self.ansible_root)
+        self.known_hosts = os.path.join(self.ansible_root, 'known_hosts')
+        self.inventory = os.path.join(self.ansible_root, 'inventory')
+        self.playbook = os.path.join(self.ansible_root, 'playbook')
+        self.post_playbook = os.path.join(self.ansible_root, 'post_playbook')
+        self.config = os.path.join(self.ansible_root, 'ansible.cfg')
+        self.script_root = os.path.join(self.ansible_root, 'scripts')
+        self.ansible_log = os.path.join(self.ansible_root, 'ansible_log.txt')
+        os.makedirs(self.script_root)
+        self.staging_root = os.path.join(self.root, 'staging')
+        os.makedirs(self.staging_root)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, etype, value, tb):
+        if not self.keep:
+            shutil.rmtree(self.root)
+
+
+class LaunchServer(object):
+    log = logging.getLogger("zuul.LaunchServer")
+    site_section_re = re.compile('site "(.*?)"')
+    node_section_re = re.compile('node "(.*?)"')
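+    # These match configuration sections such as [site "wiki"] or
+    # [node "trusty-1"] (hypothetical names).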
+
+    def __init__(self, config, keep_jobdir=False):
+        self.config = config
+        self.options = dict(
+            verbose=False
+        )
+        self.keep_jobdir = keep_jobdir
+        self.hostname = socket.gethostname()
+        self.registered_functions = set()
+        self.node_workers = {}
+        self.jobs = {}
+        self.builds = {}
+        self.zmq_send_queue = Queue.Queue()
+        self.termination_queue = Queue.Queue()
+        self.sites = {}
+        self.static_nodes = {}
+        self.command_map = dict(
+            reconfigure=self.reconfigure,
+            stop=self.stop,
+            pause=self.pause,
+            unpause=self.unpause,
+            release=self.release,
+            graceful=self.graceful,
+            verbose=self.verboseOn,
+            unverbose=self.verboseOff,
+        )
+
+        if config.has_option('launcher', 'accept_nodes'):
+            self.accept_nodes = config.getboolean('launcher',
+                                                  'accept_nodes')
+        else:
+            self.accept_nodes = True
+        self.config_accept_nodes = self.accept_nodes
+
+        if self.config.has_option('zuul', 'state_dir'):
+            state_dir = os.path.expanduser(
+                self.config.get('zuul', 'state_dir'))
+        else:
+            state_dir = '/var/lib/zuul'
+        path = os.path.join(state_dir, 'launcher.socket')
+        self.command_socket = commandsocket.CommandSocket(path)
+        ansible_dir = os.path.join(state_dir, 'ansible')
+        plugins_dir = os.path.join(ansible_dir, 'plugins')
+        self.callback_dir = os.path.join(plugins_dir, 'callback_plugins')
+        if not os.path.exists(self.callback_dir):
+            os.makedirs(self.callback_dir)
+        self.library_dir = os.path.join(ansible_dir, 'library')
+        if not os.path.exists(self.library_dir):
+            os.makedirs(self.library_dir)
+
+        callback_path = os.path.dirname(os.path.abspath(
+            zuul.ansible.plugins.callback_plugins.__file__))
+        for fn in os.listdir(callback_path):
+            shutil.copy(os.path.join(callback_path, fn), self.callback_dir)
+
+        library_path = os.path.dirname(os.path.abspath(
+            zuul.ansible.library.__file__))
+        for fn in os.listdir(library_path):
+            shutil.copy(os.path.join(library_path, fn), self.library_dir)
+
+        for section in config.sections():
+            m = self.site_section_re.match(section)
+            if m:
+                sitename = m.group(1)
+                d = {}
+                d['host'] = config.get(section, 'host')
+                d['user'] = config.get(section, 'user')
+                if config.has_option(section, 'pass'):
+                    d['pass'] = config.get(section, 'pass')
+                else:
+                    d['pass'] = ''
+                if config.has_option(section, 'root'):
+                    d['root'] = config.get(section, 'root')
+                else:
+                    d['root'] = '/'
+                self.sites[sitename] = d
+                continue
+            m = self.node_section_re.match(section)
+            if m:
+                nodename = m.group(1)
+                d = {}
+                d['name'] = nodename
+                d['host'] = config.get(section, 'host')
+                if config.has_option(section, 'description'):
+                    d['description'] = config.get(section, 'description')
+                else:
+                    d['description'] = ''
+                if config.has_option(section, 'labels'):
+                    d['labels'] = config.get(section, 'labels').split(',')
+                else:
+                    d['labels'] = []
+                self.static_nodes[nodename] = d
+                continue
+
+    def start(self):
+        self._gearman_running = True
+        self._zmq_running = True
+        self._reaper_running = True
+        self._command_running = True
+
+        # Setup ZMQ
+        self.zcontext = zmq.Context()
+        self.zsocket = self.zcontext.socket(zmq.PUB)
+        self.zsocket.bind("tcp://*:8888")
+
+        # Setup Gearman
+        server = self.config.get('gearman', 'server')
+        if self.config.has_option('gearman', 'port'):
+            port = self.config.get('gearman', 'port')
+        else:
+            port = 4730
+        self.worker = LaunchGearWorker('Zuul Launch Server',
+                                       launch_server=self)
+        self.worker.addServer(server, port)
+        self.log.debug("Waiting for server")
+        self.worker.waitForServer()
+        self.log.debug("Registering")
+        self.register()
+
+        # Start command socket
+        self.log.debug("Starting command processor")
+        self.command_socket.start()
+        self.command_thread = threading.Thread(target=self.runCommand)
+        self.command_thread.daemon = True
+        self.command_thread.start()
+
+        # Load JJB config
+        self.loadJobs()
+
+        # Start ZMQ worker thread
+        self.log.debug("Starting ZMQ processor")
+        self.zmq_thread = threading.Thread(target=self.runZMQ)
+        self.zmq_thread.daemon = True
+        self.zmq_thread.start()
+
+        # Start node worker reaper thread
+        self.log.debug("Starting reaper")
+        self.reaper_thread = threading.Thread(target=self.runReaper)
+        self.reaper_thread.daemon = True
+        self.reaper_thread.start()
+
+        # Start Gearman worker thread
+        self.log.debug("Starting worker")
+        self.gearman_thread = threading.Thread(target=self.run)
+        self.gearman_thread.daemon = True
+        self.gearman_thread.start()
+
+        # Start static workers
+        for node in self.static_nodes.values():
+            self.log.debug("Creating static node with arguments: %s" % (node,))
+            self._launchWorker(node)
+
+    def loadJobs(self):
+        self.log.debug("Loading jobs")
+        builder = JJB()
+        path = self.config.get('launcher', 'jenkins_jobs')
+        builder.load_files([path])
+        builder.parser.expandYaml()
+        unseen = set(self.jobs.keys())
+        for job in builder.parser.jobs:
+            builder.expandMacros(job)
+            self.jobs[job['name']] = job
+            unseen.discard(job['name'])
+        for name in unseen:
+            del self.jobs[name]
+
+    def register(self):
+        new_functions = set()
+        if self.accept_nodes:
+            new_functions.add("node_assign:zuul")
+        new_functions.add("stop:%s" % self.hostname)
+        new_functions.add("set_description:%s" % self.hostname)
+        new_functions.add("node_revoke:%s" % self.hostname)
+
+        for function in new_functions - self.registered_functions:
+            self.worker.registerFunction(function)
+        for function in self.registered_functions - new_functions:
+            self.worker.unRegisterFunction(function)
+        self.registered_functions = new_functions
+
+    def reconfigure(self):
+        self.log.debug("Reconfiguring")
+        self.loadJobs()
+        for node in self.node_workers.values():
+            try:
+                if node.isAlive():
+                    node.queue.put(dict(action='reconfigure'))
+            except Exception:
+                self.log.exception("Exception sending reconfigure command "
+                                   "to worker:")
+        self.log.debug("Reconfiguration complete")
+
+    def pause(self):
+        self.log.debug("Pausing")
+        self.accept_nodes = False
+        self.register()
+        for node in self.node_workers.values():
+            try:
+                if node.isAlive():
+                    node.queue.put(dict(action='pause'))
+            except Exception:
+                self.log.exception("Exception sending pause command "
+                                   "to worker:")
+        self.log.debug("Paused")
+
+    def unpause(self):
+        self.log.debug("Unpausing")
+        self.accept_nodes = self.config_accept_nodes
+        self.register()
+        for node in self.node_workers.values():
+            try:
+                if node.isAlive():
+                    node.queue.put(dict(action='unpause'))
+            except Exception:
+                self.log.exception("Exception sending unpause command "
+                                   "to worker:")
+        self.log.debug("Unpaused")
+
+    def release(self):
+        self.log.debug("Releasing idle nodes")
+        for node in self.node_workers.values():
+            if node.name in self.static_nodes:
+                continue
+            try:
+                if node.isAlive():
+                    node.queue.put(dict(action='release'))
+            except Exception:
+                self.log.exception("Exception sending release command "
+                                   "to worker:")
+        self.log.debug("Finished releasing idle nodes")
+
+    def graceful(self):
+        # Note: this is run in the command processing thread; no more
+        # external commands will be processed after this.
+        self.log.debug("Gracefully stopping")
+        self.pause()
+        self.release()
+        self.log.debug("Waiting for all builds to finish")
+        while self.builds:
+            time.sleep(5)
+        self.log.debug("All builds are finished")
+        self.stop()
+
+    def stop(self):
+        self.log.debug("Stopping")
+        # First, stop accepting new jobs
+        self._gearman_running = False
+        self._reaper_running = False
+        self.worker.shutdown()
+        # Then stop all of the workers
+        for node in self.node_workers.values():
+            try:
+                if node.isAlive():
+                    node.stop()
+            except Exception:
+                self.log.exception("Exception sending stop command to worker:")
+        # Stop ZMQ afterwards so that the send queue is flushed
+        self._zmq_running = False
+        self.zmq_send_queue.put(None)
+        self.zmq_send_queue.join()
+        # Stop command processing
+        self._command_running = False
+        self.command_socket.stop()
+        # Join the gearman thread which was stopped earlier.
+        self.gearman_thread.join()
+        # The command thread is joined in the join() method of this
+        # class, which is called by the command shell.
+        self.log.debug("Stopped")
+
+    def verboseOn(self):
+        self.log.debug("Enabling verbose mode")
+        self.options['verbose'] = True
+
+    def verboseOff(self):
+        self.log.debug("Disabling verbose mode")
+        self.options['verbose'] = False
+
+    def join(self):
+        self.command_thread.join()
+
+    def runCommand(self):
+        while self._command_running:
+            try:
+                command = self.command_socket.get()
+                self.command_map[command]()
+            except Exception:
+                self.log.exception("Exception while processing command")
+
+    def runZMQ(self):
+        while self._zmq_running or not self.zmq_send_queue.empty():
+            try:
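+                # None is a sentinel queued by stop() purely to wake this
+                # loop so it can notice that _zmq_running is False.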
+                item = self.zmq_send_queue.get()
+                self.log.debug("Got ZMQ event %s" % (item,))
+                if item is None:
+                    continue
+                self.zsocket.send(item)
+            except Exception:
+                self.log.exception("Exception while processing ZMQ events")
+            finally:
+                self.zmq_send_queue.task_done()
+
+    def run(self):
+        while self._gearman_running:
+            try:
+                job = self.worker.getJob()
+                try:
+                    if job.name.startswith('node_assign:'):
+                        self.log.debug("Got node_assign job: %s" % job.unique)
+                        self.assignNode(job)
+                    elif job.name.startswith('stop:'):
+                        self.log.debug("Got stop job: %s" % job.unique)
+                        self.stopJob(job)
+                    elif job.name.startswith('set_description:'):
+                        self.log.debug("Got set_description job: %s" %
+                                       job.unique)
+                        job.sendWorkComplete()
+                    elif job.name.startswith('node_revoke:'):
+                        self.log.debug("Got node_revoke job: %s" % job.unique)
+                        self.revokeNode(job)
+                    else:
+                        self.log.error("Unable to handle job %s" % job.name)
+                        job.sendWorkFail()
+                except Exception:
+                    self.log.exception("Exception while running job")
+                    job.sendWorkException(traceback.format_exc())
+            except gear.InterruptedError:
+                return
+            except Exception:
+                self.log.exception("Exception while getting job")
+
+    def assignNode(self, job):
+        args = json.loads(job.arguments)
+        self.log.debug("Assigned node with arguments: %s" % (args,))
+        self._launchWorker(args)
+        data = dict(manager=self.hostname)
+        job.sendWorkData(json.dumps(data))
+        job.sendWorkComplete()
+
+    def _launchWorker(self, args):
+        worker = NodeWorker(self.config, self.jobs, self.builds,
+                            self.sites, args['name'], args['host'],
+                            args['description'], args['labels'],
+                            self.hostname, self.zmq_send_queue,
+                            self.termination_queue, self.keep_jobdir,
+                            self.callback_dir, self.library_dir,
+                            self.options)
+        self.node_workers[worker.name] = worker
+
+        worker.thread = threading.Thread(target=worker.run)
+        worker.thread.start()
+
+    def revokeNode(self, job):
+        try:
+            args = json.loads(job.arguments)
+            self.log.debug("Revoke job with arguments: %s" % (args,))
+            name = args['name']
+            node = self.node_workers.get(name)
+            if not node:
+                self.log.debug("Unable to find worker %s" % (name,))
+                return
+            try:
+                if node.isAlive():
+                    node.queue.put(dict(action='stop'))
+                else:
+                    self.log.debug("Node %s is not alive while revoking node" %
+                                   (node.name,))
+            except Exception:
+                self.log.exception("Exception sending stop command "
+                                   "to worker:")
+        finally:
+            job.sendWorkComplete()
+
+    def stopJob(self, job):
+        try:
+            args = json.loads(job.arguments)
+            self.log.debug("Stop job with arguments: %s" % (args,))
+            unique = args['number']
+            build_worker_name = self.builds.get(unique)
+            if not build_worker_name:
+                self.log.debug("Unable to find build for job %s" % (unique,))
+                return
+            node = self.node_workers.get(build_worker_name)
+            if not node:
+                self.log.debug("Unable to find worker for job %s" % (unique,))
+                return
+            try:
+                if node.isAlive():
+                    node.queue.put(dict(action='abort'))
+                else:
+                    self.log.debug("Node %s is not alive while aborting job" %
+                                   (node.name,))
+            except Exception:
+                self.log.exception("Exception sending abort command "
+                                   "to worker:")
+        finally:
+            job.sendWorkComplete()
+
+    def runReaper(self):
+        # We don't actually care if all the events are processed
+        while self._reaper_running:
+            try:
+                item = self.termination_queue.get()
+                self.log.debug("Got termination event %s" % (item,))
+                if item is None:
+                    continue
+                worker = self.node_workers[item]
+                self.log.debug("Joining %s" % (item,))
+                worker.thread.join()
+                self.log.debug("Joined %s" % (item,))
+                del self.node_workers[item]
+            except Exception:
+                self.log.exception("Exception while processing "
+                                   "termination events:")
+            finally:
+                self.termination_queue.task_done()
+
+
+class NodeWorker(object):
+    def __init__(self, config, jobs, builds, sites, name, host,
+                 description, labels, manager_name, zmq_send_queue,
+                 termination_queue, keep_jobdir, callback_dir,
+                 library_dir, options):
+        self.log = logging.getLogger("zuul.NodeWorker.%s" % (name,))
+        self.log.debug("Creating node worker %s" % (name,))
+        self.config = config
+        self.jobs = jobs
+        self.builds = builds
+        self.sites = sites
+        self.name = name
+        self.host = host
+        self.description = description
+        if not isinstance(labels, list):
+            labels = [labels]
+        self.labels = labels
+        self.thread = None
+        self.registered_functions = set()
+        # If the unpaused Event is set, that means we should run jobs.
+        # If it is clear, then we are paused and should not run jobs.
+        self.unpaused = threading.Event()
+        self.unpaused.set()
+        self._running = True
+        self.queue = Queue.Queue()
+        self.manager_name = manager_name
+        self.zmq_send_queue = zmq_send_queue
+        self.termination_queue = termination_queue
+        self.keep_jobdir = keep_jobdir
+        self.running_job_lock = threading.Lock()
+        self.pending_registration = False
+        self.registration_lock = threading.Lock()
+        self._get_job_lock = threading.Lock()
+        self._got_job = False
+        self._job_complete_event = threading.Event()
+        self._running_job = False
+        self._aborted_job = False
+        self._sent_complete_event = False
+        self.ansible_job_proc = None
+        self.ansible_post_proc = None
+        self.workspace_root = config.get('launcher', 'workspace_root')
+        if self.config.has_option('launcher', 'private_key_file'):
+            self.private_key_file = config.get('launcher', 'private_key_file')
+        else:
+            self.private_key_file = '~/.ssh/id_rsa'
+        if self.config.has_option('launcher', 'username'):
+            self.username = config.get('launcher', 'username')
+        else:
+            self.username = 'zuul'
+        self.callback_dir = callback_dir
+        self.library_dir = library_dir
+        self.options = options
+
+    def isAlive(self):
+        # Meant to be called from the manager
+        if self.thread and self.thread.is_alive():
+            return True
+        return False
+
+    def run(self):
+        self.log.debug("Node worker %s starting" % (self.name,))
+        server = self.config.get('gearman', 'server')
+        if self.config.has_option('gearman', 'port'):
+            port = self.config.get('gearman', 'port')
+        else:
+            port = 4730
+        self.worker = NodeGearWorker(self.name)
+        self.worker.addServer(server, port)
+        self.log.debug("Waiting for server")
+        self.worker.waitForServer()
+        self.log.debug("Registering")
+        self.register()
+
+        self.gearman_thread = threading.Thread(target=self.runGearman)
+        self.gearman_thread.daemon = True
+        self.gearman_thread.start()
+
+        self.log.debug("Started")
+
+        while self._running or not self.queue.empty():
+            try:
+                self._runQueue()
+            except Exception:
+                self.log.exception("Exception in queue manager:")
+
+    def stop(self):
+        # If this is called locally, setting _running here takes effect
+        # immediately; if it is called remotely it does not, but the
+        # queue thread will set it when it processes the stop request.
+        self.log.debug("Submitting stop request")
+        self._running = False
+        self.unpaused.set()
+        self.queue.put(dict(action='stop'))
+        self.queue.join()
+
+    def pause(self):
+        self.unpaused.clear()
+        self.worker.stopWaitingForJobs()
+
+    def unpause(self):
+        self.unpaused.set()
+
+    def release(self):
+        # If this node is idle, stop it.
+        old_unpaused = self.unpaused.is_set()
+        if old_unpaused:
+            self.pause()
+        with self._get_job_lock:
+            if self._got_job:
+                self.log.debug("This worker is not idle")
+                if old_unpaused:
+                    self.unpause()
+                return
+        self.log.debug("Stopping due to release command")
+        self.queue.put(dict(action='stop'))
+
+    def _runQueue(self):
+        item = self.queue.get()
+        try:
+            if item['action'] == 'stop':
+                self.log.debug("Received stop request")
+                self._running = False
+                self.termination_queue.put(self.name)
+                if not self.abortRunningJob():
+                    self.sendFakeCompleteEvent()
+                else:
+                    self._job_complete_event.wait()
+                self.worker.shutdown()
+            elif item['action'] == 'pause':
+                self.log.debug("Received pause request")
+                self.pause()
+            elif item['action'] == 'unpause':
+                self.log.debug("Received unpause request")
+                self.unpause()
+            elif item['action'] == 'release':
+                self.log.debug("Received release request")
+                self.release()
+            elif item['action'] == 'reconfigure':
+                self.log.debug("Received reconfigure request")
+                self.register()
+            elif item['action'] == 'abort':
+                self.log.debug("Received abort request")
+                self.abortRunningJob()
+        finally:
+            self.queue.task_done()
+
+    def runGearman(self):
+        while self._running:
+            try:
+                self.unpaused.wait()
+                if self._running:
+                    self._runGearman()
+            except Exception:
+                self.log.exception("Exception in gearman manager:")
+            with self._get_job_lock:
+                self._got_job = False
+
+    def _runGearman(self):
+        if self.pending_registration:
+            self.register()
+        with self._get_job_lock:
+            try:
+                job = self.worker.getJob()
+                self._got_job = True
+            except gear.InterruptedError:
+                return
+        self.log.debug("Node worker %s got job %s" % (self.name, job.name))
+        try:
+            if job.name not in self.registered_functions:
+                self.log.error("Unable to handle job %s" % job.name)
+                job.sendWorkFail()
+                return
+            self.launch(job)
+        except Exception:
+            self.log.exception("Exception while running job")
+            job.sendWorkException(traceback.format_exc())
+
+    def generateFunctionNames(self, job):
+        # This only supports "node: foo" and "node: foo || bar"
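+        # e.g. a job "foo" whose node matches this worker's label
+        # "trusty" (hypothetical names) yields "build:foo" and
+        # "build:foo:trusty".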
+        ret = set()
+        job_labels = job.get('node')
+        matching_labels = set()
+        if job_labels:
+            job_labels = [x.strip() for x in job_labels.split('||')]
+            matching_labels = set(self.labels) & set(job_labels)
+            if not matching_labels:
+                return ret
+        ret.add('build:%s' % (job['name'],))
+        for label in matching_labels:
+            ret.add('build:%s:%s' % (job['name'], label))
+        return ret
+
+    def register(self):
+        if not self.registration_lock.acquire(False):
+            self.log.debug("Registration already in progress")
+            return
+        try:
+            if self._running_job:
+                self.pending_registration = True
+                self.log.debug("Ignoring registration due to running job")
+                return
+            self.log.debug("Updating registration")
+            self.pending_registration = False
+            new_functions = set()
+            for job in self.jobs.values():
+                new_functions |= self.generateFunctionNames(job)
+            self.worker.sendMassDo(new_functions)
+            self.registered_functions = new_functions
+        finally:
+            self.registration_lock.release()
+
+    def abortRunningJob(self):
+        self._aborted_job = True
+        return self.abortRunningProc(self.ansible_job_proc)
+
+    def abortRunningProc(self, proc):
+        aborted = False
+        self.log.debug("Abort: acquiring job lock")
+        with self.running_job_lock:
+            if self._running_job:
+                self.log.debug("Abort: a job is running")
+                if proc:
+                    self.log.debug("Abort: sending kill signal to job "
+                                   "process group")
+                    try:
+                        pgid = os.getpgid(proc.pid)
+                        os.killpg(pgid, signal.SIGKILL)
+                        aborted = True
+                    except Exception:
+                        self.log.exception("Exception while killing "
+                                           "ansible process:")
+            else:
+                self.log.debug("Abort: no job is running")
+
+        return aborted
+
+    def launch(self, job):
+        self.log.info("Node worker %s launching job %s" %
+                      (self.name, job.name))
+
+        # Make sure we can parse what we need from the job first
+        args = json.loads(job.arguments)
+        offline = boolify(args.get('OFFLINE_NODE_WHEN_COMPLETE', False))
+        job_name = job.name.split(':')[1]
+
+        # Initialize the result so we have something regardless of
+        # whether the job actually runs
+        result = None
+        self._sent_complete_event = False
+        self._aborted_job = False
+
+        try:
+            self.sendStartEvent(job_name, args)
+        except Exception:
+            self.log.exception("Exception while sending job start event")
+
+        try:
+            result = self.runJob(job, args)
+        except Exception:
+            self.log.exception("Exception while launching job thread")
+
+        self._running_job = False
+
+        try:
+            data = json.dumps(dict(result=result))
+            job.sendWorkComplete(data)
+        except Exception:
+            self.log.exception("Exception while sending job completion packet")
+
+        try:
+            self.sendCompleteEvent(job_name, result, args)
+        except Exception:
+            self.log.exception("Exception while sending job completion event")
+
+        try:
+            del self.builds[job.unique]
+        except Exception:
+            self.log.exception("Exception while clearing build record")
+
+        self._job_complete_event.set()
+        if offline and self._running:
+            self.stop()
+
+    def sendStartEvent(self, name, parameters):
+        build = dict(node_name=self.name,
+                     host_name=self.manager_name,
+                     parameters=parameters)
+
+        event = dict(name=name,
+                     build=build)
+
+        item = "onStarted %s" % json.dumps(event)
+        self.log.debug("Sending over ZMQ: %s" % (item,))
+        self.zmq_send_queue.put(item)
+
+    def sendCompleteEvent(self, name, status, parameters):
+        build = dict(status=status,
+                     node_name=self.name,
+                     host_name=self.manager_name,
+                     parameters=parameters)
+
+        event = dict(name=name,
+                     build=build)
+
+        item = "onFinalized %s" % json.dumps(event)
+        self.log.debug("Sending over ZMQ: %s" % (item,))
+        self.zmq_send_queue.put(item)
+        self._sent_complete_event = True
+
+    def sendFakeCompleteEvent(self):
+        if self._sent_complete_event:
+            return
+        self.sendCompleteEvent('zuul:launcher-shutdown',
+                               'SUCCESS', {})
+
+    def runJob(self, job, args):
+        self.ansible_job_proc = None
+        self.ansible_post_proc = None
+        result = None
+        with self.running_job_lock:
+            if not self._running:
+                return result
+            self._running_job = True
+            self._job_complete_event.clear()
+
+        self.log.debug("Job %s: beginning" % (job.unique,))
+        self.builds[job.unique] = self.name
+        with JobDir(self.keep_jobdir) as jobdir:
+            self.log.debug("Job %s: job root at %s" %
+                           (job.unique, jobdir.root))
+            timeout = self.prepareAnsibleFiles(jobdir, job, args)
+
+            data = {
+                'manager': self.manager_name,
+                'number': job.unique,
+                'url': 'telnet://%s:8088' % self.host,
+            }
+            job.sendWorkData(json.dumps(data))
+            job.sendWorkStatus(0, 100)
+
+            job_status = self.runAnsiblePlaybook(jobdir, timeout)
+            if job_status is None:
+                # The result of the job is indeterminate.  Zuul will
+                # run it again.
+                return result
+
+            post_status = self.runAnsiblePostPlaybook(jobdir, job_status)
+            if not post_status:
+                result = 'POST_FAILURE'
+            elif job_status:
+                result = 'SUCCESS'
+            else:
+                result = 'FAILURE'
+
+            if self._aborted_job:
+                # A null result causes Zuul to relaunch the job if it
+                # needs to.
+                result = None
+
+        return result
+
+    def getHostList(self):
+        return [('node', dict(
+            ansible_host=self.host, ansible_user=self.username))]
+
+    def _substituteVariables(self, text, variables):
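+        # Expand $NAME references from the supplied parameters; names
+        # that are not present expand to the empty string.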
+        def lookup(match):
+            return variables.get(match.group(1), '')
+        return re.sub(r'\$([A-Za-z0-9_]+)', lookup, text)
+
+    def _getRsyncOptions(self, source, parameters):
+        # Treat the publisher source as a filter; ant and rsync behave
+        # fairly close in this manner, except for leading directories.
+        source = self._substituteVariables(source, parameters)
+        # If the source starts with ** then we want to match any
+        # number of directories, so don't anchor the include filter.
+        # If it does not start with **, then the intent is likely to
+        # at least start by matching an immediate file or subdirectory
+        # (even if later we have a ** in the middle), so in this case,
+        # anchor it to the root of the transfer (the workspace).
+        if not source.startswith('**'):
+            source = os.path.join('/', source)
+        # These options mean: include the thing we want, include any
+        # directories (so that we continue to search for the thing we
+        # want no matter how deep it is), exclude anything that
+        # doesn't match the thing we want or is a directory, then get
+        # rid of empty directories left over at the end.
+        rsync_opts = ['--include="%s"' % source,
+                      '--include="*/"',
+                      '--exclude="*"',
+                      '--prune-empty-dirs']
+        return rsync_opts
+
+    def _makeSCPTask(self, jobdir, publisher, parameters):
+        tasks = []
+        for scpfile in publisher['scp']['files']:
+            scproot = tempfile.mkdtemp(dir=jobdir.staging_root)
+            os.chmod(scproot, 0o755)
+
+            site = publisher['scp']['site']
+            if scpfile.get('copy-console'):
+                # Include the local ansible directory in the console
+                # upload.  This uploads the playbook and ansible logs.
+                copyargs = dict(src=jobdir.ansible_root + '/',
+                                dest=os.path.join(scproot, '_zuul_ansible'))
+                task = dict(copy=copyargs,
+                            delegate_to='127.0.0.1')
+                tasks.append(task)
+
+                # Fetch the console log from the remote host.
+                src = '/tmp/console.html'
+                rsync_opts = []
+            else:
+                src = parameters['WORKSPACE']
+                if not src.endswith('/'):
+                    src = src + '/'
+                rsync_opts = self._getRsyncOptions(scpfile['source'],
+                                                   parameters)
+
+            syncargs = dict(src=src,
+                            dest=scproot,
+                            copy_links='yes',
+                            mode='pull')
+            if rsync_opts:
+                syncargs['rsync_opts'] = rsync_opts
+            task = dict(synchronize=syncargs)
+            if not scpfile.get('copy-after-failure'):
+                task['when'] = 'success'
+            tasks.append(task)
+
+            task = self._makeSCPTaskLocalAction(
+                site, scpfile, scproot, parameters)
+            tasks.append(task)
+        return tasks
+
+    def _makeSCPTaskLocalAction(self, site, scpfile, scproot, parameters):
+        if site not in self.sites:
+            raise Exception("Undefined SCP site: %s" % (site,))
+        site = self.sites[site]
+        dest = scpfile['target'].lstrip('/')
+        dest = self._substituteVariables(dest, parameters)
+        dest = os.path.join(site['root'], dest)
+        dest = os.path.normpath(dest)
+        if not dest.startswith(site['root']):
+            raise Exception("Target path %s is not below site root" %
+                            (dest,))
+
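+        # Push the staged files to the publication site via rsync over
+        # ssh; the remote-side "mkdir -p" creates the destination first.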
+        rsync_cmd = [
+            '/usr/bin/rsync', '--delay-updates', '-F',
+            '--compress', '-rt', '--safe-links',
+            '--rsync-path="mkdir -p {dest} && rsync"',
+            '--rsh="/usr/bin/ssh -i {private_key_file} -S none '
+            '-o StrictHostKeyChecking=no -q"',
+            '--out-format="<<CHANGED>>%i %n%L"',
+            '{source}', '"{user}@{host}:{dest}"'
+        ]
+        if scpfile.get('keep-hierarchy'):
+            source = '"%s/"' % scproot
+        else:
+            source = '`/usr/bin/find "%s" -type f`' % scproot
+        shellargs = ' '.join(rsync_cmd).format(
+            source=source,
+            dest=dest,
+            private_key_file=self.private_key_file,
+            host=site['host'],
+            user=site['user'])
+        task = dict(shell=shellargs,
+                    delegate_to='127.0.0.1')
+        if not scpfile.get('copy-after-failure'):
+            task['when'] = 'success'
+
+        return task
+
+    def _makeFTPTask(self, jobdir, publisher, parameters):
+        tasks = []
+        ftp = publisher['ftp']
+        site = ftp['site']
+        if site not in self.sites:
+            raise Exception("Undefined FTP site: %s" % site)
+        site = self.sites[site]
+
+        ftproot = tempfile.mkdtemp(dir=jobdir.staging_root)
+        ftpcontent = os.path.join(ftproot, 'content')
+        os.makedirs(ftpcontent)
+        ftpscript = os.path.join(ftproot, 'script')
+
+        src = parameters['WORKSPACE']
+        if not src.endswith('/'):
+            src = src + '/'
+        rsync_opts = self._getRsyncOptions(ftp['source'],
+                                           parameters)
+        syncargs = dict(src=src,
+                        dest=ftpcontent,
+                        copy_links='yes',
+                        mode='pull')
+        if rsync_opts:
+            syncargs['rsync_opts'] = rsync_opts
+        task = dict(synchronize=syncargs,
+                    when='success')
+        tasks.append(task)
+        task = dict(shell='lftp -f %s' % ftpscript,
+                    when='success',
+                    delegate_to='127.0.0.1')
+        ftpsource = ftpcontent
+        if ftp.get('remove-prefix'):
+            ftpsource = os.path.join(ftpcontent, ftp['remove-prefix'])
+        ftpsource = ftpsource.rstrip('/')
+        ftptarget = ftp['target'].lstrip('/')
+        ftptarget = self._substituteVariables(ftptarget, parameters)
+        ftptarget = os.path.join(site['root'], ftptarget)
+        ftptarget = os.path.normpath(ftptarget)
+        if not ftptarget.startswith(site['root']):
+            raise Exception("Target path %s is not below site root" %
+                            (ftptarget,))
+        ftptarget = ftptarget.rstrip('/')
+        with open(ftpscript, 'w') as script:
+            script.write('open %s\n' % site['host'])
+            script.write('user %s %s\n' % (site['user'], site['pass']))
+            script.write('mirror -R %s %s\n' % (ftpsource, ftptarget))
+        tasks.append(task)
+        return tasks
+
+    def _makeBuilderTask(self, jobdir, builder, parameters):
+        tasks = []
+        script_fn = '%s.sh' % str(uuid.uuid4().hex)
+        script_path = os.path.join(jobdir.script_root, script_fn)
+        with open(script_path, 'w') as script:
+            data = builder['shell']
+            if not data.startswith('#!'):
+                data = '#!/bin/bash -x\n %s' % (data,)
+            script.write(data)
+
+        remote_path = os.path.join('/tmp', script_fn)
+        copy = dict(src=script_path,
+                    dest=remote_path,
+                    mode=0o555)
+        task = dict(copy=copy)
+        tasks.append(task)
+
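+        # Budget the async runner to the time remaining in the job
+        # ({{ timeout | int - elapsed_time }}) and skip it entirely once
+        # the timeout has already elapsed.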
+        runner = dict(command=remote_path,
+                      cwd=parameters['WORKSPACE'],
+                      parameters=parameters)
+        task = dict(zuul_runner=runner)
+        task['name'] = ('zuul_runner with {{ timeout | int - elapsed_time }} '
+                        'second timeout')
+        task['when'] = '{{ elapsed_time < timeout | int }}'
+        task['async'] = '{{ timeout | int - elapsed_time }}'
+        task['poll'] = 5
+        tasks.append(task)
+
+        filetask = dict(path=remote_path,
+                        state='absent')
+        task = dict(file=filetask)
+        tasks.append(task)
+
+        return tasks
+
+    def _transformPublishers(self, jjb_job):
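+        # Reorder publishers so that any scp publisher copying the
+        # console log runs last; the uploaded console then includes the
+        # output of the earlier publishers.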
+        early_publishers = []
+        late_publishers = []
+        old_publishers = jjb_job.get('publishers', [])
+        for publisher in old_publishers:
+            early_scpfiles = []
+            late_scpfiles = []
+            if 'scp' not in publisher:
+                early_publishers.append(publisher)
+                continue
+            copy_console = False
+            for scpfile in publisher['scp']['files']:
+                if scpfile.get('copy-console'):
+                    scpfile['keep-hierarchy'] = True
+                    late_scpfiles.append(scpfile)
+                    copy_console = True
+                else:
+                    early_scpfiles.append(scpfile)
+            publisher['scp']['files'] = early_scpfiles + late_scpfiles
+            if copy_console:
+                late_publishers.append(publisher)
+            else:
+                early_publishers.append(publisher)
+        publishers = early_publishers + late_publishers
+        if old_publishers != publishers:
+            self.log.debug("Transformed job publishers")
+        return early_publishers, late_publishers
+
+    def prepareAnsibleFiles(self, jobdir, gearman_job, args):
+        job_name = gearman_job.name.split(':')[1]
+        jjb_job = self.jobs[job_name]
+
+        parameters = args.copy()
+        parameters['WORKSPACE'] = os.path.join(self.workspace_root, job_name)
+
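+        # Write a one-host Ansible inventory, e.g. (hypothetical):
+        #   node ansible_host=10.0.0.5 ansible_user=zuul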
+        with open(jobdir.inventory, 'w') as inventory:
+            for host_name, host_vars in self.getHostList():
+                inventory.write(host_name)
+                for k, v in host_vars.items():
+                    inventory.write(' %s=%s' % (k, v))
+                inventory.write('\n')
+
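+        # The JJB timeout wrapper is expressed in minutes; convert it to
+        # seconds here.  If a timeout-var is named, export the timeout to
+        # it in milliseconds (assumed to match the Jenkins convention).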
+        timeout = None
+        timeout_var = None
+        for wrapper in jjb_job.get('wrappers', []):
+            if isinstance(wrapper, dict):
+                build_timeout = wrapper.get('timeout')
+                if isinstance(build_timeout, dict):
+                    timeout_var = build_timeout.get('timeout-var')
+                    timeout = build_timeout.get('timeout')
+                    if timeout is not None:
+                        timeout = int(timeout) * 60
+        if not timeout:
+            timeout = ANSIBLE_DEFAULT_TIMEOUT
+        if timeout_var:
+            parameters[timeout_var] = str(timeout * 1000)
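+        # e.g. a JJB wrapper entry
+        #   {'timeout': {'timeout': 30, 'timeout-var': 'BUILD_TIMEOUT'}}
+        # yields timeout = 1800 (seconds) and
+        # parameters['BUILD_TIMEOUT'] = '1800000' (milliseconds).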
+
+        with open(jobdir.playbook, 'w') as playbook:
+            pre_tasks = []
+            tasks = []
+            main_block = []
+            error_block = []
+            variables = []
+
+            shellargs = "ssh-keyscan %s > %s" % (
+                self.host, jobdir.known_hosts)
+            pre_tasks.append(dict(shell=shellargs,
+                             delegate_to='127.0.0.1'))
+
+            tasks.append(dict(block=main_block,
+                              rescue=error_block))
+
+            task = dict(file=dict(path='/tmp/console.html', state='absent'))
+            main_block.append(task)
+
+            task = dict(zuul_console=dict(path='/tmp/console.html', port=8088))
+            main_block.append(task)
+
+            task = dict(file=dict(path=parameters['WORKSPACE'],
+                                  state='directory'))
+            main_block.append(task)
+
+            msg = [
+                "Launched by %s" % self.manager_name,
+                "Building remotely on %s in workspace %s" % (
+                    self.name, parameters['WORKSPACE'])]
+            task = dict(zuul_log=dict(msg=msg))
+            main_block.append(task)
+
+            for builder in jjb_job.get('builders', []):
+                if 'shell' in builder:
+                    main_block.extend(
+                        self._makeBuilderTask(jobdir, builder, parameters))
+            task = dict(zuul_log=dict(msg="Job complete, result: SUCCESS"))
+            main_block.append(task)
+
+            task = dict(zuul_log=dict(msg="Job complete, result: FAILURE"))
+            error_block.append(task)
+            error_block.append(dict(fail=dict(msg='FAILURE')))
+
+            variables.append(dict(timeout=timeout))
+            play = dict(hosts='node', name='Job body', vars=variables,
+                        pre_tasks=pre_tasks, tasks=tasks)
+            playbook.write(yaml.safe_dump([play], default_flow_style=False))
+
+        early_publishers, late_publishers = self._transformPublishers(jjb_job)
+
+        with open(jobdir.post_playbook, 'w') as playbook:
+            blocks = []
+            for publishers in [early_publishers, late_publishers]:
+                block = []
+                for publisher in publishers:
+                    if 'scp' in publisher:
+                        block.extend(self._makeSCPTask(jobdir, publisher,
+                                                       parameters))
+                    if 'ftp' in publisher:
+                        block.extend(self._makeFTPTask(jobdir, publisher,
+                                                       parameters))
+                blocks.append(block)
+
+            # The 'always' section contains the log publishing tasks,
+            # the 'block' contains all the other publishers.  This way
+            # we run the log publisher regardless of whether the rest
+            # of the publishers succeed.
+            tasks = []
+            tasks.append(dict(block=blocks[0],
+                              always=blocks[1]))
+
+            play = dict(hosts='node', name='Publishers',
+                        tasks=tasks)
+            playbook.write(yaml.safe_dump([play], default_flow_style=False))
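+        # Illustrative shape of the post playbook written above,
+        # assuming one plain scp/ftp publisher and one console-copying
+        # publisher:
+        #   - hosts: node
+        #     name: Publishers
+        #     tasks:
+        #       - block:   # early publishers
+        #           - ...
+        #         always:  # late (console log) publishers
+        #           - ...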
+
+        with open(jobdir.config, 'w') as config:
+            config.write('[defaults]\n')
+            config.write('hostfile = %s\n' % jobdir.inventory)
+            config.write('keep_remote_files = True\n')
+            config.write('local_tmp = %s/.ansible/tmp\n' % jobdir.root)
+            config.write('private_key_file = %s\n' % self.private_key_file)
+            config.write('retry_files_enabled = False\n')
+            config.write('log_path = %s\n' % jobdir.ansible_log)
+            config.write('gathering = explicit\n')
+            config.write('callback_plugins = %s\n' % self.callback_dir)
+            config.write('library = %s\n' % self.library_dir)
+
+            config.write('[ssh_connection]\n')
+            ssh_args = "-o ControlMaster=auto -o ControlPersist=60s " \
+                "-o UserKnownHostsFile=%s" % jobdir.known_hosts
+            config.write('ssh_args = %s\n' % ssh_args)
+
+        return timeout
+
+    def _ansibleTimeout(self, proc, msg):
+        self.log.warning(msg)
+        self.abortRunningProc(proc)
+
+    def runAnsiblePlaybook(self, jobdir, timeout):
+        # Set LOGNAME env variable so Ansible log_path log reports
+        # the correct user.
+        env_copy = os.environ.copy()
+        env_copy['LOGNAME'] = 'zuul'
+
+        if self.options['verbose']:
+            verbose = '-vvv'
+        else:
+            verbose = '-v'
+
+        cmd = ['ansible-playbook', jobdir.playbook, verbose]
+        self.log.debug("Ansible command: %s" % (cmd,))
+
+        self.ansible_job_proc = subprocess.Popen(
+            cmd,
+            cwd=jobdir.ansible_root,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            preexec_fn=os.setsid,
+            env=env_copy,
+        )
+        ret = None
+        watchdog = Watchdog(timeout + ANSIBLE_WATCHDOG_GRACE,
+                            self._ansibleTimeout,
+                            (self.ansible_job_proc,
+                             "Ansible timeout exceeded"))
+        watchdog.start()
+        try:
+            for line in iter(self.ansible_job_proc.stdout.readline, b''):
+                line = line[:1024].rstrip()
+                self.log.debug("Ansible output: %s" % (line,))
+            ret = self.ansible_job_proc.wait()
+        finally:
+            watchdog.stop()
+        self.log.debug("Ansible exit code: %s" % (ret,))
+        self.ansible_job_proc = None
+        if ret == 3:
+            # AnsibleHostUnreachable: We had a network issue connecting to
+            # our zuul-worker.
+            return None
+        elif ret == -9:
+            # Received abort request.
+            return None
+        return ret == 0
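+    # Return contract: True means the playbook succeeded, False means
+    # it failed, and None means no usable result (unreachable node or
+    # an aborted run), leaving the caller free to retry rather than
+    # report a result.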
+
+    def runAnsiblePostPlaybook(self, jobdir, success):
+        # Set LOGNAME env variable so Ansible log_path log reports
+        # the correct user.
+        env_copy = os.environ.copy()
+        env_copy['LOGNAME'] = 'zuul'
+
+        if self.options['verbose']:
+            verbose = '-vvv'
+        else:
+            verbose = '-v'
+
+        cmd = ['ansible-playbook', jobdir.post_playbook,
+               '-e', 'success=%s' % success, verbose]
+        self.log.debug("Ansible post command: %s" % (cmd,))
+
+        self.ansible_post_proc = subprocess.Popen(
+            cmd,
+            cwd=jobdir.ansible_root,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            preexec_fn=os.setsid,
+            env=env_copy,
+        )
+        ret = None
+        watchdog = Watchdog(ANSIBLE_DEFAULT_POST_TIMEOUT,
+                            self._ansibleTimeout,
+                            (self.ansible_post_proc,
+                             "Ansible post timeout exceeded"))
+        watchdog.start()
+        try:
+            for line in iter(self.ansible_post_proc.stdout.readline, b''):
+                line = line[:1024].rstrip()
+                self.log.debug("Ansible post output: %s" % (line,))
+            ret = self.ansible_post_proc.wait()
+        finally:
+            watchdog.stop()
+        self.log.debug("Ansible post exit code: %s" % (ret,))
+        self.ansible_post_proc = None
+        return ret == 0
+
+
+class JJB(jenkins_jobs.builder.Builder):
+    def __init__(self):
+        self.global_config = None
+        self._plugins_list = []
+
+    def expandComponent(self, component_type, component, template_data):
+        component_list_type = component_type + 's'
+        new_components = []
+        if isinstance(component, dict):
+            name, component_data = next(iter(component.items()))
+            if template_data:
+                component_data = jenkins_jobs.formatter.deep_format(
+                    component_data, template_data, True)
+        else:
+            name = component
+            component_data = {}
+
+        new_component = self.parser.data.get(component_type, {}).get(name)
+        if new_component:
+            for new_sub_component in new_component[component_list_type]:
+                new_components.extend(
+                    self.expandComponent(component_type,
+                                         new_sub_component, component_data))
+        else:
+            new_components.append({name: component_data})
+        return new_components
+
+    def expandMacros(self, job):
+        for component_type in ['builder', 'publisher', 'wrapper']:
+            component_list_type = component_type + 's'
+            new_components = []
+            for new_component in job.get(component_list_type, []):
+                new_components.extend(self.expandComponent(component_type,
+                                                           new_component, {}))
+            job[component_list_type] = new_components
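+
+# Hedged macro-expansion example for the methods above: given a JJB
+# macro
+#   - builder:
+#       name: run-tox
+#       builders:
+#         - shell: 'tox -e {env}'
+# a job entry "builders: [{run-tox: {env: py27}}]" expands, via
+# deep_format, to "builders: [{shell: 'tox -e py27'}]".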
diff --git a/zuul/launcher/gearman.py b/zuul/launcher/gearman.py
index 2f5c6df..02f78fd 100644
--- a/zuul/launcher/gearman.py
+++ b/zuul/launcher/gearman.py
@@ -17,6 +17,7 @@
 import json
 import logging
 import os
+import six
 import time
 import threading
 from uuid import uuid4
@@ -164,6 +165,11 @@
             port = config.get('gearman', 'port')
         else:
             port = 4730
+        if config.has_option('gearman', 'check_job_registration'):
+            self.job_registration = config.getboolean(
+                'gearman', 'check_job_registration')
+        else:
+            self.job_registration = True
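+        # Hedged zuul.conf example for this option:
+        #   [gearman]
+        #   server=127.0.0.1
+        #   check_job_registration=false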
 
         self.gearman = ZuulGearmanClient(self)
         self.gearman.addServer(server, port)
@@ -244,7 +250,7 @@
                 s_config = {}
                 s_config.update((k, v.format(item=item, job=job,
                                              change=item.change))
-                                if isinstance(v, basestring)
+                                if isinstance(v, six.string_types)
                                 else (k, v)
                                 for k, v in s.items())
 
@@ -280,6 +286,7 @@
                       ZUUL_PROJECT=item.change.project.name)
         params['ZUUL_PIPELINE'] = pipeline.name
         params['ZUUL_URL'] = item.current_build_set.zuul_url
+        params['ZUUL_VOTING'] = '1' if job.voting else '0'
         if hasattr(item.change, 'refspec'):
             changes_str = '^'.join(
                 ['%s:%s:%s' % (i.change.project.name, i.change.branch,
@@ -353,7 +360,8 @@
         build.__gearman_job = gearman_job
         self.builds[uuid] = build
 
-        if not self.isJobRegistered(gearman_job.name):
+        if self.job_registration and not self.isJobRegistered(
+                gearman_job.name):
             self.log.error("Job %s is not registered with Gearman" %
                            gearman_job)
             self.onBuildCompleted(gearman_job, 'NOT_REGISTERED')
@@ -425,9 +433,11 @@
 
         build = self.builds.get(job.unique)
         if build:
+            data = getJobData(job)
+            build.node_labels = data.get('node_labels', [])
+            build.node_name = data.get('node_name')
             if not build.canceled:
                 if result is None:
-                    data = getJobData(job)
                     result = data.get('result')
                 if result is None:
                     build.retry = True
@@ -456,9 +466,6 @@
                 build.number = data.get('number')
                 build.__gearman_manager = data.get('manager')
                 self.sched.onBuildStarted(build)
-
-            if job.denominator:
-                build.estimated_time = float(job.denominator) / 1000
         else:
             self.log.error("Unable to find build %s" % job.unique)
 
@@ -505,7 +512,7 @@
             # us where the job is running.
             return False
 
-        if not self.isJobRegistered(name):
+        if self.job_registration and not self.isJobRegistered(name):
             return False
 
         desc_uuid = str(uuid4().hex)
diff --git a/zuul/layoutvalidator.py b/zuul/layoutvalidator.py
index 88d10e2..e1e8ac6 100644
--- a/zuul/layoutvalidator.py
+++ b/zuul/layoutvalidator.py
@@ -18,8 +18,6 @@
 import voluptuous as v
 import string
 
-from zuul.trigger import gerrit
-
 
 # Several forms accept either a single item or a list, this makes
 # specifying that in the schema easy (and explicit).
@@ -36,60 +34,20 @@
 
     precedence = v.Any('normal', 'low', 'high')
 
-    variable_dict = v.Schema({}, extra=True)
+    approval = v.Schema({'username': str,
+                         'email-filter': str,
+                         'email': str,
+                         'older-than': str,
+                         'newer-than': str,
+                         }, extra=True)
 
-    require_approval = v.Schema({'username': str,
-                                 'email-filter': str,
-                                 'email': str,
-                                 'older-than': str,
-                                 'newer-than': str,
-                                 }, extra=True)
-
-    gerrit_trigger = {v.Required('event'):
-                      toList(v.Any('patchset-created',
-                                   'draft-published',
-                                   'change-abandoned',
-                                   'change-restored',
-                                   'change-merged',
-                                   'comment-added',
-                                   'ref-updated')),
-                      'comment_filter': toList(str),
-                      'comment': toList(str),
-                      'email_filter': toList(str),
-                      'email': toList(str),
-                      'username_filter': toList(str),
-                      'username': toList(str),
-                      'branch': toList(str),
-                      'ref': toList(str),
-                      'approval': toList(variable_dict),
-                      'require-approval': toList(require_approval),
-                      }
-
-    timer_trigger = {v.Required('time'): str}
-
-    zuul_trigger = {v.Required('event'):
-                    toList(v.Any('parent-change-enqueued',
-                                 'project-change-merged')),
-                    'pipeline': toList(str),
-                    'require-approval': toList(require_approval),
-                    }
-
-    trigger = v.Required({'gerrit': toList(gerrit_trigger),
-                          'timer': toList(timer_trigger),
-                          'zuul': toList(zuul_trigger)})
-
-    report_actions = {'gerrit': variable_dict,
-                      'smtp': {'to': str,
-                               'from': str,
-                               'subject': str,
-                               },
-                      }
-
-    require = {'approval': toList(require_approval),
+    require = {'approval': toList(approval),
                'open': bool,
                'current-patchset': bool,
                'status': toList(str)}
 
+    reject = {'approval': toList(approval)}
+
     window = v.All(int, v.Range(min=0))
     window_floor = v.All(int, v.Range(min=1))
     window_type = v.Any('linear', 'exponential')
@@ -97,21 +55,19 @@
 
     pipeline = {v.Required('name'): str,
                 v.Required('manager'): manager,
-                'source': v.Any('gerrit'),
+                'source': str,
                 'precedence': precedence,
                 'description': str,
                 'require': require,
+                'reject': reject,
                 'success-message': str,
                 'failure-message': str,
                 'merge-failure-message': str,
                 'footer-message': str,
                 'dequeue-on-new-patchset': bool,
                 'ignore-dependencies': bool,
-                'trigger': trigger,
-                'success': report_actions,
-                'failure': report_actions,
-                'merge-failure': report_actions,
-                'start': report_actions,
+                'disable-after-consecutive-failures':
+                    v.All(int, v.Range(min=1)),
                 'window': window,
                 'window-floor': window_floor,
                 'window-increase-type': window_type,
@@ -119,7 +75,6 @@
                 'window-decrease-type': window_type,
                 'window-decrease-factor': window_factor,
                 }
-    pipelines = [pipeline]
 
     project_template = {v.Required('name'): str}
     project_templates = [project_template]
@@ -148,6 +103,8 @@
            'success-pattern': str,
            'hold-following-changes': bool,
            'voting': bool,
+           'mutex': str,
+           'tags': toList(str),
            'parameter-function': str,
            'branch': toList(str),
            'files': toList(str),
@@ -200,7 +157,42 @@
 
         return parameters
 
-    def getSchema(self, data):
+    def getDriverSchema(self, dtype, connections):
+        # TODO(jhesketh): Make the driver discovery dynamic
+        connection_drivers = {
+            'trigger': {
+                'gerrit': 'zuul.trigger.gerrit',
+            },
+            'reporter': {
+                'gerrit': 'zuul.reporter.gerrit',
+                'smtp': 'zuul.reporter.smtp',
+            },
+        }
+        standard_drivers = {
+            'trigger': {
+                'timer': 'zuul.trigger.timer',
+                'zuul': 'zuul.trigger.zuultrigger',
+            }
+        }
+
+        schema = {}
+        # Add the configured connections as available layout options
+        for connection_name, connection in connections.items():
+            for dname, dmod in connection_drivers.get(dtype, {}).items():
+                if connection.driver_name == dname:
+                    schema[connection_name] = toList(__import__(
+                        connection_drivers[dtype][dname],
+                        fromlist=['']).getSchema())
+
+        # Standard drivers are always available and don't require a unique
+        # (connection) name
+        for dname, dmod in standard_drivers.get(dtype, {}).items():
+            schema[dname] = toList(__import__(
+                standard_drivers[dtype][dname], fromlist=['']).getSchema())
+
+        return schema
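+    # Illustrative result, assuming a single gerrit connection named
+    # "review_gerrit": getDriverSchema('trigger', connections) maps
+    # 'review_gerrit' to the gerrit trigger schema and always includes
+    # 'timer' and 'zuul' from the standard drivers.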
+
+    def getSchema(self, data, connections=None):
         if not isinstance(data, dict):
             raise Exception("Malformed layout configuration: top-level type "
                             "should be a dictionary")
@@ -254,9 +246,31 @@
         for p in pipelines:
             project_template[p] = self.validateJob
         project_templates = [project_template]
+
+        # TODO(jhesketh): source schema is still defined above as sources
+        # currently aren't key/value so there is nothing to validate. Need to
+        # revisit this and figure out how to allow drivers with and without
+        # params. eg support all:
+        #   source: gerrit
+        # and
+        #   source:
+        #     gerrit:
+        #       - val
+        #       - val2
+        # and
+        #   source:
+        #     gerrit: something
+        # etc...
+        self.pipeline['trigger'] = v.Required(
+            self.getDriverSchema('trigger', connections))
+        for action in ['start', 'success', 'failure', 'merge-failure',
+                       'disabled']:
+            self.pipeline[action] = self.getDriverSchema('reporter',
+                                                         connections)
+
         # Gather our sub schemas
         schema = v.Schema({'includes': self.includes,
-                           v.Required('pipelines'): self.pipelines,
+                           v.Required('pipelines'): [self.pipeline],
                            'jobs': self.jobs,
                            'project-templates': project_templates,
                            v.Required('projects'): projects,
@@ -273,8 +287,45 @@
                                 path + [i])
             items.append(item['name'])
 
-    def validate(self, data):
-        schema = LayoutSchema().getSchema(data)
+    def extraDriverValidation(self, dtype, driver_data, connections=None):
+        # Some drivers may have extra validation to run on the layout
+        # TODO(jhesketh): Make the driver discovery dynamic
+        connection_drivers = {
+            'trigger': {
+                'gerrit': 'zuul.trigger.gerrit',
+            },
+            'reporter': {
+                'gerrit': 'zuul.reporter.gerrit',
+                'smtp': 'zuul.reporter.smtp',
+            },
+        }
+        standard_drivers = {
+            'trigger': {
+                'timer': 'zuul.trigger.timer',
+                'zuul': 'zuul.trigger.zuultrigger',
+            }
+        }
+
+        for dname, d_conf in driver_data.items():
+            for connection_name, connection in connections.items():
+                if connection_name == dname:
+                    if (connection.driver_name in
+                        connection_drivers.get(dtype, {}).keys()):
+                        module = __import__(
+                            connection_drivers[dtype][connection.driver_name],
+                            fromlist=['']
+                        )
+                        if 'validate_conf' in dir(module):
+                            module.validate_conf(d_conf)
+                    break
+            if dname in standard_drivers.get(dtype, {}).keys():
+                module = __import__(standard_drivers[dtype][dname],
+                                    fromlist=[''])
+                if 'validate_conf' in dir(module):
+                    module.validate_conf(d_conf)
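+    # A driver opts in to this extra validation by exposing a
+    # module-level validate_conf(conf) that raises on bad layout data;
+    # modules without the hook are skipped.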
+
+    def validate(self, data, connections=None):
+        schema = LayoutSchema().getSchema(data, connections)
         schema(data)
         self.checkDuplicateNames(data['pipelines'], ['pipelines'])
         if 'jobs' in data:
@@ -283,6 +334,11 @@
         if 'project-templates' in data:
             self.checkDuplicateNames(
                 data['project-templates'], ['project-templates'])
+
         for pipeline in data['pipelines']:
-            if 'gerrit' in pipeline['trigger']:
-                gerrit.validate_trigger(pipeline['trigger'])
+            self.extraDriverValidation('trigger', pipeline['trigger'],
+                                       connections)
+            for action in ['start', 'success', 'failure', 'merge-failure']:
+                if action in pipeline:
+                    self.extraDriverValidation('reporter', pipeline[action],
+                                               connections)
diff --git a/zuul/lib/clonemapper.py b/zuul/lib/clonemapper.py
index ae558cd..57ac177 100644
--- a/zuul/lib/clonemapper.py
+++ b/zuul/lib/clonemapper.py
@@ -19,6 +19,9 @@
 import os
 import re
 
+import six
+
+
 OrderedDict = extras.try_imports(['collections.OrderedDict',
                                   'ordereddict.OrderedDict'])
 
@@ -59,17 +62,17 @@
             raise Exception("Expansion error. Check error messages above")
 
         self.log.info("Mapping projects to workspace...")
-        for project, dest in ret.iteritems():
+        for project, dest in six.iteritems(ret):
             dest = os.path.normpath(os.path.join(workspace, dest[0]))
             ret[project] = dest
             self.log.info("  %s -> %s", project, dest)
 
         self.log.debug("Checking overlap in destination directories...")
         check = defaultdict(list)
-        for project, dest in ret.iteritems():
+        for project, dest in six.iteritems(ret):
             check[dest].append(project)
 
-        dupes = dict((d, p) for (d, p) in check.iteritems() if len(p) > 1)
+        dupes = dict((d, p) for (d, p) in six.iteritems(check) if len(p) > 1)
         if dupes:
             raise Exception("Some projects share the same destination: %s",
                             dupes)
diff --git a/zuul/lib/cloner.py b/zuul/lib/cloner.py
index 0ac7f0f..197c426 100644
--- a/zuul/lib/cloner.py
+++ b/zuul/lib/cloner.py
@@ -19,7 +19,10 @@
 import re
 import yaml
 
+import six
+
 from git import GitCommandError
+from zuul import exceptions
 from zuul.lib.clonemapper import CloneMapper
 from zuul.merger.merger import Repo
 
@@ -29,7 +32,8 @@
 
     def __init__(self, git_base_url, projects, workspace, zuul_branch,
                  zuul_ref, zuul_url, branch=None, clone_map_file=None,
-                 project_branches=None, cache_dir=None):
+                 project_branches=None, cache_dir=None, zuul_newrev=None,
+                 zuul_project=None):
 
         self.clone_map = []
         self.dests = None
@@ -43,6 +47,10 @@
         self.zuul_ref = zuul_ref or ''
         self.zuul_url = zuul_url
         self.project_branches = project_branches or {}
+        self.project_revisions = {}
+
+        if zuul_newrev and zuul_project:
+            self.project_revisions[zuul_project] = zuul_newrev
 
         if clone_map_file:
             self.readCloneMap(clone_map_file)
@@ -62,7 +70,7 @@
         dests = mapper.expand(workspace=self.workspace)
 
         self.log.info("Preparing %s repositories", len(dests))
-        for project, dest in dests.iteritems():
+        for project, dest in six.iteritems(dests):
             self.prepareRepo(project, dest)
         self.log.info("Prepared all repositories")
 
@@ -70,9 +78,10 @@
         # Check for a cached git repo first
         git_cache = '%s/%s' % (self.cache_dir, project)
         git_upstream = '%s/%s' % (self.git_url, project)
+        repo_is_cloned = os.path.exists(os.path.join(dest, '.git'))
         if (self.cache_dir and
             os.path.exists(git_cache) and
-            not os.path.exists(dest)):
+            not repo_is_cloned):
             # file:// tells git not to hard-link across repos
             git_cache = 'file://%s' % git_cache
             self.log.info("Creating repo %s from cache %s",
@@ -102,7 +111,14 @@
             repo.fetchFrom(zuul_remote, ref)
             self.log.debug("Fetched ref %s from %s", ref, project)
             return True
-        except (ValueError, GitCommandError):
+        except ValueError:
+            self.log.debug("Project %s in Zuul does not have ref %s",
+                           project, ref)
+            return False
+        except GitCommandError as error:
+            # Bail out if fetch fails due to infrastructure reasons
+            if error.stderr.startswith('fatal: unable to access'):
+                raise
             self.log.debug("Project %s in Zuul does not have ref %s",
                            project, ref)
             return False
@@ -111,10 +127,15 @@
         """Clone a repository for project at dest and apply a reference
         suitable for testing. The reference lookup is attempted in this order:
 
-         1) Zuul reference for the indicated branch
-         2) Zuul reference for the master branch
-         3) The tip of the indicated branch
-         4) The tip of the master branch
+         1) The indicated revision for specific project
+         2) Zuul reference for the indicated branch
+         3) Zuul reference for the master branch
+         4) The tip of the indicated branch
+         5) The tip of the master branch
+
+        If an "indicated revision" is specified for this project, and we are
+        unable to meet this requirement, we stop attempting to check this
+        repo out and raise a zuul.exceptions.RevNotFound exception.
 
         The "indicated branch" is one of the following:
 
@@ -134,6 +155,10 @@
         # `git branch` is happy with.
         repo.reset()
 
+        indicated_revision = None
+        if project in self.project_revisions:
+            indicated_revision = self.project_revisions[project]
+
         indicated_branch = self.branch or self.zuul_branch
         if project in self.project_branches:
             indicated_branch = self.project_branches[project]
@@ -148,8 +173,9 @@
             self.log.info("upstream repo has branch %s", indicated_branch)
             fallback_branch = indicated_branch
         else:
-            self.log.info("upstream repo is missing branch %s",
-                          self.branch)
+            if indicated_branch:
+                self.log.info("upstream repo is missing branch %s",
+                              indicated_branch)
             # FIXME should be origin HEAD branch which might not be 'master'
             fallback_branch = 'master'
 
@@ -159,13 +185,26 @@
         else:
             fallback_zuul_ref = None
 
+        # If the user has requested an explicit revision to be checked out,
+        # we use it above all else, and if we cannot satisfy this requirement
+        # we raise an error and do not attempt to continue.
+        if indicated_revision:
+            self.log.info("Attempting to check out revision %s for "
+                          "project %s", indicated_revision, project)
+            try:
+                self.fetchFromZuul(repo, project, self.zuul_ref)
+                commit = repo.checkout(indicated_revision)
+            except (ValueError, GitCommandError):
+                raise exceptions.RevNotFound(project, indicated_revision)
+            self.log.info("Prepared '%s' repo at revision '%s'", project,
+                          indicated_revision)
         # If we have a non-empty zuul_ref to use, use it. Otherwise we fall
         # back to checking out the branch.
-        if ((override_zuul_ref and
-            self.fetchFromZuul(repo, project, override_zuul_ref)) or
-            (fallback_zuul_ref and
-             fallback_zuul_ref != override_zuul_ref and
-            self.fetchFromZuul(repo, project, fallback_zuul_ref))):
+        elif ((override_zuul_ref and
+              self.fetchFromZuul(repo, project, override_zuul_ref)) or
+              (fallback_zuul_ref and
+               fallback_zuul_ref != override_zuul_ref and
+              self.fetchFromZuul(repo, project, fallback_zuul_ref))):
             # Work around a bug in GitPython which can not parse FETCH_HEAD
             gitcmd = git.Git(dest)
             fetch_head = gitcmd.rev_parse('FETCH_HEAD')
diff --git a/zuul/lib/commandsocket.py b/zuul/lib/commandsocket.py
new file mode 100644
index 0000000..1b7fed9
--- /dev/null
+++ b/zuul/lib/commandsocket.py
@@ -0,0 +1,83 @@
+# Copyright 2014 OpenStack Foundation
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2016 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import socket
+import threading
+from six.moves import queue as Queue
+
+
+class CommandSocket(object):
+    log = logging.getLogger("zuul.CommandSocket")
+
+    def __init__(self, path):
+        self.running = False
+        self.path = path
+        self.queue = Queue.Queue()
+
+    def start(self):
+        self.running = True
+        if os.path.exists(self.path):
+            os.unlink(self.path)
+        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self.socket.bind(self.path)
+        self.socket.listen(1)
+        self.socket_thread = threading.Thread(target=self._socketListener)
+        self.socket_thread.daemon = True
+        self.socket_thread.start()
+
+    def stop(self):
+        # First, wake up our listener thread with a connection and
+        # tell it to stop running.
+        self.running = False
+        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        s.connect(self.path)
+        s.sendall('_stop\n')
+        # The command '_stop' will be ignored by our listener, so
+        # directly inject it into the queue so that consumers of this
+        # class which are waiting in .get() are awakened.  They can
+        # either handle '_stop' or just ignore the unknown command and
+        # then check to see if they should continue to run before
+        # re-entering their loop.
+        self.queue.put('_stop')
+        self.socket_thread.join()
+
+    def _socketListener(self):
+        while self.running:
+            try:
+                s, addr = self.socket.accept()
+                self.log.debug("Accepted socket connection %s" % (s,))
+                buf = ''
+                while True:
+                    buf += s.recv(1)
+                    if buf[-1] == '\n':
+                        break
+                buf = buf.strip()
+                self.log.debug("Received %s from socket" % (buf,))
+                s.close()
+                # Because we use '_stop' internally to wake up a
+                # waiting thread, don't allow it to actually be
+                # injected externally.
+                if buf != '_stop':
+                    self.queue.put(buf)
+            except Exception:
+                self.log.exception("Exception in socket handler")
+
+    def get(self):
+        if not self.running:
+            raise Exception("CommandSocket.get called while stopped")
+        return self.queue.get()
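+
+# Minimal usage sketch (socket path and handler are hypothetical); one
+# thread consumes commands while another eventually calls stop():
+#   cs = CommandSocket('/var/lib/zuul/launcher.socket')
+#   cs.start()
+#   while True:
+#       cmd = cs.get()
+#       if cmd == '_stop':  # only ever injected by stop()
+#           break
+#       handle(cmd)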
diff --git a/zuul/lib/connections.py b/zuul/lib/connections.py
new file mode 100644
index 0000000..92ddb0f
--- /dev/null
+++ b/zuul/lib/connections.py
@@ -0,0 +1,66 @@
+# Copyright 2015 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+import zuul.connection.gerrit
+import zuul.connection.smtp
+
+
+def configure_connections(config):
+    # Register connections from the config
+
+    # TODO(jhesketh): import connection modules dynamically
+    connections = {}
+
+    for section_name in config.sections():
+        con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
+                             section_name, re.I)
+        if not con_match:
+            continue
+        con_name = con_match.group(2)
+        con_config = dict(config.items(section_name))
+
+        if 'driver' not in con_config:
+            raise Exception("No driver specified for connection %s."
+                            % con_name)
+
+        con_driver = con_config['driver']
+
+        # TODO(jhesketh): load the required class automatically
+        if con_driver == 'gerrit':
+            connections[con_name] = \
+                zuul.connection.gerrit.GerritConnection(con_name,
+                                                        con_config)
+        elif con_driver == 'smtp':
+            connections[con_name] = \
+                zuul.connection.smtp.SMTPConnection(con_name, con_config)
+        else:
+            raise Exception("Unknown driver, %s, for connection %s"
+                            % (con_config['driver'], con_name))
+
+    # If the [gerrit] or [smtp] sections still exist, load them in as a
+    # connection named 'gerrit' or 'smtp' respectively
+
+    if 'gerrit' in config.sections():
+        connections['gerrit'] = \
+            zuul.connection.gerrit.GerritConnection(
+                'gerrit', dict(config.items('gerrit')))
+
+    if 'smtp' in config.sections():
+        connections['smtp'] = \
+            zuul.connection.smtp.SMTPConnection(
+                'smtp', dict(config.items('smtp')))
+
+    return connections
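+
+# Hedged example of the zuul.conf sections parsed above:
+#   [connection review_gerrit]
+#   driver=gerrit
+#   server=review.example.com
+#
+#   [connection outgoing_smtp]
+#   driver=smtp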
diff --git a/zuul/lib/gearserver.py b/zuul/lib/gearserver.py
new file mode 100644
index 0000000..9cddca3
--- /dev/null
+++ b/zuul/lib/gearserver.py
@@ -0,0 +1,35 @@
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import gear
+
+MASS_DO = 101
+
+
+class GearServer(gear.Server):
+    def handlePacket(self, packet):
+        if packet.ptype == MASS_DO:
+            self.log.info("Received packet from %s: %s" % (packet.connection,
+                                                           packet))
+            self.handleMassDo(packet)
+        else:
+            return super(GearServer, self).handlePacket(packet)
+
+    def handleMassDo(self, packet):
+        packet.connection.functions = set()
+        for name in packet.data.split(b'\x00'):
+            self.log.debug("Adding function %s to %s" % (
+                name, packet.connection))
+            packet.connection.functions.add(name)
+            self.functions.add(name)
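+
+# MASS_DO (101) is a non-standard gear packet whose payload is a
+# NUL-separated list of function names; handleMassDo registers each
+# name on the sending connection and in the server's function set.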
diff --git a/zuul/lib/gerrit.py b/zuul/lib/gerrit.py
deleted file mode 100644
index 90faf40..0000000
--- a/zuul/lib/gerrit.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# Copyright 2011 OpenStack, LLC.
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import threading
-import select
-import json
-import time
-from six.moves import queue as Queue
-import paramiko
-import logging
-import pprint
-
-
-class GerritWatcher(threading.Thread):
-    log = logging.getLogger("gerrit.GerritWatcher")
-
-    def __init__(self, gerrit, username, hostname, port=29418, keyfile=None):
-        threading.Thread.__init__(self)
-        self.username = username
-        self.keyfile = keyfile
-        self.hostname = hostname
-        self.port = port
-        self.gerrit = gerrit
-
-    def _read(self, fd):
-        l = fd.readline()
-        data = json.loads(l)
-        self.log.debug("Received data from Gerrit event stream: \n%s" %
-                       pprint.pformat(data))
-        self.gerrit.addEvent((time.time(), data))
-
-    def _listen(self, stdout, stderr):
-        poll = select.poll()
-        poll.register(stdout.channel)
-        while True:
-            ret = poll.poll()
-            for (fd, event) in ret:
-                if fd == stdout.channel.fileno():
-                    if event == select.POLLIN:
-                        self._read(stdout)
-                    else:
-                        raise Exception("event on ssh connection")
-
-    def _run(self):
-        try:
-            client = paramiko.SSHClient()
-            client.load_system_host_keys()
-            client.set_missing_host_key_policy(paramiko.WarningPolicy())
-            client.connect(self.hostname,
-                           username=self.username,
-                           port=self.port,
-                           key_filename=self.keyfile)
-
-            stdin, stdout, stderr = client.exec_command("gerrit stream-events")
-
-            self._listen(stdout, stderr)
-
-            ret = stdout.channel.recv_exit_status()
-            self.log.debug("SSH exit status: %s" % ret)
-
-            if ret:
-                raise Exception("Gerrit error executing stream-events")
-        except:
-            self.log.exception("Exception on ssh event stream:")
-            time.sleep(5)
-
-    def run(self):
-        while True:
-            self._run()
-
-
-class Gerrit(object):
-    log = logging.getLogger("gerrit.Gerrit")
-
-    def __init__(self, hostname, username, port=29418, keyfile=None):
-        self.username = username
-        self.hostname = hostname
-        self.port = port
-        self.keyfile = keyfile
-        self.watcher_thread = None
-        self.event_queue = None
-        self.client = None
-
-    def startWatching(self):
-        self.event_queue = Queue.Queue()
-        self.watcher_thread = GerritWatcher(
-            self,
-            self.username,
-            self.hostname,
-            self.port,
-            keyfile=self.keyfile)
-        self.watcher_thread.start()
-
-    def addEvent(self, data):
-        return self.event_queue.put(data)
-
-    def getEvent(self):
-        return self.event_queue.get()
-
-    def eventDone(self):
-        self.event_queue.task_done()
-
-    def review(self, project, change, message, action={}):
-        cmd = 'gerrit review --project %s' % project
-        if message:
-            cmd += ' --message "%s"' % message
-        for k, v in action.items():
-            if v is True:
-                cmd += ' --%s' % k
-            else:
-                cmd += ' --label %s=%s' % (k, v)
-        cmd += ' %s' % change
-        out, err = self._ssh(cmd)
-        return err
-
-    def query(self, query):
-        args = '--all-approvals --comments --commit-message'
-        args += ' --current-patch-set --dependencies --files'
-        args += ' --patch-sets --submit-records'
-        cmd = 'gerrit query --format json %s %s' % (
-            args, query)
-        out, err = self._ssh(cmd)
-        if not out:
-            return False
-        lines = out.split('\n')
-        if not lines:
-            return False
-        data = json.loads(lines[0])
-        if not data:
-            return False
-        self.log.debug("Received data from Gerrit query: \n%s" %
-                       (pprint.pformat(data)))
-        return data
-
-    def simpleQuery(self, query):
-        def _query_chunk(query):
-            args = '--commit-message --current-patch-set'
-
-            cmd = 'gerrit query --format json %s %s' % (
-                args, query)
-            out, err = self._ssh(cmd)
-            if not out:
-                return False
-            lines = out.split('\n')
-            if not lines:
-                return False
-            data = [json.loads(line) for line in lines
-                    if "sortKey" in line]
-            if not data:
-                return False
-            self.log.debug("Received data from Gerrit query: \n%s" %
-                           (pprint.pformat(data)))
-            return data
-
-        # gerrit returns 500 results by default, so implement paging
-        # for large projects like nova
-        alldata = []
-        chunk = _query_chunk(query)
-        while(chunk):
-            alldata.extend(chunk)
-            sortkey = "resume_sortkey:'%s'" % chunk[-1]["sortKey"]
-            chunk = _query_chunk("%s %s" % (query, sortkey))
-        return alldata
-
-    def _open(self):
-        client = paramiko.SSHClient()
-        client.load_system_host_keys()
-        client.set_missing_host_key_policy(paramiko.WarningPolicy())
-        client.connect(self.hostname,
-                       username=self.username,
-                       port=self.port,
-                       key_filename=self.keyfile)
-        self.client = client
-
-    def _ssh(self, command):
-        if not self.client:
-            self._open()
-
-        try:
-            self.log.debug("SSH command:\n%s" % command)
-            stdin, stdout, stderr = self.client.exec_command(command)
-        except:
-            self._open()
-            stdin, stdout, stderr = self.client.exec_command(command)
-
-        out = stdout.read()
-        self.log.debug("SSH received stdout:\n%s" % out)
-
-        ret = stdout.channel.recv_exit_status()
-        self.log.debug("SSH exit status: %s" % ret)
-
-        err = stderr.read()
-        self.log.debug("SSH received stderr:\n%s" % err)
-        if ret:
-            raise Exception("Gerrit error executing %s" % command)
-        return (out, err)
diff --git a/zuul/lib/swift.py b/zuul/lib/swift.py
index 3c411d3..b5d3bc7 100644
--- a/zuul/lib/swift.py
+++ b/zuul/lib/swift.py
@@ -19,8 +19,8 @@
 import os
 import random
 import six
+from six.moves import urllib
 import string
-import urlparse
 
 
 class Swift(object):
@@ -156,7 +156,7 @@
         url = os.path.join(self.storage_url, settings['container'],
                            settings['file_path_prefix'],
                            destination_prefix)
-        u = urlparse.urlparse(url)
+        u = urllib.parse.urlparse(url)
 
         hmac_body = '%s\n%s\n%s\n%s\n%s' % (u.path, redirect,
                                             settings['max_file_size'],
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index 1e881bf..3bc29e6 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -188,24 +188,29 @@
 class Merger(object):
     log = logging.getLogger("zuul.Merger")
 
-    def __init__(self, working_root, sshkey, email, username):
+    def __init__(self, working_root, connections, email, username):
         self.repos = {}
         self.working_root = working_root
         if not os.path.exists(working_root):
             os.makedirs(working_root)
-        if sshkey:
-            self._makeSSHWrapper(sshkey)
+        self._makeSSHWrappers(working_root, connections)
         self.email = email
         self.username = username
 
-    def _makeSSHWrapper(self, key):
-        name = os.path.join(self.working_root, '.ssh_wrapper')
+    def _makeSSHWrappers(self, working_root, connections):
+        for connection_name, connection in connections.items():
+            sshkey = connection.connection_config.get('sshkey')
+            if sshkey:
+                self._makeSSHWrapper(sshkey, working_root, connection_name)
+
+    def _makeSSHWrapper(self, key, merge_root, connection_name='default'):
+        wrapper_name = '.ssh_wrapper_%s' % connection_name
+        name = os.path.join(merge_root, wrapper_name)
         fd = open(name, 'w')
         fd.write('#!/bin/bash\n')
         fd.write('ssh -i %s $@\n' % key)
         fd.close()
-        os.chmod(name, 0755)
-        os.environ['GIT_SSH'] = name
+        os.chmod(name, 0o755)
 
     def addProject(self, project, url):
         repo = None
@@ -263,10 +268,19 @@
 
         return commit
 
+    def _setGitSsh(self, connection_name):
+        wrapper_name = '.ssh_wrapper_%s' % connection_name
+        name = os.path.join(self.working_root, wrapper_name)
+        if os.path.isfile(name):
+            os.environ['GIT_SSH'] = name
+        elif 'GIT_SSH' in os.environ:
+            del os.environ['GIT_SSH']
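+    # Example: a connection named "review_gerrit" with sshkey set gets
+    # <working_root>/.ssh_wrapper_review_gerrit wrapping "ssh -i <key>",
+    # and _setGitSsh exports it as GIT_SSH before each merge so git
+    # uses the right identity per connection.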
+
     def _mergeItem(self, item, recent):
         self.log.debug("Processing refspec %s for project %s / %s ref %s" %
                        (item['refspec'], item['project'], item['branch'],
                         item['ref']))
+        self._setGitSsh(item['connection_name'])
         repo = self.getRepo(item['project'], item['url'])
         key = (item['project'], item['branch'])
         # See if we have a commit for this change already in this repo
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index 1a02322..d56993c 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -19,13 +19,13 @@
 
 import gear
 
-import merger
+from zuul.merger import merger
 
 
 class MergeServer(object):
     log = logging.getLogger("zuul.MergeServer")
 
-    def __init__(self, config):
+    def __init__(self, config, connections={}):
         self.config = config
         self.zuul_url = config.get('merger', 'zuul_url')
 
@@ -44,13 +44,8 @@
         else:
             merge_name = None
 
-        if self.config.has_option('gerrit', 'sshkey'):
-            sshkey = self.config.get('gerrit', 'sshkey')
-        else:
-            sshkey = None
-
-        self.merger = merger.Merger(merge_root, sshkey,
-                                    merge_email, merge_name)
+        self.merger = merger.Merger(merge_root, connections, merge_email,
+                                    merge_name)
 
     def start(self):
         self._running = True
diff --git a/zuul/model.py b/zuul/model.py
index f8e0d25..46b0b98 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -13,7 +13,9 @@
 # under the License.
 
 import copy
+import os
 import re
+import struct
 import time
 from uuid import uuid4
 import extras
@@ -22,6 +24,8 @@
                                   'ordereddict.OrderedDict'])
 
 
+EMPTY_GIT_REF = '0' * 40  # git sha of all zeros, used during creates/deletes
+
 MERGER_MERGE = 1          # "git merge"
 MERGER_MERGE_RESOLVE = 2  # "git merge -s resolve"
 MERGER_CHERRY_PICK = 3    # "git cherry-pick"
@@ -79,9 +83,14 @@
         self.queues = []
         self.precedence = PRECEDENCE_NORMAL
         self.source = None
-        self.start_actions = None
-        self.success_actions = None
-        self.failure_actions = None
+        self.start_actions = []
+        self.success_actions = []
+        self.failure_actions = []
+        self.merge_failure_actions = []
+        self.disabled_actions = []
+        self.disable_at = None
+        self._consecutive_failures = 0
+        self._disabled = False
         self.window = None
         self.window_floor = None
         self.window_increase_type = None
@@ -101,7 +110,11 @@
         return job_tree
 
     def getProjects(self):
-        return sorted(self.job_trees.keys(), lambda a, b: cmp(a.name, b.name))
+        # cmp() does not exist in python3; use the key= idiom from
+        # http://python-future.org/compatible_idioms.html#cmp
+        return sorted(
+            self.job_trees.keys(),
+            key=lambda p: p.name)
 
     def addQueue(self, queue):
         self.queues.append(queue)
@@ -127,7 +140,7 @@
             return []
         return item.change.filterJobs(tree.getJobs())
 
-    def _findJobsToRun(self, job_trees, item):
+    def _findJobsToRun(self, job_trees, item, mutex):
         torun = []
         if item.item_ahead:
             # Only run jobs if any 'hold' jobs on the change ahead
@@ -146,20 +159,23 @@
                 else:
                     # There is no build for the root of this job tree,
                     # so we should run it.
-                    torun.append(job)
+                    if mutex.acquire(item, job):
+                        # If this job needs a mutex, either acquire it or make
+                        # sure that we have it before running the job.
+                        torun.append(job)
             # If there is no job, this is a null job tree, and we should
             # run all of its jobs.
             if result == 'SUCCESS' or not job:
-                torun.extend(self._findJobsToRun(tree.job_trees, item))
+                torun.extend(self._findJobsToRun(tree.job_trees, item, mutex))
         return torun
 
-    def findJobsToRun(self, item):
+    def findJobsToRun(self, item, mutex):
         if not item.live:
             return []
         tree = self.getJobTree(item.change.project)
         if not tree:
             return []
-        return self._findJobsToRun(tree.job_trees, item)
+        return self._findJobsToRun(tree.job_trees, item, mutex)
 
     def haveAllJobsStarted(self, item):
         for job in self.getJobs(item):
@@ -256,7 +272,7 @@
             items.extend(shared_queue.queue)
         return items
 
-    def formatStatusJSON(self):
+    def formatStatusJSON(self, url_pattern=None):
         j_pipeline = dict(name=self.name,
                           description=self.description)
         j_queues = []
@@ -273,7 +289,7 @@
                     if j_changes:
                         j_queue['heads'].append(j_changes)
                     j_changes = []
-                j_changes.append(e.formatJSON())
+                j_changes.append(e.formatJSON(url_pattern))
                 if (len(j_changes) > 1 and
                         (j_changes[-2]['remaining_time'] is not None) and
                         (j_changes[-1]['remaining_time'] is not None)):
@@ -285,28 +301,6 @@
         return j_pipeline
 
 
-class ActionReporter(object):
-    """An ActionReporter has a reporter and its configured parameters"""
-
-    def __repr__(self):
-        return '<ActionReporter %s, %s>' % (self.reporter, self.params)
-
-    def __init__(self, reporter, params):
-        self.reporter = reporter
-        self.params = params
-
-    def report(self, change, message):
-        """Sends the built message off to the configured reporter.
-        Takes the change and message and adds the configured parameters.
-        """
-        return self.reporter.report(change, message, self.params)
-
-    def getSubmitAllowNeeds(self):
-        """Gets the submit allow needs from the reporter based off the
-        parameters."""
-        return self.reporter.getSubmitAllowNeeds(self.params)
-
-
 class ChangeQueue(object):
     """DependentPipelines have multiple parallel queues shared by
     different projects; this is one of them.  For instance, there may
@@ -427,13 +421,17 @@
             elif self.window_decrease_type == 'exponential':
                 self.window = max(
                     self.window_floor,
-                    self.window / self.window_decrease_factor)
+                    int(self.window / self.window_decrease_factor))
 
 
 class Project(object):
-    def __init__(self, name):
+    def __init__(self, name, foreign=False):
         self.name = name
         self.merge_mode = MERGER_MERGE_RESOLVE
+        # Foreign projects are those referenced in the dependencies of
+        # layout projects; this matters when deciding whether to
+        # enqueue their changes.
+        self.foreign = foreign
 
     def __str__(self):
         return self.name
@@ -452,6 +450,8 @@
         self.failure_pattern = None
         self.success_pattern = None
         self.parameter_function = None
+        self.tags = set()
+        self.mutex = None
         # A metajob should only supply values for attributes that have
         # been explicitly provided, so avoid setting boolean defaults.
         if self.is_metajob:
@@ -498,6 +498,13 @@
             self.skip_if_matcher = other.skip_if_matcher.copy()
         if other.swift:
             self.swift.update(other.swift)
+        if other.mutex:
+            self.mutex = other.mutex
+        # Tags are merged via a union rather than a destructive copy
+        # because they are intended to accumulate as metajobs are
+        # applied.
+        if other.tags:
+            self.tags = self.tags.union(other.tags)
         # Only non-None values should be copied for boolean attributes.
         if other.hold_following_changes is not None:
             self.hold_following_changes = other.hold_following_changes
@@ -581,6 +588,8 @@
         self.retry = False
         self.parameters = {}
         self.worker = Worker()
+        self.node_labels = []
+        self.node_name = None
 
     def __repr__(self):
         return ('<Build %s of %s on %s>' %
@@ -721,7 +730,34 @@
     def setReportedResult(self, result):
         self.current_build_set.result = result
 
-    def formatJSON(self):
+    def formatJobResult(self, job, url_pattern=None):
+        build = self.current_build_set.getBuild(job.name)
+        result = build.result
+        pattern = url_pattern
+        if result == 'SUCCESS':
+            if job.success_message:
+                result = job.success_message
+            if job.success_pattern:
+                pattern = job.success_pattern
+        elif result == 'FAILURE':
+            if job.failure_message:
+                result = job.failure_message
+            if job.failure_pattern:
+                pattern = job.failure_pattern
+        url = None
+        if pattern:
+            try:
+                url = pattern.format(change=self.change,
+                                     pipeline=self.pipeline,
+                                     job=job,
+                                     build=build)
+            except Exception:
+                pass  # FIXME: log this or something?
+        if not url:
+            url = build.url or job.name
+        return (result, url)
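formatJobResult above passes the change, pipeline, job, and build objects to str.format, so a URL pattern can reference any attribute of those objects. A sketch with a hypothetical pattern and stand-in objects (the attribute names mirror the ones used in this file):

class _Stub(object):
    # Stand-in carrying only the attributes the pattern needs.
    def __init__(self, **kw):
        self.__dict__.update(kw)

pattern = 'https://logs.example.org/{change.number}/{build.uuid}/'
url = pattern.format(change=_Stub(number=1234),
                     pipeline=_Stub(name='gate'),
                     job=_Stub(name='pep8'),
                     build=_Stub(uuid='abcd'))
print(url)  # https://logs.example.org/1234/abcd/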
+
+    def formatJSON(self, url_pattern=None):
         changeish = self.change
         ret = {}
         ret['active'] = self.active
@@ -758,11 +794,13 @@
             elapsed = None
             remaining = None
             result = None
-            url = None
+            build_url = None
+            report_url = None
             worker = None
             if build:
                 result = build.result
-                url = build.url
+                build_url = build.url
+                (unused, report_url) = self.formatJobResult(job, url_pattern)
                 if build.start_time:
                     if build.end_time:
                         elapsed = int((build.end_time -
@@ -790,7 +828,8 @@
                 'name': job.name,
                 'elapsed_time': elapsed,
                 'remaining_time': remaining,
-                'url': url,
+                'url': build_url,
+                'report_url': report_url,
                 'result': result,
                 'voting': job.voting,
                 'uuid': build.uuid if build else None,
@@ -802,7 +841,9 @@
                 'canceled': build.canceled if build else None,
                 'retry': build.retry if build else None,
                 'number': build.number if build else None,
-                'worker': worker
+                'node_labels': build.node_labels if build else [],
+                'node_name': build.node_name if build else None,
+                'worker': worker,
             })
 
         if self.pipeline.haveAllJobsStarted(self):
@@ -1028,68 +1069,104 @@
 
 
 class BaseFilter(object):
-    def __init__(self, required_approvals=[]):
+    def __init__(self, required_approvals=[], reject_approvals=[]):
         self._required_approvals = copy.deepcopy(required_approvals)
-        self.required_approvals = required_approvals
+        self.required_approvals = self._tidy_approvals(required_approvals)
+        self._reject_approvals = copy.deepcopy(reject_approvals)
+        self.reject_approvals = self._tidy_approvals(reject_approvals)
 
-        for a in self.required_approvals:
+    def _tidy_approvals(self, approvals):
+        for a in approvals:
             for k, v in a.items():
                 if k == 'username':
-                    pass
+                    a['username'] = re.compile(v)
                 elif k in ['email', 'email-filter']:
                     a['email'] = re.compile(v)
                 elif k == 'newer-than':
                     a[k] = time_to_seconds(v)
                 elif k == 'older-than':
                     a[k] = time_to_seconds(v)
-                else:
-                    if not isinstance(v, list):
-                        a[k] = [v]
             if 'email-filter' in a:
                 del a['email-filter']
+        return approvals
+
+    def _match_approval_required_approval(self, rapproval, approval):
+        # Check if the required approval and approval match
+        if 'description' not in approval:
+            return False
+        now = time.time()
+        by = approval.get('by', {})
+        for k, v in rapproval.items():
+            if k == 'username':
+                if (not v.search(by.get('username', ''))):
+                    return False
+            elif k == 'email':
+                if (not v.search(by.get('email', ''))):
+                    return False
+            elif k == 'newer-than':
+                t = now - v
+                if (approval['grantedOn'] < t):
+                    return False
+            elif k == 'older-than':
+                t = now - v
+                if (approval['grantedOn'] >= t):
+                    return False
+            else:
+                if not isinstance(v, list):
+                    v = [v]
+                if (normalizeCategory(approval['description']) != k or
+                        int(approval['value']) not in v):
+                    return False
+        return True
+
+    def matchesApprovals(self, change):
+        if (self.required_approvals and not change.approvals
+                or self.reject_approvals and not change.approvals):
+            # A change with no approvals cannot match
+            return False
+
+        # TODO(jhesketh): If we wanted to optimise this slightly we could
+        # analyse both the REQUIRE and REJECT filters by looping over the
+        # approvals on the change and keeping track of what we have checked
+        # rather than needing to loop on the change approvals twice
+        return (self.matchesRequiredApprovals(change) and
+                self.matchesNoRejectApprovals(change))
 
     def matchesRequiredApprovals(self, change):
-        now = time.time()
+        # Check if any approvals match the requirements
         for rapproval in self.required_approvals:
-            matches_approval = False
+            matches_rapproval = False
             for approval in change.approvals:
-                if 'description' not in approval:
-                    continue
-                found_approval = True
-                by = approval.get('by', {})
-                for k, v in rapproval.items():
-                    if k == 'username':
-                        if (by.get('username', '') != v):
-                            found_approval = False
-                    elif k == 'email':
-                        if (not v.search(by.get('email', ''))):
-                            found_approval = False
-                    elif k == 'newer-than':
-                        t = now - v
-                        if (approval['grantedOn'] < t):
-                            found_approval = False
-                    elif k == 'older-than':
-                        t = now - v
-                        if (approval['grantedOn'] >= t):
-                            found_approval = False
-                    else:
-                        if (normalizeCategory(approval['description']) != k or
-                            int(approval['value']) not in v):
-                            found_approval = False
-                if found_approval:
-                    matches_approval = True
+                if self._match_approval_required_approval(rapproval, approval):
+                    # We have a matching approval so this requirement is
+                    # fulfilled
+                    matches_rapproval = True
                     break
-            if not matches_approval:
+            if not matches_rapproval:
                 return False
         return True
 
+    def matchesNoRejectApprovals(self, change):
+        # Check to make sure no approvals match a reject criteria
+        for rapproval in self.reject_approvals:
+            for approval in change.approvals:
+                if self._match_approval_required_approval(rapproval, approval):
+                    # A reject approval has been matched, so we reject
+                    # immediately
+                    return False
+        # To get here no rejects can have been matched so we should be good to
+        # queue
+        return True
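As the methods above show, required approvals are ANDed (every rule must be satisfied by some approval) while reject approvals are ORed (any single hit disqualifies the change). A standalone sketch of that combination:

# Standalone sketch of the require/reject combination, not zuul code.
def matches(required_ok, reject_hits):
    # required approvals are ANDed; reject approvals are ORed
    return all(required_ok) and not any(reject_hits)

print(matches([True, True], [False]))   # True: change can be queued
print(matches([True, False], [False]))  # False: a requirement is unmet
print(matches([True, True], [True]))    # False: a reject rule matched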
+
 
 class EventFilter(BaseFilter):
     def __init__(self, trigger, types=[], branches=[], refs=[],
                  event_approvals={}, comments=[], emails=[], usernames=[],
-                 timespecs=[], required_approvals=[], pipelines=[]):
+                 timespecs=[], required_approvals=[], reject_approvals=[],
+                 pipelines=[], ignore_deletes=True):
         super(EventFilter, self).__init__(
-            required_approvals=required_approvals)
+            required_approvals=required_approvals,
+            reject_approvals=reject_approvals)
         self.trigger = trigger
         self._types = types
         self._branches = branches
@@ -1107,6 +1184,7 @@
         self.pipelines = [re.compile(x) for x in pipelines]
         self.event_approvals = event_approvals
         self.timespecs = timespecs
+        self.ignore_deletes = ignore_deletes
 
     def __repr__(self):
         ret = '<EventFilter'
@@ -1119,12 +1197,17 @@
             ret += ' branches: %s' % ', '.join(self._branches)
         if self._refs:
             ret += ' refs: %s' % ', '.join(self._refs)
+        if self.ignore_deletes:
+            ret += ' ignore_deletes: %s' % self.ignore_deletes
         if self.event_approvals:
             ret += ' event_approvals: %s' % ', '.join(
                 ['%s:%s' % a for a in self.event_approvals.items()])
         if self.required_approvals:
             ret += ' required_approvals: %s' % ', '.join(
                 ['%s' % a for a in self._required_approvals])
+        if self.reject_approvals:
+            ret += ' reject_approvals: %s' % ', '.join(
+                ['%s' % a for a in self._reject_approvals])
         if self._comments:
             ret += ' comments: %s' % ', '.join(self._comments)
         if self._emails:
@@ -1170,6 +1253,10 @@
                     matches_ref = True
         if self.refs and not matches_ref:
             return False
+        if self.ignore_deletes and event.newrev == EMPTY_GIT_REF:
+            # If the updated ref has an empty git sha (all 0s),
+            # then the ref is being deleted
+            return False
 
         # comments are ORed
         matches_comment_re = False
@@ -1213,12 +1300,8 @@
             if not matches_approval:
                 return False
 
-        if self.required_approvals and not change.approvals:
-            # A change with no approvals can not match
-            return False
-
-        # required approvals are ANDed
-        if not self.matchesRequiredApprovals(change):
+        # required approvals are ANDed (reject approvals are ORed)
+        if not self.matchesApprovals(change):
             return False
 
         # timespecs are ORed
@@ -1234,9 +1317,11 @@
 
 class ChangeishFilter(BaseFilter):
     def __init__(self, open=None, current_patchset=None,
-                 statuses=[], required_approvals=[]):
+                 statuses=[], required_approvals=[],
+                 reject_approvals=[]):
         super(ChangeishFilter, self).__init__(
-            required_approvals=required_approvals)
+            required_approvals=required_approvals,
+            reject_approvals=reject_approvals)
         self.open = open
         self.current_patchset = current_patchset
         self.statuses = statuses
@@ -1251,7 +1336,11 @@
         if self.statuses:
             ret += ' statuses: %s' % ', '.join(self.statuses)
         if self.required_approvals:
-            ret += ' required_approvals: %s' % str(self.required_approvals)
+            ret += (' required_approvals: %s' %
+                    str(self.required_approvals))
+        if self.reject_approvals:
+            ret += (' reject_approvals: %s' %
+                    str(self.reject_approvals))
         ret += '>'
 
         return ret
@@ -1269,12 +1358,8 @@
             if change.status not in self.statuses:
                 return False
 
-        if self.required_approvals and not change.approvals:
-            # A change with no approvals can not match
-            return False
-
-        # required approvals are ANDed
-        if not self.matchesRequiredApprovals(change):
+        # required approvals are ANDed (reject approvals are ORed)
+        if not self.matchesApprovals(change):
             return False
 
         return True
@@ -1301,3 +1386,78 @@
                     job.copy(metajob)
             self.jobs[name] = job
         return job
+
+
+class JobTimeData(object):
+    format = 'B10H10H10B'
+    version = 0
+
+    def __init__(self, path):
+        self.path = path
+        self.success_times = [0 for x in range(10)]
+        self.failure_times = [0 for x in range(10)]
+        self.results = [0 for x in range(10)]
+
+    def load(self):
+        if not os.path.exists(self.path):
+            return
+        with open(self.path, 'rb') as f:
+            data = struct.unpack(self.format, f.read())
+        version = data[0]
+        if version != self.version:
+            raise Exception("Unknown data version")
+        self.success_times = list(data[1:11])
+        self.failure_times = list(data[11:21])
+        self.results = list(data[21:31])
+
+    def save(self):
+        tmpfile = self.path + '.tmp'
+        data = [self.version]
+        data.extend(self.success_times)
+        data.extend(self.failure_times)
+        data.extend(self.results)
+        data = struct.pack(self.format, *data)
+        with open(tmpfile, 'wb') as f:
+            f.write(data)
+        os.rename(tmpfile, self.path)
+
+    def add(self, elapsed, result):
+        elapsed = int(elapsed)
+        if result == 'SUCCESS':
+            self.success_times.append(elapsed)
+            self.success_times.pop(0)
+            result = 0
+        else:
+            self.failure_times.append(elapsed)
+            self.failure_times.pop(0)
+            result = 1
+        self.results.append(result)
+        self.results.pop(0)
+
+    def getEstimatedTime(self):
+        times = [x for x in self.success_times if x]
+        if times:
+            return float(sum(times)) / len(times)
+        return 0.0
+
+
+class TimeDataBase(object):
+    def __init__(self, root):
+        self.root = root
+        self.jobs = {}
+
+    def _getTD(self, name):
+        td = self.jobs.get(name)
+        if not td:
+            td = JobTimeData(os.path.join(self.root, name))
+            self.jobs[name] = td
+            td.load()
+        return td
+
+    def getEstimatedTime(self, name):
+        return self._getTD(name).getEstimatedTime()
+
+    def update(self, name, elapsed, result):
+        td = self._getTD(name)
+        td.add(elapsed, result)
+        td.save()
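JobTimeData's 'B10H10H10B' format packs one version byte, ten success durations and ten failure durations as unsigned shorts, and ten one-byte results. A round-trip sketch of that layout and of the estimate (the mean of the non-zero success samples); the sample durations are hypothetical:

import struct

fmt = 'B10H10H10B'
version = 0
success_times = [300, 310, 0, 0, 0, 0, 0, 0, 0, 0]
failure_times = [0] * 10
results = [0] * 10

blob = struct.pack(fmt, version, *(success_times + failure_times + results))
data = struct.unpack(fmt, blob)
assert data[0] == version
assert list(data[1:11]) == success_times

# getEstimatedTime averages only the non-zero success samples:
times = [x for x in success_times if x]
print(float(sum(times)) / len(times))  # 305.0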
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index e69de29..0c9a8d8 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -0,0 +1,151 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import logging
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseReporter(object):
+    """Base class for reporters.
+
+    Defines the exact public methods that must be supplied.
+    """
+
+    log = logging.getLogger("zuul.reporter.BaseReporter")
+
+    def __init__(self, reporter_config={}, sched=None, connection=None):
+        self.reporter_config = reporter_config
+        self.sched = sched
+        self.connection = connection
+        self._action = None
+
+    def setAction(self, action):
+        self._action = action
+
+    def stop(self):
+        """Stop the reporter."""
+
+    @abc.abstractmethod
+    def report(self, source, pipeline, item):
+        """Send the compiled report message."""
+
+    def getSubmitAllowNeeds(self):
+        """Get a list of code review labels that are allowed to be
+        "needed" in the submit records for a change, with respect
+        to this queue.  In other words, the list of review labels
+        this reporter itself is likely to set before submitting.
+        """
+        return []
+
+    def postConfig(self):
+        """Run tasks after configuration is reloaded"""
+
+    def _getFormatter(self):
+        format_methods = {
+            'start': self._formatItemReportStart,
+            'success': self._formatItemReportSuccess,
+            'failure': self._formatItemReportFailure,
+            'merge-failure': self._formatItemReportMergeFailure,
+            'disabled': self._formatItemReportDisabled
+        }
+        return format_methods[self._action]
+
+    def _formatItemReport(self, pipeline, item):
+        """Format a report from the given item, usually to provide
+        results to a reporter that takes free-form text."""
+        ret = self._getFormatter()(pipeline, item)
+
+        if pipeline.footer_message:
+            ret += '\n' + pipeline.footer_message
+
+        return ret
+
+    def _formatItemReportStart(self, pipeline, item):
+        msg = "Starting %s jobs." % pipeline.name
+        if self.sched.config.has_option('zuul', 'status_url'):
+            msg += "\n" + self.sched.config.get('zuul', 'status_url')
+        return msg
+
+    def _formatItemReportSuccess(self, pipeline, item):
+        return (pipeline.success_message + '\n\n' +
+                self._formatItemReportJobs(pipeline, item))
+
+    def _formatItemReportFailure(self, pipeline, item):
+        if item.dequeued_needing_change:
+            msg = 'This change depends on a change that failed to merge.\n'
+        elif not pipeline.didMergerSucceed(item):
+            msg = pipeline.merge_failure_message
+        else:
+            msg = (pipeline.failure_message + '\n\n' +
+                   self._formatItemReportJobs(pipeline, item))
+        return msg
+
+    def _formatItemReportMergeFailure(self, pipeline, item):
+        return pipeline.merge_failure_message
+
+    def _formatItemReportDisabled(self, pipeline, item):
+        if item.current_build_set.result == 'SUCCESS':
+            return self._formatItemReportSuccess(pipeline, item)
+        elif item.current_build_set.result == 'FAILURE':
+            return self._formatItemReportFailure(pipeline, item)
+        else:
+            # The build set result should always be SUCCESS or FAILURE
+            # by the time a disabled report is sent; re-entering
+            # _formatItemReport here would recurse for the 'disabled'
+            # action, so this branch is effectively unreachable.
+            return self._formatItemReport(pipeline, item)
+
+    def _formatItemReportJobs(self, pipeline, item):
+        # Return the list of jobs portion of the report
+        ret = ''
+
+        if self.sched.config.has_option('zuul', 'url_pattern'):
+            url_pattern = self.sched.config.get('zuul', 'url_pattern')
+        else:
+            url_pattern = None
+
+        # report_times applies uniformly to every job in the report,
+        # so read it from the config once rather than on each loop
+        # iteration.
+        if self.sched.config and self.sched.config.has_option(
+                'zuul', 'report_times'):
+            report_times = self.sched.config.getboolean(
+                'zuul', 'report_times')
+        else:
+            report_times = True
+
+        for job in pipeline.getJobs(item):
+            build = item.current_build_set.getBuild(job.name)
+            (result, url) = item.formatJobResult(job, url_pattern)
+            if not job.voting:
+                voting = ' (non-voting)'
+            else:
+                voting = ''
+
+            if report_times and build.end_time and build.start_time:
+                dt = int(build.end_time - build.start_time)
+                m, s = divmod(dt, 60)
+                h, m = divmod(m, 60)
+                if h:
+                    elapsed = ' in %dh %02dm %02ds' % (h, m, s)
+                elif m:
+                    elapsed = ' in %dm %02ds' % (m, s)
+                else:
+                    elapsed = ' in %ds' % (s)
+            else:
+                elapsed = ''
+            name = ''
+            if self.sched.config.has_option('zuul', 'job_name_in_report'):
+                if self.sched.config.getboolean('zuul',
+                                                'job_name_in_report'):
+                    name = job.name + ' '
+            ret += '- %s%s : %s%s%s\n' % (name, url, result, elapsed,
+                                          voting)
+        return ret
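A concrete reporter only has to implement report(); message formatting and action selection come from the base class. A minimal sketch of a reporter built on this pattern; LogReporter is illustrative and not part of zuul:

import logging

from zuul.reporter import BaseReporter


class LogReporter(BaseReporter):
    """Hypothetical reporter that writes reports to the log."""

    name = 'log'
    log = logging.getLogger("zuul.reporter.log.Reporter")

    def report(self, source, pipeline, item):
        # _formatItemReport picks the formatter matching the action
        # this reporter was registered for via setAction().
        message = self._formatItemReport(pipeline, item)
        self.log.info("Report for change %s:\n%s", item.change, message)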
diff --git a/zuul/reporter/gerrit.py b/zuul/reporter/gerrit.py
index 7c4774b..1427449 100644
--- a/zuul/reporter/gerrit.py
+++ b/zuul/reporter/gerrit.py
@@ -13,33 +13,40 @@
 # under the License.
 
 import logging
+import voluptuous as v
 
 
-class Reporter(object):
+from zuul.reporter import BaseReporter
+
+
+class GerritReporter(BaseReporter):
     """Sends off reports to Gerrit."""
 
     name = 'gerrit'
     log = logging.getLogger("zuul.reporter.gerrit.Reporter")
 
-    def __init__(self, trigger):
-        """Set up the reporter."""
-        self.gerrit = trigger.gerrit
-        self.trigger = trigger
-
-    def report(self, change, message, params):
+    def report(self, source, pipeline, item):
         """Send a message to gerrit."""
-        self.log.debug("Report change %s, params %s, message: %s" %
-                       (change, params, message))
-        changeid = '%s,%s' % (change.number, change.patchset)
-        change._ref_sha = self.trigger.getRefSha(change.project.name,
-                                                 'refs/heads/' + change.branch)
-        return self.gerrit.review(change.project.name, changeid, message,
-                                  params)
+        message = self._formatItemReport(pipeline, item)
 
-    def getSubmitAllowNeeds(self, params):
+        self.log.debug("Report change %s, params %s, message: %s" %
+                       (item.change, self.reporter_config, message))
+        changeid = '%s,%s' % (item.change.number, item.change.patchset)
+        item.change._ref_sha = source.getRefSha(
+            item.change.project.name, 'refs/heads/' + item.change.branch)
+
+        return self.connection.review(item.change.project.name, changeid,
+                                      message, self.reporter_config)
+
+    def getSubmitAllowNeeds(self):
         """Get a list of code review labels that are allowed to be
         "needed" in the submit records for a change, with respect
         to this queue.  In other words, the list of review labels
         this reporter itself is likely to set before submitting.
         """
-        return params
+        return self.reporter_config
+
+
+def getSchema():
+    gerrit_reporter = v.Any(str, v.Schema({}, extra=True))
+    return gerrit_reporter
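The schema above accepts either a bare string or an arbitrary mapping of review labels. A quick validation sketch using voluptuous directly; the sample configs are hypothetical:

import voluptuous as v

schema = v.Schema(v.Any(str, v.Schema({}, extra=True)))
schema('--verified 1')                     # a plain string is accepted
schema({'verified': 1, 'code-review': 2})  # so is a dict of labels
try:
    schema(42)                             # anything else is rejected
except v.Invalid:
    print('rejected')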
diff --git a/zuul/reporter/smtp.py b/zuul/reporter/smtp.py
index b214019..586b941 100644
--- a/zuul/reporter/smtp.py
+++ b/zuul/reporter/smtp.py
@@ -13,59 +13,43 @@
 # under the License.
 
 import logging
-import smtplib
+import voluptuous as v
 
-from email.mime.text import MIMEText
+from zuul.reporter import BaseReporter
 
 
-class Reporter(object):
+class SMTPReporter(BaseReporter):
     """Sends off reports to emails via SMTP."""
 
     name = 'smtp'
     log = logging.getLogger("zuul.reporter.smtp.Reporter")
 
-    def __init__(self, smtp_default_from, smtp_default_to,
-                 smtp_server='localhost', smtp_port=25):
-        """Set up the reporter.
-
-        Takes parameters for the smtp server.
-        """
-        self.smtp_server = smtp_server
-        self.smtp_port = smtp_port
-        self.smtp_default_from = smtp_default_from
-        self.smtp_default_to = smtp_default_to
-
-    def report(self, change, message, params):
+    def report(self, source, pipeline, item):
         """Send the compiled report message via smtp."""
+        message = self._formatItemReport(pipeline, item)
+
         self.log.debug("Report change %s, params %s, message: %s" %
-                       (change, params, message))
+                       (item.change, self.reporter_config, message))
 
-        # Create a text/plain email message
-        from_email = params['from']\
-            if 'from' in params else self.smtp_default_from
-        to_email = params['to']\
-            if 'to' in params else self.smtp_default_to
-        msg = MIMEText(message)
-        if 'subject' in params:
-            subject = params['subject'].format(change=change)
+        from_email = self.reporter_config.get('from')
+        to_email = self.reporter_config.get('to')
+
+        if 'subject' in self.reporter_config:
+            subject = self.reporter_config['subject'].format(
+                change=item.change)
         else:
-            subject = "Report for change %s" % change
-        msg['Subject'] = subject
-        msg['From'] = from_email
-        msg['To'] = to_email
+            subject = "Report for change %s" % item.change
 
-        try:
-            s = smtplib.SMTP(self.smtp_server, self.smtp_port)
-            s.sendmail(from_email, to_email.split(','), msg.as_string())
-            s.quit()
-        except:
-            return "Could not send email via SMTP"
-        return
+        self.connection.sendMail(subject, message, from_email, to_email)
 
-    def getSubmitAllowNeeds(self, params):
-        """Get a list of code review labels that are allowed to be
-        "needed" in the submit records for a change, with respect
-        to this queue.  In other words, the list of review labels
-        this reporter itself is likely to set before submitting.
-        """
-        return []
+
+def getSchema():
+    smtp_reporter = v.Schema({
+        'connection': str,
+        'to': str,
+        'from': str,
+        'subject': str,
+    })
+    return smtp_reporter
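Validating an SMTP reporter config against the schema above and expanding the subject template the same way report() does; the addresses and subject are hypothetical:

import voluptuous as v

schema = v.Schema({
    'connection': str,
    'to': str,
    'from': str,
    'subject': str,
})

config = schema({
    'to': 'dev-list@example.com',
    'from': 'zuul@example.com',
    'subject': 'Report for change {change}',
})

print(config['subject'].format(change='1234,1'))  # str.format, as in report()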
diff --git a/zuul/rpclistener.py b/zuul/rpclistener.py
index d54da9f..716dcfb 100644
--- a/zuul/rpclistener.py
+++ b/zuul/rpclistener.py
@@ -21,7 +21,7 @@
 import gear
 import six
 
-import model
+from zuul import model
 
 
 class RPCListener(object):
@@ -40,11 +40,11 @@
             port = 4730
         self.worker = gear.Worker('Zuul RPC Listener')
         self.worker.addServer(server, port)
+        self.worker.waitForServer()
+        self.register()
         self.thread = threading.Thread(target=self.run)
         self.thread.daemon = True
         self.thread.start()
-        self.worker.waitForServer()
-        self.register()
 
     def register(self):
         self.worker.registerFunction("zuul:enqueue")
@@ -66,8 +66,8 @@
         while self._running:
             try:
                 job = self.worker.getJob()
-                z, jobname = job.name.split(':')
                 self.log.debug("Received job %s" % job.name)
+                z, jobname = job.name.split(':')
                 attrname = 'handle_' + jobname
                 if hasattr(self, attrname):
                     f = getattr(self, attrname)
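The run loop above routes gearman jobs by name: 'zuul:enqueue' is split on the colon and dispatched to handle_enqueue via getattr. A standalone sketch of that idiom with a hypothetical handler:

class Dispatcher(object):
    # Standalone sketch of RPCListener's name-based dispatch.
    def handle_enqueue(self, job_name):
        return 'handled %s' % job_name

    def dispatch(self, job_name):
        z, name = job_name.split(':')
        attrname = 'handle_' + name
        if hasattr(self, attrname):
            return getattr(self, attrname)(job_name)
        raise RuntimeError('Unhandled job %s' % job_name)

print(Dispatcher().dispatch('zuul:enqueue'))  # handled zuul:enqueue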
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index a9bd6b2..b974762 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -20,6 +20,7 @@
 import logging
 import os
 import pickle
+import six
 from six.moves import queue as Queue
 import re
 import sys
@@ -27,11 +28,11 @@
 import time
 import yaml
 
-import layoutvalidator
-import model
-from model import ActionReporter, Pipeline, Project, ChangeQueue
-from model import EventFilter, ChangeishFilter
-from zuul import change_matcher
+from zuul import layoutvalidator
+from zuul import model
+from zuul.model import Pipeline, Project, ChangeQueue
+from zuul.model import ChangeishFilter, NullChange
+from zuul import change_matcher, exceptions
 from zuul import version as zuul_version
 
 statsd = extras.try_import('statsd.statsd')
@@ -59,20 +60,76 @@
     return ret
 
 
-class MergeFailure(Exception):
-    pass
+class MutexHandler(object):
+    log = logging.getLogger("zuul.MutexHandler")
+
+    def __init__(self):
+        self.mutexes = {}
+
+    def acquire(self, item, job):
+        if not job.mutex:
+            return True
+        mutex_name = job.mutex
+        m = self.mutexes.get(mutex_name)
+        if not m:
+            # The mutex is not held, acquire it
+            self._acquire(mutex_name, item, job.name)
+            return True
+        held_item, held_job_name = m
+        if held_item is item and held_job_name == job.name:
+            # This item already holds the mutex
+            return True
+        held_build = held_item.current_build_set.getBuild(held_job_name)
+        if held_build and held_build.result:
+            # The build that held the mutex is complete, release it
+            # and let the new item have it.
+            self.log.error("Held mutex %s being released because "
+                           "the build that holds it is complete" %
+                           (mutex_name,))
+            self._release(mutex_name, item, job.name)
+            self._acquire(mutex_name, item, job.name)
+            return True
+        return False
+
+    def release(self, item, job):
+        if not job.mutex:
+            return
+        mutex_name = job.mutex
+        m = self.mutexes.get(mutex_name)
+        if not m:
+            # The mutex is not held, nothing to do
+            self.log.error("Mutex can not be released for %s "
+                           "because the mutex is not held" %
+                           (item,))
+            return
+        held_item, held_job_name = m
+        if held_item is item and held_job_name == job.name:
+            # This item holds the mutex
+            self._release(mutex_name, item, job.name)
+            return
+        self.log.error("Mutex can not be released for %s "
+                       "which does not hold it" %
+                       (item,))
+
+    def _acquire(self, mutex_name, item, job_name):
+        self.log.debug("Job %s of item %s acquiring mutex %s" %
+                       (job_name, item, mutex_name))
+        self.mutexes[mutex_name] = (item, job_name)
+
+    def _release(self, mutex_name, item, job_name):
+        self.log.debug("Job %s of item %s releasing mutex %s" %
+                       (job_name, item, mutex_name))
+        del self.mutexes[mutex_name]
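MutexHandler serializes jobs that share a mutex name across queue items. A sketch of the acquire/release contract, assuming the MutexHandler class above is importable; FakeItem and FakeJob are illustrative stand-ins carrying only the attributes the handler touches:

class FakeBuildSet(object):
    def getBuild(self, job_name):
        return None  # pretend the holding build is still running


class FakeItem(object):
    current_build_set = FakeBuildSet()


class FakeJob(object):
    def __init__(self, name, mutex):
        self.name = name
        self.mutex = mutex


handler = MutexHandler()
job = FakeJob('pep8', 'docs-site-mutex')
first, second = FakeItem(), FakeItem()

assert handler.acquire(first, job)       # free mutex: acquired
assert not handler.acquire(second, job)  # held by another item: refused
handler.release(first, job)
assert handler.acquire(second, job)      # released: acquirable again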
 
 
 class ManagementEvent(object):
     """An event that should be processed within the main queue run loop"""
     def __init__(self):
         self._wait_event = threading.Event()
-        self._exception = None
-        self._traceback = None
+        self._exc_info = None
 
-    def exception(self, e, tb):
-        self._exception = e
-        self._traceback = tb
+    def exception(self, exc_info):
+        self._exc_info = exc_info
         self._wait_event.set()
 
     def done(self):
@@ -80,8 +137,8 @@
 
     def wait(self, timeout=None):
         self._wait_event.wait(timeout)
-        if self._exception:
-            raise self._exception, None, self._traceback
+        if self._exc_info:
+            six.reraise(*self._exc_info)
         return self._wait_event.is_set()
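The exception/wait pair above hands a failure from the handler thread back to the waiting caller; six.reraise replaces the Python 2-only three-argument raise statement so the original traceback survives on both Python 2 and 3. A standalone sketch:

import sys

import six

try:
    raise ValueError("failed in the handler thread")
except Exception:
    exc_info = sys.exc_info()  # stash (type, value, traceback)

# Later, in the waiting caller:
try:
    six.reraise(*exc_info)
except ValueError as e:
    print(e)  # the original traceback is preserved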
 
 
@@ -178,7 +235,7 @@
 class Scheduler(threading.Thread):
     log = logging.getLogger("zuul.Scheduler")
 
-    def __init__(self):
+    def __init__(self, config, testonly=False):
         threading.Thread.__init__(self)
         self.daemon = True
         self.wake_event = threading.Event()
@@ -189,24 +246,47 @@
         self._stopped = False
         self.launcher = None
         self.merger = None
+        self.mutex = MutexHandler()
+        self.connections = dict()
+        # Despite triggers being part of the pipeline, there is one
+        # trigger set per scheduler. The pipeline handles the trigger
+        # filters, but because the scheduler handles the events itself,
+        # it must also load the triggers.
+        # self.triggers['connection_name'] = triggerObject
         self.triggers = dict()
-        self.reporters = dict()
-        self.config = None
+        self.config = config
 
         self.trigger_event_queue = Queue.Queue()
         self.result_event_queue = Queue.Queue()
         self.management_event_queue = Queue.Queue()
         self.layout = model.Layout()
 
+        if not testonly:
+            time_dir = self._get_time_database_dir()
+            self.time_database = model.TimeDataBase(time_dir)
+
         self.zuul_version = zuul_version.version_info.release_string()
         self.last_reconfigured = None
 
+        # A set of reporter configuration keys to action mapping
+        self._reporter_actions = {
+            'start': 'start_actions',
+            'success': 'success_actions',
+            'failure': 'failure_actions',
+            'merge-failure': 'merge_failure_actions',
+            'disabled': 'disabled_actions',
+        }
+
     def stop(self):
         self._stopped = True
+        self._unloadDrivers()
+        self.stopConnections()
         self.wake_event.set()
 
-    def testConfig(self, config_path):
-        return self._parseConfig(config_path)
+    def testConfig(self, config_path, connections):
+        # Take the list of set up connections directly here rather than with
+        # registerConnections as we don't want to do the onLoad event yet.
+        return self._parseConfig(config_path, connections)
 
     def _parseSkipIf(self, config_job):
         cm = change_matcher
@@ -236,7 +316,82 @@
             # Any skip-if predicate can be matched to trigger a skip
             return cm.MatchAny(skip_matchers)
 
-    def _parseConfig(self, config_path):
+    def registerConnections(self, connections, load=True):
+        # load: whether or not to trigger the onLoad for the connection. This
+        # is useful for not doing a full load during layout validation.
+        self.connections = connections
+        for connection_name, connection in self.connections.items():
+            connection.registerScheduler(self)
+            if load:
+                connection.onLoad()
+
+    def stopConnections(self):
+        for connection_name, connection in self.connections.items():
+            connection.onStop()
+
+    def _unloadDrivers(self):
+        for trigger in self.triggers.values():
+            trigger.stop()
+        self.triggers = {}
+        for pipeline in self.layout.pipelines.values():
+            pipeline.source.stop()
+            for action in self._reporter_actions.values():
+                for reporter in getattr(pipeline, action):
+                    reporter.stop()
+
+    def _getDriver(self, dtype, connection_name, driver_config={}):
+        # Instantiate a driver such as a trigger, source or reporter
+        # TODO(jhesketh): Make this list dynamic or use entrypoints etc.
+        # Stevedore was not a good fit here due to the nature of triggers.
+        # Specifically we don't want to load a trigger per a pipeline as one
+        # trigger can listen to a stream (from gerrit, for example) and the
+        # scheduler decides which eventfilter to use. As such we want to load
+        # trigger+connection pairs uniquely.
+        drivers = {
+            'source': {
+                'gerrit': 'zuul.source.gerrit:GerritSource',
+            },
+            'trigger': {
+                'gerrit': 'zuul.trigger.gerrit:GerritTrigger',
+                'timer': 'zuul.trigger.timer:TimerTrigger',
+                'zuul': 'zuul.trigger.zuultrigger:ZuulTrigger',
+            },
+            'reporter': {
+                'gerrit': 'zuul.reporter.gerrit:GerritReporter',
+                'smtp': 'zuul.reporter.smtp:SMTPReporter',
+            },
+        }
+
+        # TODO(jhesketh): Check the connection_name exists
+        if connection_name in self.connections:
+            driver_name = self.connections[connection_name].driver_name
+            connection = self.connections[connection_name]
+        else:
+            # In some cases a driver may not be related to a connection. For
+            # example, the 'timer' or 'zuul' triggers.
+            driver_name = connection_name
+            connection = None
+        driver = drivers[dtype][driver_name].split(':')
+        driver_instance = getattr(
+            __import__(driver[0], fromlist=['']), driver[1])(
+                driver_config, self, connection
+        )
+
+        if connection:
+            connection.registerUse(dtype, driver_instance)
+
+        return driver_instance
+
+    def _getSourceDriver(self, connection_name):
+        return self._getDriver('source', connection_name)
+
+    def _getReporterDriver(self, connection_name, driver_config={}):
+        return self._getDriver('reporter', connection_name, driver_config)
+
+    def _getTriggerDriver(self, connection_name, driver_config={}):
+        return self._getDriver('trigger', connection_name, driver_config)
+
+    def _parseConfig(self, config_path, connections):
         layout = model.Layout()
         project_templates = {}
 
@@ -245,11 +400,11 @@
             if not os.path.exists(config_path):
                 raise Exception("Unable to read layout config file at %s" %
                                 config_path)
-        config_file = open(config_path)
-        data = yaml.load(config_file)
+        with open(config_path) as config_file:
+            data = yaml.load(config_file)
 
         validator = layoutvalidator.LayoutValidator()
-        validator.validate(data)
+        validator.validate(data, connections)
 
         config_env = {}
         for include in data.get('includes', []):
@@ -259,14 +414,16 @@
                     base = os.path.dirname(os.path.realpath(config_path))
                     fn = os.path.join(base, fn)
                 fn = os.path.expanduser(fn)
-                execfile(fn, config_env)
+                with open(fn) as _f:
+                    code = compile(_f.read(), fn, 'exec')
+                    six.exec_(code, config_env)
 
         for conf_pipeline in data.get('pipelines', []):
             pipeline = Pipeline(conf_pipeline['name'])
             pipeline.description = conf_pipeline.get('description')
             # TODO(jeblair): remove backwards compatibility:
-            pipeline.source = self.triggers[conf_pipeline.get('source',
-                                                              'gerrit')]
+            pipeline.source = self._getSourceDriver(
+                conf_pipeline.get('source', 'gerrit'))
             precedence = model.PRECEDENCE_MAP[conf_pipeline.get('precedence')]
             pipeline.precedence = precedence
             pipeline.failure_message = conf_pipeline.get('failure-message',
@@ -285,26 +442,23 @@
             pipeline.ignore_dependencies = conf_pipeline.get(
                 'ignore-dependencies', False)
 
-            action_reporters = {}
-            for action in ['start', 'success', 'failure', 'merge-failure']:
-                action_reporters[action] = []
-                if conf_pipeline.get(action):
+            for conf_key, action in self._reporter_actions.items():
+                reporter_set = []
+                if conf_pipeline.get(conf_key):
                     for reporter_name, params \
-                        in conf_pipeline.get(action).items():
-                        if reporter_name in self.reporters.keys():
-                            action_reporters[action].append(ActionReporter(
-                                self.reporters[reporter_name], params))
-                        else:
-                            self.log.error('Invalid reporter name %s' %
-                                           reporter_name)
-            pipeline.start_actions = action_reporters['start']
-            pipeline.success_actions = action_reporters['success']
-            pipeline.failure_actions = action_reporters['failure']
-            if len(action_reporters['merge-failure']) > 0:
-                pipeline.merge_failure_actions = \
-                    action_reporters['merge-failure']
-            else:
-                pipeline.merge_failure_actions = action_reporters['failure']
+                        in conf_pipeline.get(conf_key).items():
+                        reporter = self._getReporterDriver(reporter_name,
+                                                           params)
+                        reporter.setAction(conf_key)
+                        reporter_set.append(reporter)
+                setattr(pipeline, action, reporter_set)
+
+            # If merge-failure actions aren't explicit, use the failure actions
+            if not pipeline.merge_failure_actions:
+                pipeline.merge_failure_actions = pipeline.failure_actions
+
+            pipeline.disable_at = conf_pipeline.get(
+                'disable-after-consecutive-failures', None)
 
             pipeline.window = conf_pipeline.get('window', 20)
             pipeline.window_floor = conf_pipeline.get('window-floor', 3)
@@ -321,64 +475,28 @@
             pipeline.setManager(manager)
             layout.pipelines[conf_pipeline['name']] = pipeline
 
-            if 'require' in conf_pipeline:
-                require = conf_pipeline['require']
+            if 'require' in conf_pipeline or 'reject' in conf_pipeline:
+                require = conf_pipeline.get('require', {})
+                reject = conf_pipeline.get('reject', {})
                 f = ChangeishFilter(
                     open=require.get('open'),
                     current_patchset=require.get('current-patchset'),
                     statuses=toList(require.get('status')),
-                    required_approvals=toList(require.get('approval')))
+                    required_approvals=toList(require.get('approval')),
+                    reject_approvals=toList(reject.get('approval'))
+                )
                 manager.changeish_filters.append(f)
 
-            # TODO: move this into triggers (may require pluggable
-            # configuration)
-            if 'gerrit' in conf_pipeline['trigger']:
-                for trigger in toList(conf_pipeline['trigger']['gerrit']):
-                    approvals = {}
-                    for approval_dict in toList(trigger.get('approval')):
-                        for k, v in approval_dict.items():
-                            approvals[k] = v
-                    # Backwards compat for *_filter versions of these args
-                    comments = toList(trigger.get('comment'))
-                    if not comments:
-                        comments = toList(trigger.get('comment_filter'))
-                    emails = toList(trigger.get('email'))
-                    if not emails:
-                        emails = toList(trigger.get('email_filter'))
-                    usernames = toList(trigger.get('username'))
-                    if not usernames:
-                        usernames = toList(trigger.get('username_filter'))
-                    f = EventFilter(
-                        trigger=self.triggers['gerrit'],
-                        types=toList(trigger['event']),
-                        branches=toList(trigger.get('branch')),
-                        refs=toList(trigger.get('ref')),
-                        event_approvals=approvals,
-                        comments=comments,
-                        emails=emails,
-                        usernames=usernames,
-                        required_approvals=toList(
-                            trigger.get('require-approval')
-                        )
-                    )
-                    manager.event_filters.append(f)
-            if 'timer' in conf_pipeline['trigger']:
-                for trigger in toList(conf_pipeline['trigger']['timer']):
-                    f = EventFilter(trigger=self.triggers['timer'],
-                                    types=['timer'],
-                                    timespecs=toList(trigger['time']))
-                    manager.event_filters.append(f)
-            if 'zuul' in conf_pipeline['trigger']:
-                for trigger in toList(conf_pipeline['trigger']['zuul']):
-                    f = EventFilter(
-                        trigger=self.triggers['zuul'],
-                        types=toList(trigger['event']),
-                        pipelines=toList(trigger.get('pipeline')),
-                        required_approvals=toList(
-                            trigger.get('require-approval')
-                        )
-                    )
-                    manager.event_filters.append(f)
+            for trigger_name, trigger_config in \
+                    conf_pipeline.get('trigger').items():
+                if trigger_name not in self.triggers:
+                    self.triggers[trigger_name] = \
+                        self._getTriggerDriver(trigger_name, trigger_config)
+
+            for trigger_name, trigger in self.triggers.items():
+                if trigger_name in conf_pipeline['trigger']:
+                    manager.event_filters += trigger.getEventFilters(
+                        conf_pipeline['trigger'][trigger_name])
 
         for project_template in data.get('project-templates', []):
             # Make sure the template only contains valid pipelines
@@ -414,6 +532,16 @@
             m = config_job.get('voting', None)
             if m is not None:
                 job.voting = m
+            m = config_job.get('mutex', None)
+            if m is not None:
+                job.mutex = m
+            tags = toList(config_job.get('tags'))
+            if tags:
+                # Tags are merged via a union rather than a
+                # destructive copy because they are intended to
+                # accumulate onto any previously applied tags from
+                # metajobs.
+                job.tags = job.tags.union(set(tags))
             fname = config_job.get('parameter-function', None)
             if fname:
                 func = config_env.get(fname, None)
@@ -497,21 +625,15 @@
     def setMerger(self, merger):
         self.merger = merger
 
-    def registerTrigger(self, trigger, name=None):
-        if name is None:
-            name = trigger.name
-        self.triggers[name] = trigger
-
-    def registerReporter(self, reporter, name=None):
-        if name is None:
-            name = reporter.name
-        self.reporters[name] = reporter
-
-    def getProject(self, name):
+    def getProject(self, name, create_foreign=False):
         self.layout_lock.acquire()
         p = None
         try:
             p = self.layout.projects.get(name)
+            if p is None and create_foreign:
+                self.log.info("Registering foreign project: %s" % name)
+                p = Project(name, foreign=True)
+                self.layout.projects[name] = p
         finally:
             self.layout_lock.release()
         return p
@@ -547,14 +669,29 @@
         try:
             if statsd and build.pipeline:
                 jobname = build.job.name.replace('.', '_')
+                key = 'zuul.pipeline.%s.all_jobs' % build.pipeline.name
+                statsd.incr(key)
+                for label in build.node_labels:
+                    # Jenkins includes the node name in its list of labels, so
+                    # we filter it out here, since that is not statistically
+                    # interesting.
+                    if label == build.node_name:
+                        continue
+                    dt = int((build.start_time - build.launch_time) * 1000)
+                    key = 'zuul.pipeline.%s.label.%s.wait_time' % (
+                        build.pipeline.name, label)
+                    statsd.timing(key, dt)
                 key = 'zuul.pipeline.%s.job.%s.%s' % (build.pipeline.name,
                                                       jobname, build.result)
                 if build.result in ['SUCCESS', 'FAILURE'] and build.start_time:
                     dt = int((build.end_time - build.start_time) * 1000)
                     statsd.timing(key, dt)
                 statsd.incr(key)
-                key = 'zuul.pipeline.%s.all_jobs' % build.pipeline.name
-                statsd.incr(key)
+
+                key = 'zuul.pipeline.%s.job.%s.wait_time' % (
+                    build.pipeline.name, jobname)
+                dt = int((build.start_time - build.launch_time) * 1000)
+                statsd.timing(key, dt)
         except:
             self.log.exception("Exception reporting runtime stats")
         event = BuildCompletedEvent(build)
@@ -611,6 +748,17 @@
             state_dir = '/var/lib/zuul'
         return os.path.join(state_dir, 'queue.pickle')
 
+    def _get_time_database_dir(self):
+        if self.config.has_option('zuul', 'state_dir'):
+            state_dir = os.path.expanduser(self.config.get('zuul',
+                                                           'state_dir'))
+        else:
+            state_dir = '/var/lib/zuul'
+        d = os.path.join(state_dir, 'times')
+        if not os.path.exists(d):
+            os.mkdir(d)
+        return d
+
     def _save_queue(self):
         pickle_file = self._get_queue_pickle_file()
         events = []
@@ -663,8 +811,9 @@
         self.config = event.config
         try:
             self.log.debug("Performing reconfiguration")
+            self._unloadDrivers()
             layout = self._parseConfig(
-                self.config.get('zuul', 'layout_config'))
+                self.config.get('zuul', 'layout_config'), self.connections)
             for name, new_pipeline in layout.pipelines.items():
                 old_pipeline = self.layout.pipelines.get(name)
                 if not old_pipeline:
@@ -685,15 +834,15 @@
                         item.items_behind = []
                         item.pipeline = None
                         item.queue = None
-                        project = layout.projects.get(item.change.project.name)
-                        if not project:
-                            self.log.warning("Unable to find project for "
-                                             "change %s while reenqueueing" %
-                                             item.change)
-                            item.change.project = None
-                            items_to_remove.append(item)
-                            continue
-                        item.change.project = project
+                        project_name = item.change.project.name
+                        item.change.project = layout.projects.get(project_name)
+                        if not item.change.project:
+                            self.log.debug("Project %s not defined, "
+                                           "re-instantiating as foreign" %
+                                           project_name)
+                            project = Project(project_name, foreign=True)
+                            layout.projects[project_name] = project
+                            item.change.project = project
                         item_jobs = new_pipeline.getJobs(item)
                         for build in item.current_build_set.getBuilds():
                             job = layout.jobs.get(build.job.name)
@@ -718,9 +867,14 @@
                             "Exception while canceling build %s "
                             "for change %s" % (build, item.change))
             self.layout = layout
-            self.maintainTriggerCache()
+            self.maintainConnectionCache()
             for trigger in self.triggers.values():
                 trigger.postConfig()
+            for pipeline in self.layout.pipelines.values():
+                pipeline.source.postConfig()
+                for action in self._reporter_actions.values():
+                    for reporter in getattr(pipeline, action):
+                        reporter.postConfig()
             if statsd:
                 try:
                     for pipeline in self.layout.pipelines.values():
@@ -843,17 +997,18 @@
             finally:
                 self.run_handler_lock.release()
 
-    def maintainTriggerCache(self):
+    def maintainConnectionCache(self):
         relevant = set()
         for pipeline in self.layout.pipelines.values():
-            self.log.debug("Start maintain trigger cache for: %s" % pipeline)
+            self.log.debug("Gather relevant cache items for: %s" % pipeline)
             for item in pipeline.getAllItems():
                 relevant.add(item.change)
                 relevant.update(item.change.getRelatedChanges())
-            self.log.debug("End maintain trigger cache for: %s" % pipeline)
-        self.log.debug("Trigger cache size: %s" % len(relevant))
-        for trigger in self.triggers.values():
-            trigger.maintainCache(relevant)
+        for connection in self.connections.values():
+            connection.maintainCache(relevant)
+            self.log.debug(
+                "End maintain connection cache for: %s" % connection)
+        self.log.debug("Connection cache size: %s" % len(relevant))
 
     def process_event_queue(self):
         self.log.debug("Fetching trigger event")
@@ -861,12 +1016,22 @@
         self.log.debug("Processing trigger event %s" % event)
         try:
             project = self.layout.projects.get(event.project_name)
-            if not project:
-                self.log.debug("Project %s not found" % event.project_name)
-                return
 
             for pipeline in self.layout.pipelines.values():
-                change = pipeline.source.getChange(event, project)
+                # Get the change even if the project is unknown to us for the
+                # use of updating the cache if there is another change
+                # depending on this foreign one.
+                try:
+                    change = pipeline.source.getChange(event, project)
+                except exceptions.ChangeNotFound as e:
+                    self.log.debug("Unable to get change %s from source %s. "
+                                   "(most likely looking for a change from "
+                                   "another connection's trigger)",
+                                   e.change, pipeline.source)
+                    continue
+                if not project or project.foreign:
+                    self.log.debug("Project %s not found" % event.project_name)
+                    continue
                 if event.type == 'patchset-created':
                     pipeline.manager.removeOldVersionsOfChange(change)
                 elif event.type == 'change-abandoned':
@@ -892,8 +1057,8 @@
             else:
                 self.log.error("Unable to handle event %s" % event)
             event.done()
-        except Exception as e:
-            event.exception(e, sys.exc_info()[2])
+        except Exception:
+            event.exception(sys.exc_info())
         self.management_event_queue.task_done()
 
     def process_result_queue(self):
@@ -923,6 +1088,11 @@
             self.log.warning("Build %s is not associated with a pipeline" %
                              (build,))
             return
+        try:
+            build.estimated_time = float(self.time_database.getEstimatedTime(
+                build.job.name))
+        except Exception:
+            self.log.exception("Exception estimating build time:")
         pipeline.manager.onBuildStarted(event.build)
 
     def _doBuildCompletedEvent(self, event):
@@ -936,6 +1106,13 @@
             self.log.warning("Build %s is not associated with a pipeline" %
                              (build,))
             return
+        if build.end_time and build.start_time and build.result:
+            duration = build.end_time - build.start_time
+            try:
+                self.time_database.update(
+                    build.job.name, duration, build.result)
+            except Exception:
+                self.log.exception("Exception recording build time:")
         pipeline.manager.onBuildCompleted(event.build)
 
     def _doMergeCompletedEvent(self, event):
@@ -951,6 +1128,11 @@
         pipeline.manager.onMergeCompleted(event)
 
     def formatStatusJSON(self):
+        if self.config.has_option('zuul', 'url_pattern'):
+            url_pattern = self.config.get('zuul', 'url_pattern')
+        else:
+            url_pattern = None
+
         data = {}
 
         data['zuul_version'] = self.zuul_version
@@ -976,7 +1158,7 @@
         pipelines = []
         data['pipelines'] = pipelines
         for pipeline in self.layout.pipelines.values():
-            pipelines.append(pipeline.formatStatusJSON())
+            pipelines.append(pipeline.formatStatusJSON(url_pattern))
         return json.dumps(data)
 
 
@@ -988,12 +1170,6 @@
         self.pipeline = pipeline
         self.event_filters = []
         self.changeish_filters = []
-        if self.sched.config and self.sched.config.has_option(
-            'zuul', 'report_times'):
-            self.report_times = self.sched.config.getboolean(
-                'zuul', 'report_times')
-        else:
-            self.report_times = True
 
     def __str__(self):
         return "<%s %s>" % (self.__class__.__name__, self.pipeline.name)
@@ -1021,14 +1197,16 @@
                     efilters += str(tree.job.skip_if_matcher)
                 if efilters:
                     efilters = ' ' + efilters
-                hold = ''
+                tags = []
                 if tree.job.hold_following_changes:
-                    hold = ' [hold]'
-                voting = ''
+                    tags.append('[hold]')
                 if not tree.job.voting:
-                    voting = ' [nonvoting]'
-                self.log.info("%s%s%s%s%s" % (istr, repr(tree.job),
-                                              efilters, hold, voting))
+                    tags.append('[nonvoting]')
+                if tree.job.mutex:
+                    tags.append('[mutex: %s]' % tree.job.mutex)
+                tags = ' '.join(tags)
+                self.log.info("%s%s%s %s" % (istr, repr(tree.job),
+                                             efilters, tags))
             for x in tree.job_trees:
                 log_jobs(x, indent + 2)
 
@@ -1045,6 +1223,8 @@
         self.log.info("    %s" % self.pipeline.failure_actions)
         self.log.info("  On merge-failure:")
         self.log.info("    %s" % self.pipeline.merge_failure_actions)
+        self.log.info("  When disabled:")
+        self.log.info("    %s" % self.pipeline.disabled_actions)
 
     def getSubmitAllowNeeds(self):
         # Get a list of code review labels that are allowed to be
@@ -1085,31 +1265,30 @@
                 return True
         return False
 
-    def reportStart(self, change):
-        try:
-            self.log.info("Reporting start, action %s change %s" %
-                          (self.pipeline.start_actions, change))
-            msg = "Starting %s jobs." % self.pipeline.name
-            if self.sched.config.has_option('zuul', 'status_url'):
-                msg += "\n" + self.sched.config.get('zuul', 'status_url')
-            ret = self.sendReport(self.pipeline.start_actions,
-                                  change, msg)
-            if ret:
-                self.log.error("Reporting change start %s received: %s" %
-                               (change, ret))
-        except:
-            self.log.exception("Exception while reporting start:")
+    def reportStart(self, item):
+        if not self.pipeline._disabled:
+            try:
+                self.log.info("Reporting start, action %s item %s" %
+                              (self.pipeline.start_actions, item))
+                ret = self.sendReport(self.pipeline.start_actions,
+                                      self.pipeline.source, item)
+                if ret:
+                    self.log.error("Reporting item start %s received: %s" %
+                                   (item, ret))
+            except:
+                self.log.exception("Exception while reporting start:")
 
-    def sendReport(self, action_reporters, change, message):
+    def sendReport(self, action_reporters, source, item,
+                   message=None):
         """Sends the built message off to configured reporters.
 
-        Takes the action_reporters, change, message and extra options and
+        Takes the action_reporters, source, item and optional message and
         sends them to the pluggable reporters.
         """
         report_errors = []
         if len(action_reporters) > 0:
-            for action_reporter in action_reporters:
-                ret = action_reporter.report(change, message)
+            for reporter in action_reporters:
+                ret = reporter.report(source, self.pipeline, item)
                 if ret:
                     report_errors.append(ret)
             if len(report_errors) == 0:
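
The reporters driven by this loop now receive the source and pipeline
instead of a pre-formatted message, and the error handling implies that
``report()`` returns a falsy value on success and an error otherwise. A
minimal sketch of a reporter compatible with that contract (a
hypothetical class, not one of zuul's real reporters)::

    import logging


    class LogReporter(object):
        log = logging.getLogger("zuul.reporter.Log")

        def report(self, source, pipeline, item):
            try:
                self.log.info("Pipeline %s reported item %s",
                              pipeline.name, item)
                return None       # success
            except Exception as e:
                return str(e)     # collected into report_errors above
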
@@ -1245,18 +1424,18 @@
 
             self.log.debug("Adding change %s to queue %s" %
                            (change, change_queue))
-            if not quiet:
-                if len(self.pipeline.start_actions) > 0:
-                    self.reportStart(change)
             item = change_queue.enqueueChange(change)
             if enqueue_time:
                 item.enqueue_time = enqueue_time
             item.live = live
             self.reportStats(item)
+            if not quiet:
+                if len(self.pipeline.start_actions) > 0:
+                    self.reportStart(item)
             self.enqueueChangesBehind(change, quiet, ignore_requirements,
                                       change_queue)
-            self.sched.triggers['zuul'].onChangeEnqueued(item.change,
-                                                         self.pipeline)
+            for trigger in self.sched.triggers.values():
+                trigger.onChangeEnqueued(item.change, self.pipeline)
             return True
 
     def dequeueItem(self, item):
@@ -1285,9 +1464,11 @@
         elif hasattr(item.change, 'newrev'):
             oldrev = item.change.oldrev
             newrev = item.change.newrev
+        connection_name = self.pipeline.source.connection.connection_name
         return dict(project=item.change.project.name,
                     url=self.pipeline.source.getGitUrl(
                         item.change.project),
+                    connection_name=connection_name,
                     merge_mode=item.change.project.merge_mode,
                     refspec=item.change.refspec,
                     branch=item.change.branch,
@@ -1305,7 +1486,6 @@
             return True
         if build_set.merge_state == build_set.PENDING:
             return False
-        build_set.merge_state = build_set.PENDING
         ref = build_set.ref
         if hasattr(item.change, 'refspec') and not ref:
             self.log.debug("Preparing ref for: %s" % item.change)
@@ -1323,6 +1503,8 @@
             self.sched.merger.updateRepo(item.change.project.name,
                                          url, build_set,
                                          self.pipeline.precedence)
+        # The merge request has been submitted to the merger, so it is
+        # now safe to mark the build set as pending:
+        build_set.merge_state = build_set.PENDING
         return False
 
     def _launchJobs(self, item, jobs):
@@ -1342,7 +1524,7 @@
                                    "for change %s:" % (job, item.change))
 
     def launchJobs(self, item):
-        jobs = self.pipeline.findJobsToRun(item)
+        jobs = self.pipeline.findJobsToRun(item, self.sched.mutex)
         if jobs:
             self._launchJobs(item, jobs)
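
``findJobsToRun`` now consults a scheduler-wide mutex object, and
``onBuildCompleted`` (below) releases it once a build finishes. The
shape implied by those calls is acquire/release keyed by a job's
``mutex`` name; a minimal sketch under that assumption, not the real
implementation::

    class JobMutex(object):
        def __init__(self):
            self.held = {}  # mutex name -> (item, job) holding it

        def acquire(self, item, job):
            if not job.mutex:
                return True
            if job.mutex in self.held:
                return False  # held elsewhere; do not launch yet
            self.held[job.mutex] = (item, job)
            return True

        def release(self, item, job):
            if job.mutex and self.held.get(job.mutex) == (item, job):
                del self.held[job.mutex]
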
 
@@ -1386,7 +1568,7 @@
             if item.live:
                 try:
                     self.reportItem(item)
-                except MergeFailure:
+                except exceptions.MergeFailure:
                     pass
             return (True, nnfi)
         dep_items = self.getFailingDependentItems(item)
@@ -1427,7 +1609,7 @@
             and item.live):
             try:
                 self.reportItem(item)
-            except MergeFailure:
+            except exceptions.MergeFailure:
                 failing_reasons.append("it did not merge")
                 for item_behind in item.items_behind:
                     self.log.info("Resetting builds for change %s because the "
@@ -1471,13 +1653,23 @@
 
     def updateBuildDescriptions(self, build_set):
         for build in build_set.getBuilds():
-            desc = self.formatDescription(build)
-            self.sched.launcher.setBuildDescription(build, desc)
+            try:
+                desc = self.formatDescription(build)
+                self.sched.launcher.setBuildDescription(build, desc)
+            except:
+                # Log the failure and let loop continue
+                self.log.error("Failed to update description for build %s" %
+                               (build))
 
         if build_set.previous_build_set:
             for build in build_set.previous_build_set.getBuilds():
-                desc = self.formatDescription(build)
-                self.sched.launcher.setBuildDescription(build, desc)
+                try:
+                    desc = self.formatDescription(build)
+                    self.sched.launcher.setBuildDescription(build, desc)
+                except:
+                    # Log the failure and let loop continue
+                    self.log.error("Failed to update description for "
+                                   "build %s in previous build set" % (build))
 
     def onBuildStarted(self, build):
         self.log.debug("Build %s started" % build)
@@ -1488,6 +1680,7 @@
         item = build.build_set.item
 
         self.pipeline.setResult(item, build)
+        self.sched.mutex.release(item, build.job)
         self.log.debug("Item %s status is now:\n %s" %
                        (item, item.formatStatus()))
         return True
@@ -1500,8 +1693,9 @@
         if event.merged:
             build_set.commit = event.commit
         elif event.updated:
-            build_set.commit = item.change.newrev
-        if not build_set.commit:
+            if not isinstance(item.change, NullChange):
+                build_set.commit = item.change.newrev
+        if not build_set.commit and not isinstance(item.change, NullChange):
             self.log.info("Unable to merge change %s" % item.change)
             self.pipeline.setUnableToMerge(item)
 
@@ -1524,12 +1718,15 @@
                 change_queue.decreaseWindowSize()
                 self.log.debug("%s window size decreased to %s" %
                                (change_queue, change_queue.window))
-                raise MergeFailure("Change %s failed to merge" % item.change)
+                raise exceptions.MergeFailure(
+                    "Change %s failed to merge" % item.change)
             else:
                 change_queue.increaseWindowSize()
                 self.log.debug("%s window size increased to %s" %
                                (change_queue, change_queue.window))
-                self.sched.triggers['zuul'].onChangeMerged(item.change)
+
+                for trigger in self.sched.triggers.values():
+                    trigger.onChangeMerged(item.change, self.pipeline.source)
 
     def _reportItem(self, item):
         self.log.debug("Reporting change %s" % item.change)
@@ -1544,101 +1741,36 @@
             self.log.debug("success %s" % (self.pipeline.success_actions))
             actions = self.pipeline.success_actions
             item.setReportedResult('SUCCESS')
+            self.pipeline._consecutive_failures = 0
         elif not self.pipeline.didMergerSucceed(item):
             actions = self.pipeline.merge_failure_actions
             item.setReportedResult('MERGER_FAILURE')
         else:
             actions = self.pipeline.failure_actions
             item.setReportedResult('FAILURE')
+            self.pipeline._consecutive_failures += 1
+        if self.pipeline._disabled:
+            actions = self.pipeline.disabled_actions
+        # Check whether to disable here so that the failure that
+        # reaches disable_at is still reported through the normal
+        # reporters, and only later results use the disabled ones.
+        if (self.pipeline.disable_at and not self.pipeline._disabled and
+            self.pipeline._consecutive_failures >= self.pipeline.disable_at):
+            self.pipeline._disabled = True
         if actions:
-            report = self.formatReport(item)
             try:
-                self.log.info("Reporting change %s, actions: %s" %
-                              (item.change, actions))
-                ret = self.sendReport(actions, item.change, report)
+                self.log.info("Reporting item %s, actions: %s" %
+                              (item, actions))
+                ret = self.sendReport(actions, self.pipeline.source, item)
                 if ret:
-                    self.log.error("Reporting change %s received: %s" %
-                                   (item.change, ret))
+                    self.log.error("Reporting item %s received: %s" %
+                                   (item, ret))
             except:
                 self.log.exception("Exception while reporting:")
                 item.setReportedResult('ERROR')
         self.updateBuildDescriptions(item.current_build_set)
         return ret
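
A worked example of the ordering above: with ``disable-at: 3``, the
third consecutive failure still reports through the normal failure
actions, and only later items use the disabled reporters. A sketch of
just this bookkeeping (re-enabling is handled outside this hunk)::

    disable_at = 3
    consecutive_failures = 0
    disabled = False
    for result in ['FAILURE'] * 4:
        consecutive_failures += 1
        uses_disabled_reporters = disabled
        # The flag flips only after reporters were chosen, so the
        # failure that reaches disable_at reports normally.
        if (disable_at and not disabled and
                consecutive_failures >= disable_at):
            disabled = True
        print(uses_disabled_reporters)
    # -> False, False, False, True
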
 
-    def formatReport(self, item):
-        ret = ''
-
-        if item.dequeued_needing_change:
-            ret += 'This change depends on a change that failed to merge.\n'
-        elif not self.pipeline.didMergerSucceed(item):
-            ret += self.pipeline.merge_failure_message
-        else:
-            if self.pipeline.didAllJobsSucceed(item):
-                ret += self.pipeline.success_message + '\n\n'
-            else:
-                ret += self.pipeline.failure_message + '\n\n'
-            ret += self._formatReportJobs(item)
-
-        if self.pipeline.footer_message:
-            ret += '\n' + self.pipeline.footer_message
-
-        return ret
-
-    def _formatReportJobs(self, item):
-        # Return the list of jobs portion of the report
-        ret = ''
-
-        if self.sched.config.has_option('zuul', 'url_pattern'):
-            url_pattern = self.sched.config.get('zuul', 'url_pattern')
-        else:
-            url_pattern = None
-
-        for job in self.pipeline.getJobs(item):
-            build = item.current_build_set.getBuild(job.name)
-            result = build.result
-            pattern = url_pattern
-            if result == 'SUCCESS':
-                if job.success_message:
-                    result = job.success_message
-                if job.success_pattern:
-                    pattern = job.success_pattern
-            elif result == 'FAILURE':
-                if job.failure_message:
-                    result = job.failure_message
-                if job.failure_pattern:
-                    pattern = job.failure_pattern
-            if pattern:
-                url = pattern.format(change=item.change,
-                                     pipeline=self.pipeline,
-                                     job=job,
-                                     build=build)
-            else:
-                url = build.url or job.name
-            if not job.voting:
-                voting = ' (non-voting)'
-            else:
-                voting = ''
-            if self.report_times and build.end_time and build.start_time:
-                dt = int(build.end_time - build.start_time)
-                m, s = divmod(dt, 60)
-                h, m = divmod(m, 60)
-                if h:
-                    elapsed = ' in %dh %02dm %02ds' % (h, m, s)
-                elif m:
-                    elapsed = ' in %dm %02ds' % (m, s)
-                else:
-                    elapsed = ' in %ds' % (s)
-            else:
-                elapsed = ''
-            name = ''
-            if self.sched.config.has_option('zuul', 'job_name_in_report'):
-                if self.sched.config.getboolean('zuul',
-                                                'job_name_in_report'):
-                    name = job.name + ' '
-            ret += '- %s%s : %s%s%s\n' % (name, url, result, elapsed,
-                                          voting)
-        return ret
-
     def formatDescription(self, build):
         concurrent_changes = ''
         concurrent_builds = ''
@@ -1797,10 +1929,11 @@
         if existing:
             return DynamicChangeQueueContextManager(existing)
         if change.project not in self.pipeline.getProjects():
-            return DynamicChangeQueueContextManager(None)
+            self.pipeline.addProject(change.project)
         change_queue = ChangeQueue(self.pipeline)
         change_queue.addProject(change.project)
         self.pipeline.addQueue(change_queue)
+        self.log.debug("Dynamically created queue %s", change_queue)
         return DynamicChangeQueueContextManager(change_queue)
 
     def enqueueChangesAhead(self, change, quiet, ignore_requirements,
diff --git a/zuul/source/__init__.py b/zuul/source/__init__.py
new file mode 100644
index 0000000..cb4501a
--- /dev/null
+++ b/zuul/source/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseSource(object):
+    """Base class for sources.
+
+    A source class provides methods for fetching and updating changes. Each
+    pipeline has exactly one source, which is the canonical provider of the
+    change to be tested.
+
+    Defines the exact public methods that must be supplied."""
+
+    def __init__(self, source_config={}, sched=None, connection=None):
+        self.source_config = source_config
+        self.sched = sched
+        self.connection = connection
+
+    def stop(self):
+        """Stop the source."""
+
+    @abc.abstractmethod
+    def getRefSha(self, project, ref):
+        """Return a sha for a given project ref."""
+
+    @abc.abstractmethod
+    def isMerged(self, change, head=None):
+        """Determine if change is merged.
+
+        If head is provided the change is checked if it is at head."""
+
+    @abc.abstractmethod
+    def canMerge(self, change, allow_needs):
+        """Determine if change can merge."""
+
+    def postConfig(self):
+        """Called after configuration has been processed."""
+
+    @abc.abstractmethod
+    def getChange(self, event, project):
+        """Get the change representing an event."""
+
+    @abc.abstractmethod
+    def getProjectOpenChanges(self, project):
+        """Get the open changes for a project."""
+
+    @abc.abstractmethod
+    def getGitUrl(self, project):
+        """Get the git url for a project."""
diff --git a/zuul/source/gerrit.py b/zuul/source/gerrit.py
new file mode 100644
index 0000000..73cf726
--- /dev/null
+++ b/zuul/source/gerrit.py
@@ -0,0 +1,357 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import re
+import time
+from zuul import exceptions
+from zuul.model import Change, Ref, NullChange
+from zuul.source import BaseSource
+
+
+# Walk the change dependency tree to find a cycle
+def detect_cycle(change, history=None):
+    if history is None:
+        history = []
+    else:
+        history = history[:]
+    history.append(change.number)
+    for dep in change.needs_changes:
+        if dep.number in history:
+            raise Exception("Dependency cycle detected: %s in %s" % (
+                dep.number, history))
+        detect_cycle(dep, history)
+
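
For example, two changes that each list the other in ``needs_changes``
trigger the exception (stub objects stand in for real Change objects;
``detect_cycle`` only needs ``number`` and ``needs_changes``)::

    class _FakeChange(object):
        def __init__(self, number):
            self.number = number
            self.needs_changes = []

    a, b = _FakeChange('1'), _FakeChange('2')
    a.needs_changes.append(b)
    b.needs_changes.append(a)
    detect_cycle(a)  # raises: Dependency cycle detected: 1 in ['1', '2']
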
+
+class GerritSource(BaseSource):
+    name = 'gerrit'
+    log = logging.getLogger("zuul.source.Gerrit")
+    replication_timeout = 300
+    replication_retry_interval = 5
+
+    depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
+                               re.MULTILINE | re.IGNORECASE)
+
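
The footer must be a Gerrit Change-Id (an ``I`` followed by 40 hex
digits) on a line of its own, for instance::

    message = """Add retry logic

    Depends-On: I6bf1a01e01dc216f8cf326ba3dd44bcba9a00b40
    """
    GerritSource.depends_on_re.findall(message)
    # -> ['I6bf1a01e01dc216f8cf326ba3dd44bcba9a00b40']
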
+    def getRefSha(self, project, ref):
+        refs = {}
+        try:
+            refs = self.connection.getInfoRefs(project)
+        except:
+            self.log.exception("Exception looking for ref %s" %
+                               ref)
+        sha = refs.get(ref, '')
+        return sha
+
+    def _waitForRefSha(self, project, ref, old_sha=''):
+        # Wait for the ref to show up in the repo
+        start = time.time()
+        while time.time() - start < self.replication_timeout:
+            sha = self.getRefSha(project.name, ref)
+            if old_sha != sha:
+                return True
+            time.sleep(self.replication_retry_interval)
+        return False
+
+    def isMerged(self, change, head=None):
+        self.log.debug("Checking if change %s is merged" % change)
+        if not change.number:
+            self.log.debug("Change has no number; considering it merged")
+            # Good question.  It's probably ref-updated, which, ah,
+            # means it's merged.
+            return True
+
+        data = self.connection.query(change.number)
+        change._data = data
+        change.is_merged = self._isMerged(change)
+        if change.is_merged:
+            self.log.debug("Change %s is merged" % (change,))
+        else:
+            self.log.debug("Change %s is not merged" % (change,))
+        if not head:
+            return change.is_merged
+        if not change.is_merged:
+            return False
+
+        ref = 'refs/heads/' + change.branch
+        self.log.debug("Waiting for %s to appear in git repo" % (change))
+        if self._waitForRefSha(change.project, ref, change._ref_sha):
+            self.log.debug("Change %s is in the git repo" %
+                           (change))
+            return True
+        self.log.debug("Change %s did not appear in the git repo" %
+                       (change))
+        return False
+
+    def _isMerged(self, change):
+        data = change._data
+        if not data:
+            return False
+        status = data.get('status')
+        if not status:
+            return False
+        if status == 'MERGED':
+            return True
+        return False
+
+    def canMerge(self, change, allow_needs):
+        if not change.number:
+            self.log.debug("Change has no number; considering it merged")
+            # Good question.  It's probably ref-updated, which, ah,
+            # means it's merged.
+            return True
+        data = change._data
+        if not data:
+            return False
+        if 'submitRecords' not in data:
+            return False
+        try:
+            for sr in data['submitRecords']:
+                if sr['status'] == 'OK':
+                    return True
+                elif sr['status'] == 'NOT_READY':
+                    for label in sr['labels']:
+                        if label['status'] in ['OK', 'MAY']:
+                            continue
+                        elif label['status'] in ['NEED', 'REJECT']:
+                            # It may be our own rejection, so we ignore
+                            if label['label'].lower() not in allow_needs:
+                                return False
+                            continue
+                        else:
+                            # IMPOSSIBLE
+                            return False
+                else:
+                    # CLOSED, RULE_ERROR
+                    return False
+        except:
+            self.log.exception("Exception determining whether change"
+                               "%s can merge:" % change)
+            return False
+        return True
+
+    def postConfig(self):
+        pass
+
+    def getChange(self, event, project):
+        if event.change_number:
+            refresh = False
+            change = self._getChange(event.change_number, event.patch_number,
+                                     refresh=refresh)
+        elif event.ref:
+            change = Ref(project)
+            change.ref = event.ref
+            change.oldrev = event.oldrev
+            change.newrev = event.newrev
+            change.url = self._getGitwebUrl(project, sha=event.newrev)
+        else:
+            change = NullChange(project)
+        return change
+
+    def _getChange(self, number, patchset, refresh=False, history=None):
+        key = '%s,%s' % (number, patchset)
+        change = self.connection.getCachedChange(key)
+        if change and not refresh:
+            return change
+        if not change:
+            change = Change(None)
+            change.number = number
+            change.patchset = patchset
+        key = '%s,%s' % (change.number, change.patchset)
+        self.connection.updateChangeCache(key, change)
+        try:
+            self._updateChange(change, history)
+        except Exception:
+            self.connection.deleteCachedChange(key)
+            raise
+        return change
+
+    def getProjectOpenChanges(self, project):
+        # This is a best-effort function in case Gerrit is unable to return
+        # a particular change.  It happens.
+        query = "project:%s status:open" % (project.name,)
+        self.log.debug("Running query %s to get project open changes" %
+                       (query,))
+        data = self.connection.simpleQuery(query)
+        changes = []
+        for record in data:
+            try:
+                changes.append(
+                    self._getChange(record['number'],
+                                    record['currentPatchSet']['number']))
+            except Exception:
+                self.log.exception("Unable to query change %s" %
+                                   (record.get('number'),))
+        return changes
+
+    def _getDependsOnFromCommit(self, message, change):
+        records = []
+        seen = set()
+        for match in self.depends_on_re.findall(message):
+            if match in seen:
+                self.log.debug("Ignoring duplicate Depends-On: %s" %
+                               (match,))
+                continue
+            seen.add(match)
+            query = "change:%s" % (match,)
+            self.log.debug("Updating %s: Running query %s "
+                           "to find needed changes" %
+                           (change, query,))
+            records.extend(self.connection.simpleQuery(query))
+        return records
+
+    def _getNeededByFromCommit(self, change_id, change):
+        records = []
+        seen = set()
+        query = 'message:%s' % change_id
+        self.log.debug("Updating %s: Running query %s "
+                       "to find changes needed-by" %
+                       (change, query,))
+        results = self.connection.simpleQuery(query)
+        for result in results:
+            for match in self.depends_on_re.findall(
+                result['commitMessage']):
+                if match != change_id:
+                    continue
+                key = (result['number'], result['currentPatchSet']['number'])
+                if key in seen:
+                    continue
+                self.log.debug("Updating %s: Found change %s,%s "
+                               "needs %s from commit" %
+                               (change, key[0], key[1], change_id))
+                seen.add(key)
+                records.append(result)
+        return records
+
+    def _updateChange(self, change, history=None):
+        self.log.info("Updating %s" % (change,))
+        data = self.connection.query(change.number)
+        change._data = data
+
+        if change.patchset is None:
+            change.patchset = data['currentPatchSet']['number']
+
+        if 'project' not in data:
+            raise exceptions.ChangeNotFound(change.number, change.patchset)
+        # If the updated change arrived as a dependency and its
+        # project is not defined in the layout, create a 'foreign'
+        # project for it.
+        change.project = self.sched.getProject(data['project'],
+                                               create_foreign=bool(history))
+        change.branch = data['branch']
+        change.url = data['url']
+        max_ps = 0
+        files = []
+        for ps in data['patchSets']:
+            if ps['number'] == change.patchset:
+                change.refspec = ps['ref']
+                for f in ps.get('files', []):
+                    files.append(f['file'])
+            if int(ps['number']) > int(max_ps):
+                max_ps = ps['number']
+        if max_ps == change.patchset:
+            change.is_current_patchset = True
+        else:
+            change.is_current_patchset = False
+        change.files = files
+
+        change.is_merged = self._isMerged(change)
+        change.approvals = data['currentPatchSet'].get('approvals', [])
+        change.open = data['open']
+        change.status = data['status']
+        change.owner = data['owner']
+
+        if change.is_merged:
+            # This change is merged, so we don't need to look any further
+            # for dependencies.
+            self.log.debug("Updating %s: change is merged" % (change,))
+            return change
+
+        if history is None:
+            history = []
+        else:
+            history = history[:]
+        history.append(change.number)
+
+        needs_changes = []
+        if 'dependsOn' in data:
+            parts = data['dependsOn'][0]['ref'].split('/')
+            dep_num, dep_ps = parts[3], parts[4]
+            if dep_num in history:
+                raise Exception("Dependency cycle detected: %s in %s" % (
+                    dep_num, history))
+            self.log.debug("Updating %s: Getting git-dependent change %s,%s" %
+                           (change, dep_num, dep_ps))
+            dep = self._getChange(dep_num, dep_ps, history=history)
+            # Because we are not forcing a refresh in _getChange, it
+            # may return without executing this code, so if we are
+            # updating our change to add ourselves to a dependency
+            # cycle, we won't detect it.  By explicitly performing a
+            # walk of the dependency tree, we will.
+            detect_cycle(dep, history)
+            if (not dep.is_merged) and dep not in needs_changes:
+                needs_changes.append(dep)
+
+        for record in self._getDependsOnFromCommit(data['commitMessage'],
+                                                   change):
+            dep_num = record['number']
+            dep_ps = record['currentPatchSet']['number']
+            if dep_num in history:
+                raise Exception("Dependency cycle detected: %s in %s" % (
+                    dep_num, history))
+            self.log.debug("Updating %s: Getting commit-dependent "
+                           "change %s,%s" %
+                           (change, dep_num, dep_ps))
+            dep = self._getChange(dep_num, dep_ps, history=history)
+            # Because we are not forcing a refresh in _getChange, it
+            # may return without executing this code, so if we are
+            # updating our change to add ourselves to a dependency
+            # cycle, we won't detect it.  By explicitly performing a
+            # walk of the dependency tree, we will.
+            detect_cycle(dep, history)
+            if (not dep.is_merged) and dep not in needs_changes:
+                needs_changes.append(dep)
+        change.needs_changes = needs_changes
+
+        needed_by_changes = []
+        if 'neededBy' in data:
+            for needed in data['neededBy']:
+                parts = needed['ref'].split('/')
+                dep_num, dep_ps = parts[3], parts[4]
+                self.log.debug("Updating %s: Getting git-needed change %s,%s" %
+                               (change, dep_num, dep_ps))
+                dep = self._getChange(dep_num, dep_ps)
+                if (not dep.is_merged) and dep.is_current_patchset:
+                    needed_by_changes.append(dep)
+
+        for record in self._getNeededByFromCommit(data['id'], change):
+            dep_num = record['number']
+            dep_ps = record['currentPatchSet']['number']
+            self.log.debug("Updating %s: Getting commit-needed change %s,%s" %
+                           (change, dep_num, dep_ps))
+            # Because a commit needed-by may be a cross-repo
+            # dependency, cause that change to refresh so that it will
+            # reference the latest patchset of its Depends-On (this
+            # change).
+            dep = self._getChange(dep_num, dep_ps, refresh=True)
+            if (not dep.is_merged) and dep.is_current_patchset:
+                needed_by_changes.append(dep)
+        change.needed_by_changes = needed_by_changes
+
+        return change
+
+    def getGitUrl(self, project):
+        return self.connection.getGitUrl(project)
+
+    def _getGitwebUrl(self, project, sha=None):
+        return self.connection.getGitwebUrl(project, sha)
diff --git a/zuul/trigger/__init__.py b/zuul/trigger/__init__.py
index e69de29..16fb0b1 100644
--- a/zuul/trigger/__init__.py
+++ b/zuul/trigger/__init__.py
@@ -0,0 +1,46 @@
+# Copyright 2014 Rackspace Australia
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseTrigger(object):
+    """Base class for triggers.
+
+    Defines the exact public methods that must be supplied."""
+
+    def __init__(self, trigger_config={}, sched=None, connection=None):
+        self.trigger_config = trigger_config
+        self.sched = sched
+        self.connection = connection
+
+    def stop(self):
+        """Stop the trigger."""
+
+    @abc.abstractmethod
+    def getEventFilters(self, trigger_conf):
+        """Return a list of EventFilter's for the scheduler to match against.
+        """
+
+    def postConfig(self):
+        """Called after config is loaded."""
+
+    def onChangeMerged(self, change, source):
+        """Called when a change has been merged."""
+
+    def onChangeEnqueued(self, change, pipeline):
+        """Called when a change has been enqueued."""
diff --git a/zuul/trigger/gerrit.py b/zuul/trigger/gerrit.py
index 175e3f8..8a3fe42 100644
--- a/zuul/trigger/gerrit.py
+++ b/zuul/trigger/gerrit.py
@@ -13,505 +13,105 @@
 # under the License.
 
 import logging
-import re
-import threading
-import time
-import urllib2
-import voluptuous
-from zuul.lib import gerrit
-from zuul.model import TriggerEvent, Change, Ref, NullChange
+import voluptuous as v
+from zuul.model import EventFilter
+from zuul.trigger import BaseTrigger
 
 
-class GerritEventConnector(threading.Thread):
-    """Move events from Gerrit to the scheduler."""
-
-    log = logging.getLogger("zuul.GerritEventConnector")
-    delay = 5.0
-
-    def __init__(self, gerrit, sched, trigger):
-        super(GerritEventConnector, self).__init__()
-        self.daemon = True
-        self.gerrit = gerrit
-        self.sched = sched
-        self.trigger = trigger
-        self._stopped = False
-
-    def stop(self):
-        self._stopped = True
-        self.gerrit.addEvent((None, None))
-
-    def _handleEvent(self):
-        ts, data = self.gerrit.getEvent()
-        if self._stopped:
-            return
-        # Gerrit can produce inconsistent data immediately after an
-        # event, So ensure that we do not deliver the event to Zuul
-        # until at least a certain amount of time has passed.  Note
-        # that if we receive several events in succession, we will
-        # only need to delay for the first event.  In essence, Zuul
-        # should always be a constant number of seconds behind Gerrit.
-        now = time.time()
-        time.sleep(max((ts + self.delay) - now, 0.0))
-        event = TriggerEvent()
-        event.type = data.get('type')
-        event.trigger_name = self.trigger.name
-        change = data.get('change')
-        if change:
-            event.project_name = change.get('project')
-            event.branch = change.get('branch')
-            event.change_number = change.get('number')
-            event.change_url = change.get('url')
-            patchset = data.get('patchSet')
-            if patchset:
-                event.patch_number = patchset.get('number')
-                event.refspec = patchset.get('ref')
-            event.approvals = data.get('approvals', [])
-            event.comment = data.get('comment')
-        refupdate = data.get('refUpdate')
-        if refupdate:
-            event.project_name = refupdate.get('project')
-            event.ref = refupdate.get('refName')
-            event.oldrev = refupdate.get('oldRev')
-            event.newrev = refupdate.get('newRev')
-        # Map the event types to a field name holding a Gerrit
-        # account attribute. See Gerrit stream-event documentation
-        # in cmd-stream-events.html
-        accountfield_from_type = {
-            'patchset-created': 'uploader',
-            'draft-published': 'uploader',  # Gerrit 2.5/2.6
-            'change-abandoned': 'abandoner',
-            'change-restored': 'restorer',
-            'change-merged': 'submitter',
-            'merge-failed': 'submitter',  # Gerrit 2.5/2.6
-            'comment-added': 'author',
-            'ref-updated': 'submitter',
-            'reviewer-added': 'reviewer',  # Gerrit 2.5/2.6
-        }
-        try:
-            event.account = data.get(accountfield_from_type[event.type])
-        except KeyError:
-            self.log.error("Received unrecognized event type '%s' from Gerrit.\
-                    Can not get account information." % event.type)
-            event.account = None
-
-        if event.change_number:
-            # Call _getChange for the side effect of updating the
-            # cache.  Note that this modifies Change objects outside
-            # the main thread.
-            self.trigger._getChange(event.change_number,
-                                    event.patch_number,
-                                    refresh=True)
-
-        self.sched.addEvent(event)
-
-    def run(self):
-        while True:
-            if self._stopped:
-                return
-            try:
-                self._handleEvent()
-            except:
-                self.log.exception("Exception moving Gerrit event:")
-            finally:
-                self.gerrit.eventDone()
-
-
-class Gerrit(object):
+class GerritTrigger(BaseTrigger):
     name = 'gerrit'
-    log = logging.getLogger("zuul.Gerrit")
-    replication_timeout = 300
-    replication_retry_interval = 5
+    log = logging.getLogger("zuul.trigger.Gerrit")
 
-    depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
-                               re.MULTILINE | re.IGNORECASE)
+    def getEventFilters(self, trigger_conf):
+        def toList(item):
+            if not item:
+                return []
+            if isinstance(item, list):
+                return item
+            return [item]
 
-    def __init__(self, config, sched):
-        self._change_cache = {}
-        self.sched = sched
-        self.config = config
-        self.server = config.get('gerrit', 'server')
-        if config.has_option('gerrit', 'baseurl'):
-            self.baseurl = config.get('gerrit', 'baseurl')
-        else:
-            self.baseurl = 'https://%s' % self.server
-        user = config.get('gerrit', 'user')
-        if config.has_option('gerrit', 'sshkey'):
-            sshkey = config.get('gerrit', 'sshkey')
-        else:
-            sshkey = None
-        if config.has_option('gerrit', 'port'):
-            port = int(config.get('gerrit', 'port'))
-        else:
-            port = 29418
-        self.gerrit = gerrit.Gerrit(self.server, user, port, sshkey)
-        self.gerrit.startWatching()
-        self.gerrit_connector = GerritEventConnector(
-            self.gerrit, sched, self)
-        self.gerrit_connector.start()
+        efilters = []
+        for trigger in toList(trigger_conf):
+            approvals = {}
+            for approval_dict in toList(trigger.get('approval')):
+                for key, val in approval_dict.items():
+                    approvals[key] = val
+            # Backwards compat for *_filter versions of these args
+            comments = toList(trigger.get('comment'))
+            if not comments:
+                comments = toList(trigger.get('comment_filter'))
+            emails = toList(trigger.get('email'))
+            if not emails:
+                emails = toList(trigger.get('email_filter'))
+            usernames = toList(trigger.get('username'))
+            if not usernames:
+                usernames = toList(trigger.get('username_filter'))
+            ignore_deletes = trigger.get('ignore-deletes', True)
+            f = EventFilter(
+                trigger=self,
+                types=toList(trigger['event']),
+                branches=toList(trigger.get('branch')),
+                refs=toList(trigger.get('ref')),
+                event_approvals=approvals,
+                comments=comments,
+                emails=emails,
+                usernames=usernames,
+                required_approvals=(
+                    toList(trigger.get('require-approval'))
+                ),
+                reject_approvals=toList(
+                    trigger.get('reject-approval')
+                ),
+                ignore_deletes=ignore_deletes
+            )
+            efilters.append(f)
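
For example, a hypothetical layout snippet (already parsed from YAML)
using the legacy ``comment_filter`` spelling still produces a filter,
via the fallbacks above::

    trigger_conf = [{
        'event': 'comment-added',
        'branch': '^master$',
        'comment_filter': '(?i)recheck',   # legacy key
    }]
    filters = GerritTrigger(sched=None).getEventFilters(trigger_conf)
    # One EventFilter with comments=['(?i)recheck']; the fallback
    # fired because the new 'comment' key was absent.
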
 
-    def stop(self):
-        self.gerrit_connector.stop()
-        self.gerrit_connector.join()
-
-    def _getInfoRefs(self, project):
-        url = "%s/p/%s/info/refs?service=git-upload-pack" % (
-            self.baseurl, project)
-        try:
-            data = urllib2.urlopen(url).read()
-        except:
-            self.log.error("Cannot get references from %s" % url)
-            raise  # keeps urllib2 error informations
-        ret = {}
-        read_headers = False
-        read_advertisement = False
-        if data[4] != '#':
-            raise Exception("Gerrit repository does not support "
-                            "git-upload-pack")
-        i = 0
-        while i < len(data):
-            if len(data) - i < 4:
-                raise Exception("Invalid length in info/refs")
-            plen = int(data[i:i + 4], 16)
-            i += 4
-            # It's the length of the packet, including the 4 bytes of the
-            # length itself, unless it's null, in which case the length is
-            # not included.
-            if plen > 0:
-                plen -= 4
-            if len(data) - i < plen:
-                raise Exception("Invalid data in info/refs")
-            line = data[i:i + plen]
-            i += plen
-            if not read_headers:
-                if plen == 0:
-                    read_headers = True
-                continue
-            if not read_advertisement:
-                read_advertisement = True
-                continue
-            if plen == 0:
-                # The terminating null
-                continue
-            line = line.strip()
-            revision, ref = line.split()
-            ret[ref] = revision
-        return ret
-
-    def getRefSha(self, project, ref):
-        refs = {}
-        try:
-            refs = self._getInfoRefs(project)
-        except:
-            self.log.exception("Exception looking for ref %s" %
-                               ref)
-        sha = refs.get(ref, '')
-        return sha
-
-    def waitForRefSha(self, project, ref, old_sha=''):
-        # Wait for the ref to show up in the repo
-        start = time.time()
-        while time.time() - start < self.replication_timeout:
-            sha = self.getRefSha(project.name, ref)
-            if old_sha != sha:
-                return True
-            time.sleep(self.replication_retry_interval)
-        return False
-
-    def isMerged(self, change, head=None):
-        self.log.debug("Checking if change %s is merged" % change)
-        if not change.number:
-            self.log.debug("Change has no number; considering it merged")
-            # Good question.  It's probably ref-updated, which, ah,
-            # means it's merged.
-            return True
-
-        data = self.gerrit.query(change.number)
-        change._data = data
-        change.is_merged = self._isMerged(change)
-        if not head:
-            return change.is_merged
-        if not change.is_merged:
-            return False
-
-        ref = 'refs/heads/' + change.branch
-        self.log.debug("Waiting for %s to appear in git repo" % (change))
-        if self.waitForRefSha(change.project, ref, change._ref_sha):
-            self.log.debug("Change %s is in the git repo" %
-                           (change))
-            return True
-        self.log.debug("Change %s did not appear in the git repo" %
-                       (change))
-        return False
-
-    def _isMerged(self, change):
-        data = change._data
-        if not data:
-            return False
-        status = data.get('status')
-        if not status:
-            return False
-        self.log.debug("Change %s status: %s" % (change, status))
-        if status == 'MERGED':
-            return True
-        return False
-
-    def canMerge(self, change, allow_needs):
-        if not change.number:
-            self.log.debug("Change has no number; considering it merged")
-            # Good question.  It's probably ref-updated, which, ah,
-            # means it's merged.
-            return True
-        data = change._data
-        if not data:
-            return False
-        if 'submitRecords' not in data:
-            return False
-        try:
-            for sr in data['submitRecords']:
-                if sr['status'] == 'OK':
-                    return True
-                elif sr['status'] == 'NOT_READY':
-                    for label in sr['labels']:
-                        if label['status'] in ['OK', 'MAY']:
-                            continue
-                        elif label['status'] in ['NEED', 'REJECT']:
-                            # It may be our own rejection, so we ignore
-                            if label['label'].lower() not in allow_needs:
-                                return False
-                            continue
-                        else:
-                            # IMPOSSIBLE
-                            return False
-                else:
-                    # CLOSED, RULE_ERROR
-                    return False
-        except:
-            self.log.exception("Exception determining whether change"
-                               "%s can merge:" % change)
-            return False
-        return True
-
-    def maintainCache(self, relevant):
-        # This lets the user supply a list of change objects that are
-        # still in use.  Anything in our cache that isn't in the supplied
-        # list should be safe to remove from the cache.
-        remove = []
-        for key, change in self._change_cache.items():
-            if change not in relevant:
-                remove.append(key)
-        for key in remove:
-            del self._change_cache[key]
-
-    def postConfig(self):
-        pass
-
-    def getChange(self, event, project):
-        if event.change_number:
-            change = self._getChange(event.change_number, event.patch_number)
-        elif event.ref:
-            change = Ref(project)
-            change.ref = event.ref
-            change.oldrev = event.oldrev
-            change.newrev = event.newrev
-            change.url = self.getGitwebUrl(project, sha=event.newrev)
-        else:
-            change = NullChange(project)
-        return change
-
-    def _getChange(self, number, patchset, refresh=False, history=None):
-        key = '%s,%s' % (number, patchset)
-        change = None
-        if key in self._change_cache:
-            change = self._change_cache.get(key)
-            if not refresh:
-                return change
-        if not change:
-            change = Change(None)
-            change.number = number
-            change.patchset = patchset
-        key = '%s,%s' % (change.number, change.patchset)
-        self._change_cache[key] = change
-        try:
-            self.updateChange(change, history)
-        except Exception:
-            del self._change_cache[key]
-            raise
-        return change
-
-    def getProjectOpenChanges(self, project):
-        # This is a best-effort function in case Gerrit is unable to return
-        # a particular change.  It happens.
-        query = "project:%s status:open" % (project.name,)
-        self.log.debug("Running query %s to get project open changes" %
-                       (query,))
-        data = self.gerrit.simpleQuery(query)
-        changes = []
-        for record in data:
-            try:
-                changes.append(
-                    self._getChange(record['number'],
-                                    record['currentPatchSet']['number']))
-            except Exception:
-                self.log.exception("Unable to query change %s" %
-                                   (record.get('number'),))
-        return changes
-
-    def _getDependsOnFromCommit(self, message):
-        records = []
-        seen = set()
-        for match in self.depends_on_re.findall(message):
-            if match in seen:
-                self.log.debug("Ignoring duplicate Depends-On: %s" %
-                               (match,))
-                continue
-            seen.add(match)
-            query = "change:%s" % (match,)
-            self.log.debug("Running query %s to find needed changes" %
-                           (query,))
-            records.extend(self.gerrit.simpleQuery(query))
-        return records
-
-    def _getNeededByFromCommit(self, change_id):
-        records = []
-        seen = set()
-        query = 'message:%s' % change_id
-        self.log.debug("Running query %s to find changes needed-by" %
-                       (query,))
-        results = self.gerrit.simpleQuery(query)
-        for result in results:
-            for match in self.depends_on_re.findall(
-                result['commitMessage']):
-                if match != change_id:
-                    continue
-                key = (result['number'], result['currentPatchSet']['number'])
-                if key in seen:
-                    continue
-                self.log.debug("Found change %s,%s needs %s from commit" %
-                               (key[0], key[1], change_id))
-                seen.add(key)
-                records.append(result)
-        return records
-
-    def updateChange(self, change, history=None):
-        self.log.info("Updating information for %s,%s" %
-                      (change.number, change.patchset))
-        data = self.gerrit.query(change.number)
-        change._data = data
-
-        if change.patchset is None:
-            change.patchset = data['currentPatchSet']['number']
-
-        if 'project' not in data:
-            raise Exception("Change %s,%s not found" % (change.number,
-                                                        change.patchset))
-        change.project = self.sched.getProject(data['project'])
-        change.branch = data['branch']
-        change.url = data['url']
-        max_ps = 0
-        files = []
-        for ps in data['patchSets']:
-            if ps['number'] == change.patchset:
-                change.refspec = ps['ref']
-                for f in ps.get('files', []):
-                    files.append(f['file'])
-            if int(ps['number']) > int(max_ps):
-                max_ps = ps['number']
-        if max_ps == change.patchset:
-            change.is_current_patchset = True
-        else:
-            change.is_current_patchset = False
-        change.files = files
-
-        change.is_merged = self._isMerged(change)
-        change.approvals = data['currentPatchSet'].get('approvals', [])
-        change.open = data['open']
-        change.status = data['status']
-        change.owner = data['owner']
-
-        if change.is_merged:
-            # This change is merged, so we don't need to look any further
-            # for dependencies.
-            return change
-
-        if history is None:
-            history = []
-        else:
-            history = history[:]
-        history.append(change.number)
-
-        needs_changes = []
-        if 'dependsOn' in data:
-            parts = data['dependsOn'][0]['ref'].split('/')
-            dep_num, dep_ps = parts[3], parts[4]
-            if dep_num in history:
-                raise Exception("Dependency cycle detected: %s in %s" % (
-                    dep_num, history))
-            self.log.debug("Getting git-dependent change %s,%s" %
-                           (dep_num, dep_ps))
-            dep = self._getChange(dep_num, dep_ps, history=history)
-            if (not dep.is_merged) and dep not in needs_changes:
-                needs_changes.append(dep)
-
-        for record in self._getDependsOnFromCommit(data['commitMessage']):
-            dep_num = record['number']
-            dep_ps = record['currentPatchSet']['number']
-            if dep_num in history:
-                raise Exception("Dependency cycle detected: %s in %s" % (
-                    dep_num, history))
-            self.log.debug("Getting commit-dependent change %s,%s" %
-                           (dep_num, dep_ps))
-            dep = self._getChange(dep_num, dep_ps, history=history)
-            if (not dep.is_merged) and dep not in needs_changes:
-                needs_changes.append(dep)
-        change.needs_changes = needs_changes
-
-        needed_by_changes = []
-        if 'neededBy' in data:
-            for needed in data['neededBy']:
-                parts = needed['ref'].split('/')
-                dep_num, dep_ps = parts[3], parts[4]
-                dep = self._getChange(dep_num, dep_ps)
-                if (not dep.is_merged) and dep.is_current_patchset:
-                    needed_by_changes.append(dep)
-
-        for record in self._getNeededByFromCommit(data['id']):
-            dep_num = record['number']
-            dep_ps = record['currentPatchSet']['number']
-            self.log.debug("Getting commit-needed change %s,%s" %
-                           (dep_num, dep_ps))
-            # Because a commit needed-by may be a cross-repo
-            # dependency, cause that change to refresh so that it will
-            # reference the latest patchset of its Depends-On (this
-            # change).
-            dep = self._getChange(dep_num, dep_ps, refresh=True)
-            if (not dep.is_merged) and dep.is_current_patchset:
-                needed_by_changes.append(dep)
-        change.needed_by_changes = needed_by_changes
-
-        return change
-
-    def getGitUrl(self, project):
-        server = self.config.get('gerrit', 'server')
-        user = self.config.get('gerrit', 'user')
-        if self.config.has_option('gerrit', 'port'):
-            port = int(self.config.get('gerrit', 'port'))
-        else:
-            port = 29418
-        url = 'ssh://%s@%s:%s/%s' % (user, server, port, project.name)
-        return url
-
-    def getGitwebUrl(self, project, sha=None):
-        url = '%s/gitweb?p=%s.git' % (self.baseurl, project)
-        if sha:
-            url += ';a=commitdiff;h=' + sha
-        return url
+        return efilters
 
 
-def validate_trigger(trigger_data):
+def validate_conf(trigger_conf):
     """Validates the layout's trigger data."""
     events_with_ref = ('ref-updated', )
-    for event in trigger_data['gerrit']:
+    for event in trigger_conf:
         if event['event'] not in events_with_ref and event.get('ref', False):
-            raise voluptuous.Invalid(
+            raise v.Invalid(
                 "The event %s does not include ref information, Zuul cannot "
                 "use ref filter 'ref: %s'" % (event['event'], event['ref']))
+
+
+def getSchema():
+    def toList(x):
+        return v.Any([x], x)
+    variable_dict = v.Schema({}, extra=True)
+
+    approval = v.Schema({'username': str,
+                         'email-filter': str,
+                         'email': str,
+                         'older-than': str,
+                         'newer-than': str,
+                         }, extra=True)
+
+    gerrit_trigger = {
+        v.Required('event'):
+            toList(v.Any('patchset-created',
+                         'draft-published',
+                         'change-abandoned',
+                         'change-restored',
+                         'change-merged',
+                         'comment-added',
+                         'ref-updated')),
+        'comment_filter': toList(str),
+        'comment': toList(str),
+        'email_filter': toList(str),
+        'email': toList(str),
+        'username_filter': toList(str),
+        'username': toList(str),
+        'branch': toList(str),
+        'ref': toList(str),
+        'ignore-deletes': bool,
+        'approval': toList(variable_dict),
+        'require-approval': toList(approval),
+        'reject-approval': toList(approval),
+    }
+
+    return gerrit_trigger
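
A note on the schema refactor above: each trigger module now exports a
``getSchema()`` function returning a plain voluptuous fragment, and
``toList(x) = v.Any([x], x)`` lets every option be written either as a
scalar or as a list in the layout. A minimal sketch of exercising the
Gerrit fragment on its own — wrapping it in ``v.Schema``, the module
path ``zuul.trigger.gerrit``, and the sample trigger data are
illustrative assumptions, not part of the patch::

    import voluptuous as v
    from zuul.trigger import gerrit

    # getSchema() returns a bare dict; wrap it to obtain a validator.
    schema = v.Schema(gerrit.getSchema())

    # 'event' and 'branch' accept a single value or a list (toList).
    schema({'event': 'patchset-created',
            'branch': ['master', 'stable/liberty']})

    # An unrecognized event name is rejected by the v.Any(...) literals.
    try:
        schema({'event': 'no-such-event'})
    except v.MultipleInvalid as e:
        print(e)
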
diff --git a/zuul/trigger/timer.py b/zuul/trigger/timer.py
index 3d5cd9b..f81312e 100644
--- a/zuul/trigger/timer.py
+++ b/zuul/trigger/timer.py
@@ -13,19 +13,21 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import apscheduler.scheduler
+from apscheduler.schedulers.background import BackgroundScheduler
+from apscheduler.triggers.cron import CronTrigger
 import logging
-from zuul.model import TriggerEvent
+import voluptuous as v
+from zuul.model import EventFilter, TriggerEvent
+from zuul.trigger import BaseTrigger
 
 
-class Timer(object):
+class TimerTrigger(BaseTrigger):
     name = 'timer'
     log = logging.getLogger("zuul.Timer")
 
-    def __init__(self, config, sched):
-        self.sched = sched
-        self.config = config
-        self.apsched = apscheduler.scheduler.Scheduler()
+    def __init__(self, trigger_config={}, sched=None, connection=None):
+        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
+        self.apsched = BackgroundScheduler()
         self.apsched.start()
 
     def _onTrigger(self, pipeline_name, timespec):
@@ -41,20 +43,27 @@
     def stop(self):
         self.apsched.shutdown()
 
-    def isMerged(self, change, head=None):
-        raise Exception("Timer trigger does not support checking if "
-                        "a change is merged.")
+    def getEventFilters(self, trigger_conf):
+        def toList(item):
+            if not item:
+                return []
+            if isinstance(item, list):
+                return item
+            return [item]
 
-    def canMerge(self, change, allow_needs):
-        raise Exception("Timer trigger does not support checking if "
-                        "a change can merge.")
+        efilters = []
+        for trigger in toList(trigger_conf):
+            f = EventFilter(trigger=self,
+                            types=['timer'],
+                            timespecs=toList(trigger['time']))
 
-    def maintainCache(self, relevant):
-        return
+            efilters.append(f)
+
+        return efilters
 
     def postConfig(self):
         for job in self.apsched.get_jobs():
-            self.apsched.unschedule_job(job)
+            job.remove()
         for pipeline in self.sched.layout.pipelines.values():
             for ef in pipeline.manager.event_filters:
                 if ef.trigger != self:
@@ -73,20 +82,13 @@
                         second = parts[5]
                     else:
                         second = None
-                    self.apsched.add_cron_job(self._onTrigger,
-                                              day=dom,
-                                              day_of_week=dow,
-                                              hour=hour,
-                                              minute=minute,
-                                              second=second,
-                                              args=(pipeline.name,
-                                                    timespec,))
+                    trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour,
+                                          minute=minute, second=second)
 
-    def getChange(self, event, project):
-        raise Exception("Timer trigger does not support changes.")
+                    self.apsched.add_job(self._onTrigger, trigger=trigger,
+                                         args=(pipeline.name, timespec,))
 
-    def getGitUrl(self, project):
-        raise Exception("Timer trigger does not support changes.")
 
-    def getGitwebUrl(self, project, sha=None):
-        raise Exception("Timer trigger does not support changes.")
+def getSchema():
+    timer_trigger = {v.Required('time'): str}
+    return timer_trigger
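
The timer change above is an APScheduler 2.x to 3.x migration:
``apscheduler.scheduler.Scheduler`` becomes ``BackgroundScheduler``,
``add_cron_job()`` becomes ``add_job()`` with an explicit
``CronTrigger``, and jobs are removed through the ``Job`` object
instead of ``unschedule_job()``. A self-contained sketch of the new
calling convention (the callback and cron fields are placeholder
values, not taken from the patch)::

    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.triggers.cron import CronTrigger

    def fire(pipeline_name, timespec):
        # Stand-in for TimerTrigger._onTrigger().
        print("timer fired for %s (%s)" % (pipeline_name, timespec))

    sched = BackgroundScheduler()
    sched.start()

    # 3.x style: build the trigger explicitly, then add_job().
    trigger = CronTrigger(minute='*/5')
    sched.add_job(fire, trigger=trigger,
                  args=('periodic', '*/5 * * * *'))

    # 3.x style: remove jobs through the Job handle.
    for job in sched.get_jobs():
        job.remove()

    sched.shutdown()
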
diff --git a/zuul/trigger/zuultrigger.py b/zuul/trigger/zuultrigger.py
index 4418d6f..00b21f2 100644
--- a/zuul/trigger/zuultrigger.py
+++ b/zuul/trigger/zuultrigger.py
@@ -14,38 +14,50 @@
 # under the License.
 
 import logging
-from zuul.model import TriggerEvent
+import voluptuous as v
+from zuul.model import EventFilter, TriggerEvent
+from zuul.trigger import BaseTrigger
 
 
-class ZuulTrigger(object):
+class ZuulTrigger(BaseTrigger):
     name = 'zuul'
     log = logging.getLogger("zuul.ZuulTrigger")
 
-    def __init__(self, config, sched):
-        self.sched = sched
-        self.config = config
+    def __init__(self, trigger_config={}, sched=None, connection=None):
+        super(ZuulTrigger, self).__init__(trigger_config, sched, connection)
         self._handle_parent_change_enqueued_events = False
         self._handle_project_change_merged_events = False
 
-    def stop(self):
-        pass
+    def getEventFilters(self, trigger_conf):
+        def toList(item):
+            if not item:
+                return []
+            if isinstance(item, list):
+                return item
+            return [item]
 
-    def isMerged(self, change, head=None):
-        raise Exception("Zuul trigger does not support checking if "
-                        "a change is merged.")
+        efilters = []
+        for trigger in toList(trigger_conf):
+            f = EventFilter(
+                trigger=self,
+                types=toList(trigger['event']),
+                pipelines=toList(trigger.get('pipeline')),
+                required_approvals=(
+                    toList(trigger.get('require-approval'))
+                ),
+                reject_approvals=toList(
+                    trigger.get('reject-approval')
+                ),
+            )
+            efilters.append(f)
 
-    def canMerge(self, change, allow_needs):
-        raise Exception("Zuul trigger does not support checking if "
-                        "a change can merge.")
+        return efilters
 
-    def maintainCache(self, relevant):
-        return
-
-    def onChangeMerged(self, change):
+    def onChangeMerged(self, change, source):
         # Called each time zuul merges a change
         if self._handle_project_change_merged_events:
             try:
-                self._createProjectChangeMergedEvents(change)
+                self._createProjectChangeMergedEvents(change, source)
             except Exception:
                 self.log.exception(
                     "Unable to create project-change-merged events for "
@@ -61,8 +73,8 @@
                     "Unable to create parent-change-enqueued events for "
                     "%s in %s" % (change, pipeline))
 
-    def _createProjectChangeMergedEvents(self, change):
-        changes = self.sched.triggers['gerrit'].getProjectOpenChanges(
+    def _createProjectChangeMergedEvents(self, change, source):
+        changes = source.getProjectOpenChanges(
             change.project)
         for open_change in changes:
             self._createProjectChangeMergedEvent(open_change)
@@ -112,11 +124,25 @@
                 elif 'project-change-merged' in ef._types:
                     self._handle_project_change_merged_events = True
 
-    def getChange(self, number, patchset, refresh=False):
-        raise Exception("Zuul trigger does not support changes.")
 
-    def getGitUrl(self, project):
-        raise Exception("Zuul trigger does not support changes.")
+def getSchema():
+    def toList(x):
+        return v.Any([x], x)
 
-    def getGitwebUrl(self, project, sha=None):
-        raise Exception("Zuul trigger does not support changes.")
+    approval = v.Schema({'username': str,
+                         'email-filter': str,
+                         'email': str,
+                         'older-than': str,
+                         'newer-than': str,
+                         }, extra=True)
+
+    zuul_trigger = {
+        v.Required('event'):
+        toList(v.Any('parent-change-enqueued',
+                     'project-change-merged')),
+        'pipeline': toList(str),
+        'require-approval': toList(approval),
+        'reject-approval': toList(approval),
+    }
+
+    return zuul_trigger
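
Both ``TimerTrigger.getEventFilters()`` and
``ZuulTrigger.getEventFilters()`` carry an identical ``toList()``
helper that normalizes a layout value into a list before building
``EventFilter`` objects. Pulled out on its own, with its edge cases
spelled out (the assertions are illustrative)::

    def toList(item):
        if not item:
            return []
        if isinstance(item, list):
            return item
        return [item]

    assert toList(None) == []                    # option not given
    assert toList('project-change-merged') == ['project-change-merged']
    assert toList(['a', 'b']) == ['a', 'b']      # already a list

Note this differs from the schema-side ``toList(x) = v.Any([x], x)``,
which validates the two shapes but does not convert between them; the
runtime helper does the actual normalization.
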
diff --git a/zuul/webapp.py b/zuul/webapp.py
index 44c333b..c1c848b 100644
--- a/zuul/webapp.py
+++ b/zuul/webapp.py
@@ -43,16 +43,19 @@
 class WebApp(threading.Thread):
     log = logging.getLogger("zuul.WebApp")
 
-    def __init__(self, scheduler, port=8001, cache_expiry=1):
+    def __init__(self, scheduler, port=8001, cache_expiry=1,
+                 listen_address='0.0.0.0'):
         threading.Thread.__init__(self)
         self.scheduler = scheduler
+        self.listen_address = listen_address
         self.port = port
         self.cache_expiry = cache_expiry
         self.cache_time = 0
         self.cache = None
         self.daemon = True
-        self.server = httpserver.serve(dec.wsgify(self.app), host='0.0.0.0',
-                                       port=self.port, start_loop=False)
+        self.server = httpserver.serve(
+            dec.wsgify(self.app), host=self.listen_address, port=self.port,
+            start_loop=False)
 
     def run(self):
         self.server.serve_forever()
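
With the new ``listen_address`` argument the status webapp no longer
has to bind to all interfaces; the default remains ``0.0.0.0``, so
existing deployments are unaffected. A hypothetical wiring for a
localhost-only listener — the ``None`` scheduler is a placeholder for
illustration, where a real deployment passes the running zuul
scheduler::

    from zuul.webapp import WebApp

    webapp = WebApp(None, port=8001, cache_expiry=1,
                    listen_address='127.0.0.1')
    webapp.start()  # daemon thread; run() calls serve_forever()
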