Merge branch 'master' into feature/zuulv3

Change-Id: Id2789441d6b651c736c3d046a373eaea1921cf2d
diff --git a/.gitignore b/.gitignore
index b59cb77..f516785 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,7 +9,6 @@
 AUTHORS
 build/*
 ChangeLog
-config
 doc/build/*
 zuul/versioninfo
 dist/
diff --git a/.gitreview b/.gitreview
index 665adb6..9ba1bdc 100644
--- a/.gitreview
+++ b/.gitreview
@@ -2,3 +2,4 @@
 host=review.openstack.org
 port=29418
 project=openstack-infra/zuul.git
+defaultbranch=feature/zuulv3
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 9e0d2c7..f8ae368 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -25,7 +25,11 @@
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [ 'sphinxcontrib.blockdiag', 'sphinxcontrib.programoutput' ]
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinxcontrib.blockdiag',
+    'sphinxcontrib.programoutput'
+]
 #extensions = ['sphinx.ext.intersphinx']
 #intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
 
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 3c793da..8c12138 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -24,6 +24,7 @@
    launchers
    statsd
    client
+   internals
 
 Indices and tables
 ==================
diff --git a/doc/source/internals.rst b/doc/source/internals.rst
new file mode 100644
index 0000000..e98ab6e
--- /dev/null
+++ b/doc/source/internals.rst
@@ -0,0 +1,118 @@
+Zuul Internals
+==============
+
+While most people should not need to understand the details of Zuul's
+internal data model, doing so is essential for anyone writing code for
+Zuul, and may also interest advanced users. The model is defined in
+`zuul/model.py`_.
+
+.. _zuul/model.py: http://git.openstack.org/cgit/openstack-infra/zuul/tree/zuul/model.py
+
+Data Model
+----------
+
+It all starts with the :py:class:`~zuul.model.Pipeline`. A Pipeline is the
+basic organizational structure that everything else hangs off.
+
+.. autoclass:: zuul.model.Pipeline
+
+Pipelines have a configured
+:py:class:`~zuul.manager.PipelineManager` which controls how
+the :py:class:`Change <zuul.model.Changeish>` objects are enqueued and
+processed.
+
+There are currently two:
+:py:class:`~zuul.manager.dependent.DependentPipelineManager` and
+:py:class:`~zuul.manager.independent.IndependentPipelineManager`.
+
+.. autoclass:: zuul.manager.PipelineManager
+.. autoclass:: zuul.manager.dependent.DependentPipelineManager
+.. autoclass:: zuul.manager.independent.IndependentPipelineManager
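+
+As a rough sketch of how these objects are wired together (a
+hypothetical illustration only; the constructor signatures shown here
+are assumptions, not a stable API):
+
+.. code-block:: python
+
+   # Hypothetical wiring, loosely following the scheduler's
+   # configuration loading; 'layout' and 'sched' are assumed to exist.
+   pipeline = zuul.model.Pipeline('gate', layout)
+   manager = zuul.manager.dependent.DependentPipelineManager(
+       sched, pipeline)
+   pipeline.setManager(manager)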
+
+A :py:class:`~zuul.model.Pipeline` has one or more
+:py:class:`~zuul.model.ChangeQueue` objects.
+
+.. autoclass:: zuul.model.ChangeQueue
+
+A :py:class:`~zuul.model.Job` represents the definition of what to do. A
+:py:class:`~zuul.model.Build` represents a single run of a
+:py:class:`~zuul.model.Job`. A :py:class:`~zuul.model.JobTree` is used to
+encapsulate the dependencies between one or more :py:class:`~zuul.model.Job`
+objects.
+
+.. autoclass:: zuul.model.Job
+.. autoclass:: zuul.model.JobTree
+.. autoclass:: zuul.model.Build
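+
+For example, a tree in which two test jobs run only after a merge job
+completes could be sketched as follows (a minimal illustration; the
+method names are assumed from the model and the job names are made up):
+
+.. code-block:: python
+
+   # Hypothetical: "merge", then "test1" and "test2" as dependents.
+   root = zuul.model.JobTree(None)
+   merge = root.addJob(zuul.model.Job('merge'))
+   merge.addJob(zuul.model.Job('test1'))
+   merge.addJob(zuul.model.Job('test2'))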
+
+The :py:class:`~zuul.manager.PipelineManager` enqueues each
+:py:class:`Change <zuul.model.Changeish>` into the
+:py:class:`~zuul.model.ChangeQueue` in a :py:class:`~zuul.model.QueueItem`.
+
+.. autoclass:: zuul.model.QueueItem
+
+As the Changes are processed, each :py:class:`~zuul.model.Build` is put into
+a :py:class:`~zuul.model.BuildSet`.
+
+.. autoclass:: zuul.model.BuildSet
+
+Changes
+~~~~~~~
+
+.. autoclass:: zuul.model.Changeish
+.. autoclass:: zuul.model.Change
+.. autoclass:: zuul.model.Ref
+
+Filters
+~~~~~~~
+
+.. autoclass:: zuul.model.ChangeishFilter
+.. autoclass:: zuul.model.EventFilter
+
+
+Tenants
+~~~~~~~
+
+An abide is a collection of tenants.
+
+.. autoclass:: zuul.model.UnparsedAbideConfig
+.. autoclass:: zuul.model.UnparsedTenantConfig
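+
+For reference, the tenant configuration parsed by these classes looks
+like the ``main.yaml`` fixtures elsewhere in this change, e.g.:
+
+.. code-block:: yaml
+
+   - tenant:
+       name: tenant-one
+       source:
+         gerrit:
+           config-repos:
+             - common-config
+           project-repos:
+             - org/project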
+
+Other Global Objects
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: zuul.model.Project
+.. autoclass:: zuul.model.Layout
+.. autoclass:: zuul.model.RepoFiles
+.. autoclass:: zuul.model.Worker
+.. autoclass:: zuul.model.TriggerEvent
+
+
+Testing
+-------
+
+Zuul provides an extensive framework for performing end-to-end
+functional testing of the system, with major external components
+replaced by fakes for ease of use and speed.
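+
+For example, a test case typically adds a fake change, injects an
+event, waits for Zuul to settle, and then asserts on the resulting
+builds.  A minimal sketch follows; the job and project names match the
+in-repo fixture later in this change, and ``getPatchsetCreatedEvent``
+is assumed to be the usual ``FakeChange`` event helper:
+
+.. code-block:: python
+
+   def test_example(self):
+       # Hold builds so they can be inspected while running.
+       self.launch_server.hold_jobs_in_build = True
+       A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+       self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+       self.waitUntilSettled()
+       self.assertBuilds([dict(name='project-test1', changes='1,1')])
+       self.assertTrue(self.builds[0].hasChanges(A))
+       # Release the held build and let it finish.
+       self.launch_server.hold_jobs_in_build = False
+       self.launch_server.release()
+       self.waitUntilSettled()
+       self.assertHistory([dict(name='project-test1', result='SUCCESS')])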
+
+Test classes that subclass :py:class:`~tests.base.ZuulTestCase` have
+access to a number of attributes useful for manipulating or inspecting
+the environment being simulated in the test:
+
+.. autoclass:: tests.base.ZuulTestCase
+   :members:
+
+.. autoclass:: tests.base.FakeGerritConnection
+   :members:
+   :inherited-members:
+
+.. autoclass:: tests.base.FakeGearmanServer
+   :members:
+
+.. autoclass:: tests.base.RecordingLaunchServer
+   :members:
+
+.. autoclass:: tests.base.FakeBuild
+   :members:
+
+.. autoclass:: tests.base.BuildHistory
+   :members:
diff --git a/doc/source/launchers.rst b/doc/source/launchers.rst
index f368cb9..78d5839 100644
--- a/doc/source/launchers.rst
+++ b/doc/source/launchers.rst
@@ -362,24 +362,3 @@
 
 The original job is expected to complete with a WORK_DATA and
 WORK_FAIL packet as described in `Starting Builds`_.
-
-Build Descriptions
-^^^^^^^^^^^^^^^^^^
-
-In order to update the job running system with a description of the
-current state of all related builds, the job runner may optionally
-implement the following Gearman function:
-
-  set_description:MANAGER_NAME
-
-Where **MANAGER_NAME** is used as described in `Stopping Builds`_.
-The argument to the function is the following encoded in JSON format:
-
-**name**
-  The job name of the build to describe.
-
-**number**
-  The build number of the build to describe.
-
-**html_description**
-  The description of the build in HTML format.
diff --git a/doc/source/zuul.rst b/doc/source/zuul.rst
index 2285ecb..102beac 100644
--- a/doc/source/zuul.rst
+++ b/doc/source/zuul.rst
@@ -52,11 +52,6 @@
   Port on which the Gearman server is listening.
   ``port=4730`` (optional)
 
-**check_job_registration**
-  Check to see if job is registered with Gearman or not. When True
-  a build result of NOT_REGISTERED will be return if job is not found.
-  ``check_job_registration=True``
-
 gearman_server
 """"""""""""""
 
diff --git a/requirements.txt b/requirements.txt
index 77ac0a5..ec0b76a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,3 +15,4 @@
 PrettyTable>=0.6,<0.8
 babel>=1.0
 six>=1.6.0
+ansible>=2.0.0.1
diff --git a/tests/base.py b/tests/base.py
index 38d2817..552bdd6 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -32,6 +32,7 @@
 import string
 import subprocess
 import swiftclient
+import tempfile
 import threading
 import time
 
@@ -47,11 +48,14 @@
 import zuul.scheduler
 import zuul.webapp
 import zuul.rpclistener
-import zuul.launcher.gearman
+import zuul.launcher.server
+import zuul.launcher.client
 import zuul.lib.swift
+import zuul.lib.connections
 import zuul.merger.client
 import zuul.merger.merger
 import zuul.merger.server
+import zuul.nodepool
 import zuul.reporter.gerrit
 import zuul.reporter.smtp
 import zuul.source.gerrit
@@ -99,12 +103,12 @@
 
 
 class FakeChange(object):
-    categories = {'APRV': ('Approved', -1, 1),
-                  'CRVW': ('Code-Review', -2, 2),
-                  'VRFY': ('Verified', -2, 2)}
+    categories = {'approved': ('Approved', -1, 1),
+                  'code-review': ('Code-Review', -2, 2),
+                  'verified': ('Verified', -2, 2)}
 
     def __init__(self, gerrit, number, project, branch, subject,
-                 status='NEW', upstream_root=None):
+                 status='NEW', upstream_root=None, files={}):
         self.gerrit = gerrit
         self.reported = 0
         self.queried = 0
@@ -138,11 +142,11 @@
             'url': 'https://hostname/%s' % number}
 
         self.upstream_root = upstream_root
-        self.addPatchset()
+        self.addPatchset(files=files)
         self.data['submitRecords'] = self.getSubmitRecords()
         self.open = status == 'NEW'
 
-    def add_fake_change_to_repo(self, msg, fn, large):
+    def addFakeChangeToRepo(self, msg, files, large):
         path = os.path.join(self.upstream_root, self.project)
         repo = git.Repo(path)
         ref = ChangeReference.create(repo, '1/%s/%s' % (self.number,
@@ -154,12 +158,11 @@
 
         path = os.path.join(self.upstream_root, self.project)
         if not large:
-            fn = os.path.join(path, fn)
-            f = open(fn, 'w')
-            f.write("test %s %s %s\n" %
-                    (self.branch, self.number, self.latest_patchset))
-            f.close()
-            repo.index.add([fn])
+            for fn, content in files.items():
+                fn = os.path.join(path, fn)
+                with open(fn, 'w') as f:
+                    f.write(content)
+                repo.index.add([fn])
         else:
             for fni in range(100):
                 fn = os.path.join(path, str(fni))
@@ -176,19 +179,20 @@
         repo.heads['master'].checkout()
         return r
 
-    def addPatchset(self, files=[], large=False):
+    def addPatchset(self, files=None, large=False):
         self.latest_patchset += 1
-        if files:
-            fn = files[0]
-        else:
+        if not files:
             fn = '%s-%s' % (self.branch.replace('/', '_'), self.number)
+            data = ("test %s %s %s\n" %
+                    (self.branch, self.number, self.latest_patchset))
+            files = {fn: data}
         msg = self.subject + '-' + str(self.latest_patchset)
-        c = self.add_fake_change_to_repo(msg, fn, large)
+        c = self.addFakeChangeToRepo(msg, files, large)
         ps_files = [{'file': '/COMMIT_MSG',
                      'type': 'ADDED'},
                     {'file': 'README',
                      'type': 'MODIFIED'}]
-        for f in files:
+        for f in files.keys():
             ps_files.append({'file': f, 'type': 'ADDED'})
         d = {'approvals': [],
              'createdOn': time.time(),
@@ -256,7 +260,7 @@
                             "url": "https://hostname/3"},
                  "patchSet": self.patchsets[patchset - 1],
                  "author": {"name": "User Name"},
-                 "approvals": [{"type": "Code-Review",
+                 "approvals": [{"type": "code-review",
                                 "description": "Code-Review",
                                 "value": "0"}],
                  "comment": "This is a comment"}
@@ -382,25 +386,34 @@
 
 
 class FakeGerritConnection(zuul.connection.gerrit.GerritConnection):
+    """A Fake Gerrit connection for use in tests.
+
+    This subclasses
+    :py:class:`~zuul.connection.gerrit.GerritConnection` to add the
+    ability for tests to add changes to the fake Gerrit it represents.
+    """
+
     log = logging.getLogger("zuul.test.FakeGerritConnection")
 
     def __init__(self, connection_name, connection_config,
-                 changes_db=None, queues_db=None, upstream_root=None):
+                 changes_db=None, upstream_root=None):
         super(FakeGerritConnection, self).__init__(connection_name,
                                                    connection_config)
 
-        self.event_queue = queues_db
+        self.event_queue = Queue.Queue()
         self.fixture_dir = os.path.join(FIXTURE_DIR, 'gerrit')
         self.change_number = 0
         self.changes = changes_db
         self.queries = []
         self.upstream_root = upstream_root
 
-    def addFakeChange(self, project, branch, subject, status='NEW'):
+    def addFakeChange(self, project, branch, subject, status='NEW',
+                      files=None):
+        """Add a change to the fake Gerrit."""
         self.change_number += 1
         c = FakeChange(self, self.change_number, project, branch, subject,
                        upstream_root=self.upstream_root,
-                       status=status)
+                       status=status, files=files)
         self.changes[self.change_number] = c
         return c
 
@@ -418,10 +431,11 @@
         # happens they can add their own verified event into the queue.
         # Nevertheless, we can update change with the new review in gerrit.
 
-        for cat in ['CRVW', 'VRFY', 'APRV']:
-            if cat in action:
+        for cat in action.keys():
+            if cat != 'submit':
                 change.addApproval(cat, action[cat], username=self.user)
 
+        # TODOv3(jeblair): can this be removed?
         if 'label' in action:
             parts = action['label'].split('=')
             change.addApproval(parts[0], parts[2], username=self.user)
@@ -469,8 +483,8 @@
         self.__dict__.update(kw)
 
     def __repr__(self):
-        return ("<Completed build, result: %s name: %s #%s changes: %s>" %
-                (self.result, self.name, self.number, self.changes))
+        return ("<Completed build, result: %s name: %s uuid: %s changes: %s>" %
+                (self.result, self.name, self.uuid, self.changes))
 
 
 class FakeURLOpener(object):
@@ -524,27 +538,36 @@
         os.write(self.wake_write, '1\n')
 
 
-class FakeBuild(threading.Thread):
+class FakeBuild(object):
     log = logging.getLogger("zuul.test")
 
-    def __init__(self, worker, job, number, node):
-        threading.Thread.__init__(self)
+    def __init__(self, launch_server, job, node):
         self.daemon = True
-        self.worker = worker
+        self.launch_server = launch_server
         self.job = job
-        self.name = job.name.split(':')[1]
-        self.number = number
+        self.jobdir = None
+        self.uuid = job.unique
         self.node = node
         self.parameters = json.loads(job.arguments)
         self.unique = self.parameters['ZUUL_UUID']
+        self.name = self.parameters['job']
         self.wait_condition = threading.Condition()
         self.waiting = False
         self.aborted = False
         self.created = time.time()
-        self.description = ''
         self.run_error = False
+        self.changes = None
+        if 'ZUUL_CHANGE_IDS' in self.parameters:
+            self.changes = self.parameters['ZUUL_CHANGE_IDS']
+
+    def __repr__(self):
+        waiting = ''
+        if self.waiting:
+            waiting = ' [waiting]'
+        return '<FakeBuild %s %s%s>' % (self.name, self.changes, waiting)
 
     def release(self):
+        """Release this build."""
         self.wait_condition.acquire()
         self.wait_condition.notify()
         self.waiting = False
@@ -552,6 +575,12 @@
         self.wait_condition.release()
 
     def isWaiting(self):
+        """Return whether this build is being held.
+
+        :returns: Whether the build is being held.
+        :rtype: bool
+        """
+
         self.wait_condition.acquire()
         if self.waiting:
             ret = True
@@ -568,183 +597,167 @@
         self.wait_condition.release()
 
     def run(self):
-        data = {
-            'url': 'https://server/job/%s/%s/' % (self.name, self.number),
-            'name': self.name,
-            'number': self.number,
-            'manager': self.worker.worker_id,
-            'worker_name': 'My Worker',
-            'worker_hostname': 'localhost',
-            'worker_ips': ['127.0.0.1', '192.168.1.1'],
-            'worker_fqdn': 'zuul.example.org',
-            'worker_program': 'FakeBuilder',
-            'worker_version': 'v1.1',
-            'worker_extra': {'something': 'else'}
-        }
-
         self.log.debug('Running build %s' % self.unique)
 
-        self.job.sendWorkData(json.dumps(data))
-        self.log.debug('Sent WorkData packet with %s' % json.dumps(data))
-        self.job.sendWorkStatus(0, 100)
-
-        if self.worker.hold_jobs_in_build:
+        if self.launch_server.hold_jobs_in_build:
             self.log.debug('Holding build %s' % self.unique)
             self._wait()
         self.log.debug("Build %s continuing" % self.unique)
 
-        self.worker.lock.acquire()
-
         result = 'SUCCESS'
-        if (('ZUUL_REF' in self.parameters) and
-            self.worker.shouldFailTest(self.name,
-                                       self.parameters['ZUUL_REF'])):
+        if (('ZUUL_REF' in self.parameters) and self.shouldFail()):
             result = 'FAILURE'
         if self.aborted:
             result = 'ABORTED'
 
         if self.run_error:
-            work_fail = True
             result = 'RUN_ERROR'
-        else:
-            data['result'] = result
-            data['node_labels'] = ['bare-necessities']
-            data['node_name'] = 'foo'
-            work_fail = False
 
-        changes = None
-        if 'ZUUL_CHANGE_IDS' in self.parameters:
-            changes = self.parameters['ZUUL_CHANGE_IDS']
+        return result
 
-        self.worker.build_history.append(
-            BuildHistory(name=self.name, number=self.number,
-                         result=result, changes=changes, node=self.node,
-                         uuid=self.unique, description=self.description,
-                         parameters=self.parameters,
-                         pipeline=self.parameters['ZUUL_PIPELINE'])
-        )
+    def shouldFail(self):
+        changes = self.launch_server.fail_tests.get(self.name, [])
+        for change in changes:
+            if self.hasChanges(change):
+                return True
+        return False
 
-        self.job.sendWorkData(json.dumps(data))
-        if work_fail:
-            self.job.sendWorkFail()
-        else:
-            self.job.sendWorkComplete(json.dumps(data))
-        del self.worker.gearman_jobs[self.job.unique]
-        self.worker.running_builds.remove(self)
-        self.worker.lock.release()
+    def hasChanges(self, *changes):
+        """Return whether this build has certain changes in its git repos.
+
+        :arg FakeChange changes: One or more changes (varargs) that
+            are expected to be present (in order) in the git repository
+            of the active project.
+
+        :returns: Whether the build has the indicated changes.
+        :rtype: bool
+
+        """
+        project = self.parameters['ZUUL_PROJECT']
+        path = os.path.join(self.jobdir.git_root, project)
+        repo = git.Repo(path)
+        ref = self.parameters['ZUUL_REF']
+        repo_messages = [c.message.strip() for c in repo.iter_commits(ref)]
+        commit_messages = ['%s-1' % change.subject for change in changes]
+        self.log.debug("Checking if build %s has changes; commit_messages %s;"
+                       " repo_messages %s" % (self, commit_messages,
+                                              repo_messages))
+        for msg in commit_messages:
+            if msg not in repo_messages:
+                self.log.debug("  messages do not match")
+                return False
+        self.log.debug("  OK")
+        return True
 
 
-class FakeWorker(gear.Worker):
-    def __init__(self, worker_id, test):
-        super(FakeWorker, self).__init__(worker_id)
-        self.gearman_jobs = {}
-        self.build_history = []
-        self.running_builds = []
-        self.build_counter = 0
-        self.fail_tests = {}
-        self.test = test
+class RecordingLaunchServer(zuul.launcher.server.LaunchServer):
+    """An Ansible launcher to be used in tests.
 
+    :ivar bool hold_jobs_in_build: If true, when jobs are launched
+        they will report that they have started but then pause until
+        released before reporting completion.  This attribute may be
+        changed at any time and will take effect for subsequently
+        launched builds, but previously held builds will still need to
+        be explicitly released.
+
+    """
+    def __init__(self, *args, **kw):
+        self._run_ansible = kw.pop('_run_ansible', False)
+        super(RecordingLaunchServer, self).__init__(*args, **kw)
         self.hold_jobs_in_build = False
         self.lock = threading.Lock()
-        self.__work_thread = threading.Thread(target=self.work)
-        self.__work_thread.daemon = True
-        self.__work_thread.start()
+        self.running_builds = []
+        self.build_history = []
+        self.fail_tests = {}
+        self.job_builds = {}
 
-    def handleJob(self, job):
-        parts = job.name.split(":")
-        cmd = parts[0]
-        name = parts[1]
-        if len(parts) > 2:
-            node = parts[2]
-        else:
-            node = None
-        if cmd == 'build':
-            self.handleBuild(job, name, node)
-        elif cmd == 'stop':
-            self.handleStop(job, name)
-        elif cmd == 'set_description':
-            self.handleSetDescription(job, name)
+    def failJob(self, name, change):
+        """Instruct the launcher to report matching builds as failures.
 
-    def handleBuild(self, job, name, node):
-        build = FakeBuild(self, job, self.build_counter, node)
-        job.build = build
-        self.gearman_jobs[job.unique] = job
-        self.build_counter += 1
+        :arg str name: The name of the job to fail.
+        :arg Change change: The :py:class:`~tests.base.FakeChange`
+            instance which should cause the job to fail.  This job
+            will also fail for changes depending on this change.
 
-        self.running_builds.append(build)
-        build.start()
-
-    def handleStop(self, job, name):
-        self.log.debug("handle stop")
-        parameters = json.loads(job.arguments)
-        name = parameters['name']
-        number = parameters['number']
-        for build in self.running_builds:
-            if build.name == name and build.number == number:
-                build.aborted = True
-                build.release()
-                job.sendWorkComplete()
-                return
-        job.sendWorkFail()
-
-    def handleSetDescription(self, job, name):
-        self.log.debug("handle set description")
-        parameters = json.loads(job.arguments)
-        name = parameters['name']
-        number = parameters['number']
-        descr = parameters['html_description']
-        for build in self.running_builds:
-            if build.name == name and build.number == number:
-                build.description = descr
-                job.sendWorkComplete()
-                return
-        for build in self.build_history:
-            if build.name == name and build.number == number:
-                build.description = descr
-                job.sendWorkComplete()
-                return
-        job.sendWorkFail()
-
-    def work(self):
-        while self.running:
-            try:
-                job = self.getJob()
-            except gear.InterruptedError:
-                continue
-            try:
-                self.handleJob(job)
-            except:
-                self.log.exception("Worker exception:")
-
-    def addFailTest(self, name, change):
+        """
         l = self.fail_tests.get(name, [])
         l.append(change)
         self.fail_tests[name] = l
 
-    def shouldFailTest(self, name, ref):
-        l = self.fail_tests.get(name, [])
-        for change in l:
-            if self.test.ref_has_change(ref, change):
-                return True
-        return False
-
     def release(self, regex=None):
+        """Release a held build.
+
+        :arg str regex: A regular expression which, if supplied, will
+            cause only builds with matching names to be released.  If
+            not supplied, all builds will be released.
+
+        """
         builds = self.running_builds[:]
-        self.log.debug("releasing build %s (%s)" % (regex,
+        self.log.debug("Releasing build %s (%s)" % (regex,
                                                     len(self.running_builds)))
         for build in builds:
             if not regex or re.match(regex, build.name):
-                self.log.debug("releasing build %s" %
+                self.log.debug("Releasing build %s" %
                                (build.parameters['ZUUL_UUID']))
                 build.release()
             else:
-                self.log.debug("not releasing build %s" %
+                self.log.debug("Not releasing build %s" %
                                (build.parameters['ZUUL_UUID']))
-        self.log.debug("done releasing builds %s (%s)" %
+        self.log.debug("Done releasing builds %s (%s)" %
                        (regex, len(self.running_builds)))
 
+    def launchJob(self, job):
+        node = None
+        build = FakeBuild(self, job, node)
+        job.build = build
+        self.running_builds.append(build)
+        self.job_builds[job.unique] = build
+        super(RecordingLaunchServer, self).launchJob(job)
+
+    def stopJob(self, job):
+        self.log.debug("handle stop")
+        parameters = json.loads(job.arguments)
+        uuid = parameters['uuid']
+        for build in self.running_builds:
+            if build.unique == uuid:
+                build.aborted = True
+                build.release()
+        super(RecordingLaunchServer, self).stopJob(job)
+
+    def runAnsible(self, jobdir, job):
+        build = self.job_builds[job.unique]
+        build.jobdir = jobdir
+
+        if self._run_ansible:
+            result = super(RecordingLaunchServer, self).runAnsible(jobdir, job)
+        else:
+            result = build.run()
+
+        self.lock.acquire()
+        self.build_history.append(
+            BuildHistory(name=build.name, result=result, changes=build.changes,
+                         node=build.node, uuid=build.unique,
+                         parameters=build.parameters,
+                         pipeline=build.parameters['ZUUL_PIPELINE'])
+        )
+        self.running_builds.remove(build)
+        del self.job_builds[job.unique]
+        self.lock.release()
+        return result
+
 
 class FakeGearmanServer(gear.Server):
+    """A Gearman server for use in tests.
+
+    :ivar bool hold_jobs_in_queue: If true, submitted jobs will be
+        added to the queue but will not be distributed to workers
+        until released.  This attribute may be changed at any time and
+        will take effect for subsequently enqueued jobs, but
+        previously held jobs will still need to be explicitly
+        released.
+
+    """
+
     def __init__(self):
         self.hold_jobs_in_queue = False
         super(FakeGearmanServer, self).__init__(0)
@@ -769,6 +782,12 @@
         return None
 
     def release(self, regex=None):
+        """Release a held job.
+
+        :arg str regex: A regular expression which, if supplied, will
+            cause only jobs with matching names to be released.  If
+            not supplied, all jobs will be released.
+        """
         released = False
         qlen = (len(self.high_queue) + len(self.normal_queue) +
                 len(self.low_queue))
@@ -886,6 +905,59 @@
 
 
 class ZuulTestCase(BaseTestCase):
+    """A test case with a functioning Zuul.
+
+    The following class variables are used during test setup and can
+    be overridden by subclasses but are effectively read-only once a
+    test method starts running:
+
+    :cvar str config_file: This points to the main zuul config file
+        within the fixtures directory.  Subclasses may override this
+        to obtain different behavior.
+
+    :cvar str tenant_config_file: This is the tenant config file
+        (which specifies from what git repos the configuration should
+        be loaded).  It defaults to the value specified in
+        `config_file` but can be overridden by subclasses to obtain a
+        different tenant/project layout while using the standard main
+        configuration.
+
+    The following are instance variables that are useful within test
+    methods:
+
+    :ivar FakeGerritConnection fake_<connection>:
+        A :py:class:`~tests.base.FakeGerritConnection` will be
+        instantiated for each connection present in the config file
+        and stored here.  For instance, `fake_gerrit` will hold the
+        FakeGerritConnection object for a connection named `gerrit`.
+
+    :ivar FakeGearmanServer gearman_server: An instance of
+        :py:class:`~tests.base.FakeGearmanServer` which is the Gearman
+        server that all of the Zuul components in this test use to
+        communicate with each other.
+
+    :ivar RecordingLaunchServer launch_server: An instance of
+        :py:class:`~tests.base.RecordingLaunchServer` which is the
+        Ansible launch server used to run jobs for this test.
+
+    :ivar list builds: A list of :py:class:`~tests.base.FakeBuild` objects
+        representing currently running builds.  They are appended to
+        the list in the order they are launched, and removed from this
+        list upon completion.
+
+    :ivar list history: A list of :py:class:`~tests.base.BuildHistory`
+        objects representing completed builds.  They are appended to
+        the list in the order they complete.
+
+    """
+
+    config_file = 'zuul.conf'
+    run_ansible = False
+
+    def _startMerger(self):
+        self.merge_server = zuul.merger.server.MergeServer(self.config,
+                                                           self.connections)
+        self.merge_server.start()
 
     def setUp(self):
         super(ZuulTestCase, self).setUp()
@@ -908,16 +980,15 @@
 
         # Make per test copy of Configuration.
         self.setup_config()
-        self.config.set('zuul', 'layout_config',
+        self.config.set('zuul', 'tenant_config',
                         os.path.join(FIXTURE_DIR,
-                                     self.config.get('zuul', 'layout_config')))
+                                     self.config.get('zuul', 'tenant_config')))
         self.config.set('merger', 'git_dir', self.git_root)
         self.config.set('zuul', 'state_dir', self.state_root)
 
         # For each project in config:
-        self.init_repo("org/project")
-        self.init_repo("org/project1")
-        self.init_repo("org/project2")
+        # TODOv3(jeblair): remove these and replace with new git
+        # filesystem fixtures
         self.init_repo("org/project3")
         self.init_repo("org/project4")
         self.init_repo("org/project5")
@@ -946,10 +1017,6 @@
 
         self.config.set('gearman', 'port', str(self.gearman_server.port))
 
-        self.worker = FakeWorker('fake_worker', self)
-        self.worker.addServer('127.0.0.1', self.gearman_server.port)
-        self.gearman_server.worker = self.worker
-
         zuul.source.gerrit.GerritSource.replication_timeout = 1.5
         zuul.source.gerrit.GerritSource.replication_retry_interval = 0.5
         zuul.connection.gerrit.GerritEventConnector.delay = 0.0
@@ -976,17 +1043,23 @@
         old_urlopen = urllib.request.urlopen
         urllib.request.urlopen = URLOpenerFactory
 
-        self.merge_server = zuul.merger.server.MergeServer(self.config,
-                                                           self.connections)
-        self.merge_server.start()
+        self._startMerger()
 
-        self.launcher = zuul.launcher.gearman.Gearman(self.config, self.sched,
-                                                      self.swift)
+        self.launch_server = RecordingLaunchServer(
+            self.config, self.connections, _run_ansible=self.run_ansible)
+        self.launch_server.start()
+        self.history = self.launch_server.build_history
+        self.builds = self.launch_server.running_builds
+
+        self.launch_client = zuul.launcher.client.LaunchClient(
+            self.config, self.sched, self.swift)
         self.merge_client = zuul.merger.client.MergeClient(
             self.config, self.sched)
+        self.nodepool = zuul.nodepool.Nodepool(self.sched)
 
-        self.sched.setLauncher(self.launcher)
+        self.sched.setLauncher(self.launch_client)
         self.sched.setMerger(self.merge_client)
+        self.sched.setNodepool(self.nodepool)
 
         self.webapp = zuul.webapp.WebApp(
             self.sched, port=0, listen_address='127.0.0.1')
@@ -997,10 +1070,7 @@
         self.sched.resume()
         self.webapp.start()
         self.rpc.start()
-        self.launcher.gearman.waitForServer()
-        self.registerJobs()
-        self.builds = self.worker.running_builds
-        self.history = self.worker.build_history
+        self.launch_client.gearman.waitForServer()
 
         self.addCleanup(self.assertFinalState)
         self.addCleanup(self.shutdown)
@@ -1018,8 +1088,7 @@
         # Set a changes database so multiple FakeGerrit's can report back to
         # a virtual canonical database given by the configured hostname
         self.gerrit_changes_dbs = {}
-        self.gerrit_queues_dbs = {}
-        self.connections = {}
+        self.connections = zuul.lib.connections.ConnectionRegistry()
 
         for section_name in self.config.sections():
             con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
@@ -1039,20 +1108,17 @@
             if con_driver == 'gerrit':
                 if con_config['server'] not in self.gerrit_changes_dbs.keys():
                     self.gerrit_changes_dbs[con_config['server']] = {}
-                if con_config['server'] not in self.gerrit_queues_dbs.keys():
-                    self.gerrit_queues_dbs[con_config['server']] = \
-                        Queue.Queue()
-                    self.event_queues.append(
-                        self.gerrit_queues_dbs[con_config['server']])
-                self.connections[con_name] = FakeGerritConnection(
+                self.connections.connections[con_name] = FakeGerritConnection(
                     con_name, con_config,
                     changes_db=self.gerrit_changes_dbs[con_config['server']],
-                    queues_db=self.gerrit_queues_dbs[con_config['server']],
                     upstream_root=self.upstream_root
                 )
-                setattr(self, 'fake_' + con_name, self.connections[con_name])
+                self.event_queues.append(
+                    self.connections.connections[con_name].event_queue)
+                setattr(self, 'fake_' + con_name,
+                        self.connections.connections[con_name])
             elif con_driver == 'smtp':
-                self.connections[con_name] = \
+                self.connections.connections[con_name] = \
                     zuul.connection.smtp.SMTPConnection(con_name, con_config)
             else:
                 raise Exception("Unknown driver, %s, for connection %s"
@@ -1063,22 +1129,50 @@
 
         if 'gerrit' in self.config.sections():
             self.gerrit_changes_dbs['gerrit'] = {}
-            self.gerrit_queues_dbs['gerrit'] = Queue.Queue()
-            self.event_queues.append(self.gerrit_queues_dbs['gerrit'])
-            self.connections['gerrit'] = FakeGerritConnection(
+            self.event_queues.append(
+                self.connections.connections[con_name].event_queue)
+            self.connections.connections['gerrit'] = FakeGerritConnection(
                 '_legacy_gerrit', dict(self.config.items('gerrit')),
-                changes_db=self.gerrit_changes_dbs['gerrit'],
-                queues_db=self.gerrit_queues_dbs['gerrit'])
+                changes_db=self.gerrit_changes_dbs['gerrit'])
 
         if 'smtp' in self.config.sections():
-            self.connections['smtp'] = \
+            self.connections.connections['smtp'] = \
                 zuul.connection.smtp.SMTPConnection(
                     '_legacy_smtp', dict(self.config.items('smtp')))
 
-    def setup_config(self, config_file='zuul.conf'):
-        """Per test config object. Override to set different config."""
+    def setup_config(self):
+        # This creates the per-test configuration object.  It can be
+        # overridden by subclasses, but should not need to be since it
+        # obeys the config_file and tenant_config_file attributes.
         self.config = ConfigParser.ConfigParser()
-        self.config.read(os.path.join(FIXTURE_DIR, config_file))
+        self.config.read(os.path.join(FIXTURE_DIR, self.config_file))
+        if hasattr(self, 'tenant_config_file'):
+            self.config.set('zuul', 'tenant_config', self.tenant_config_file)
+            git_path = os.path.join(
+                os.path.dirname(
+                    os.path.join(FIXTURE_DIR, self.tenant_config_file)),
+                'git')
+            if os.path.exists(git_path):
+                for reponame in os.listdir(git_path):
+                    project = reponame.replace('_', '/')
+                    self.copyDirToRepo(project,
+                                       os.path.join(git_path, reponame))
+
+    def copyDirToRepo(self, project, source_path):
+        self.init_repo(project)
+
+        files = {}
+        for (dirpath, dirnames, filenames) in os.walk(source_path):
+            for filename in filenames:
+                test_tree_filepath = os.path.join(dirpath, filename)
+                common_path = os.path.commonprefix([test_tree_filepath,
+                                                    source_path])
+                relative_filepath = test_tree_filepath[len(common_path) + 1:]
+                with open(test_tree_filepath, 'r') as f:
+                    content = f.read()
+                files[relative_filepath] = content
+        self.addCommitToRepo(project, 'add content from fixture',
+                             files, branch='master', tag='init')
 
     def assertFinalState(self):
         # Make sure that git.Repo objects have been garbage collected.
@@ -1089,18 +1183,19 @@
                 repos.append(obj)
         self.assertEqual(len(repos), 0)
         self.assertEmptyQueues()
-        for pipeline in self.sched.layout.pipelines.values():
-            if isinstance(pipeline.manager,
-                          zuul.scheduler.IndependentPipelineManager):
-                self.assertEqual(len(pipeline.queues), 0)
+        ipm = zuul.manager.independent.IndependentPipelineManager
+        for tenant in self.sched.abide.tenants.values():
+            for pipeline in tenant.layout.pipelines.values():
+                if isinstance(pipeline.manager, ipm):
+                    self.assertEqual(len(pipeline.queues), 0)
 
     def shutdown(self):
         self.log.debug("Shutting down after tests")
-        self.launcher.stop()
+        self.launch_client.stop()
         self.merge_server.stop()
         self.merge_server.join()
         self.merge_client.stop()
-        self.worker.shutdown()
+        self.launch_server.stop()
         self.sched.stop()
         self.sched.join()
         self.statsd.stop()
@@ -1122,25 +1217,17 @@
         path = os.path.join(self.upstream_root, project)
         repo = git.Repo.init(path)
 
-        repo.config_writer().set_value('user', 'email', 'user@example.com')
-        repo.config_writer().set_value('user', 'name', 'User Name')
-        repo.config_writer().write()
+        with repo.config_writer() as config_writer:
+            config_writer.set_value('user', 'email', 'user@example.com')
+            config_writer.set_value('user', 'name', 'User Name')
 
-        fn = os.path.join(path, 'README')
-        f = open(fn, 'w')
-        f.write("test\n")
-        f.close()
-        repo.index.add([fn])
         repo.index.commit('initial commit')
         master = repo.create_head('master')
-        repo.create_tag('init')
 
         repo.head.reference = master
         zuul.merger.merger.reset_repo_to_head(repo)
         repo.git.clean('-x', '-f', '-d')
 
-        self.create_branch(project, 'mp')
-
     def create_branch(self, project, branch):
         path = os.path.join(self.upstream_root, project)
         repo = git.Repo.init(path)
@@ -1170,6 +1257,8 @@
         return commit.hexsha
 
     def ref_has_change(self, ref, change):
+        # TODOv3(jeblair): this should probably be removed in favor of
+        # build.hasChanges
         path = os.path.join(self.git_root, change.project)
         repo = git.Repo(path)
         try:
@@ -1180,45 +1269,6 @@
             pass
         return False
 
-    def job_has_changes(self, *args):
-        job = args[0]
-        commits = args[1:]
-        if isinstance(job, FakeBuild):
-            parameters = job.parameters
-        else:
-            parameters = json.loads(job.arguments)
-        project = parameters['ZUUL_PROJECT']
-        path = os.path.join(self.git_root, project)
-        repo = git.Repo(path)
-        ref = parameters['ZUUL_REF']
-        sha = parameters['ZUUL_COMMIT']
-        repo_messages = [c.message.strip() for c in repo.iter_commits(ref)]
-        repo_shas = [c.hexsha for c in repo.iter_commits(ref)]
-        commit_messages = ['%s-1' % commit.subject for commit in commits]
-        self.log.debug("Checking if job %s has changes; commit_messages %s;"
-                       " repo_messages %s; sha %s" % (job, commit_messages,
-                                                      repo_messages, sha))
-        for msg in commit_messages:
-            if msg not in repo_messages:
-                self.log.debug("  messages do not match")
-                return False
-        if repo_shas[0] != sha:
-            self.log.debug("  sha does not match")
-            return False
-        self.log.debug("  OK")
-        return True
-
-    def registerJobs(self):
-        count = 0
-        for job in self.sched.layout.jobs.keys():
-            self.worker.registerFunction('build:' + job)
-            count += 1
-        self.worker.registerFunction('stop:' + self.worker.worker_id)
-        count += 1
-
-        while len(self.gearman_server.functions) < count:
-            time.sleep(0)
-
     def orderedRelease(self):
         # Run one build at a time to ensure non-race order:
         while len(self.builds):
@@ -1241,7 +1291,7 @@
             return parameters[name]
 
     def resetGearmanServer(self):
-        self.worker.setFunctions([])
+        self.launch_server.worker.setFunctions([])
         while True:
             done = True
             for connection in self.gearman_server.active_connections:
@@ -1254,33 +1304,32 @@
             time.sleep(0)
         self.gearman_server.functions = set()
         self.rpc.register()
-        self.merge_server.register()
 
     def haveAllBuildsReported(self):
         # See if Zuul is waiting on a meta job to complete
-        if self.launcher.meta_jobs:
+        if self.launch_client.meta_jobs:
             return False
         # Find out if every build that the worker has completed has been
         # reported back to Zuul.  If it hasn't then that means a Gearman
         # event is still in transit and the system is not stable.
-        for build in self.worker.build_history:
-            zbuild = self.launcher.builds.get(build.uuid)
+        for build in self.history:
+            zbuild = self.launch_client.builds.get(build.uuid)
             if not zbuild:
                 # It has already been reported
                 continue
             # It hasn't been reported yet.
             return False
         # Make sure that none of the worker connections are in GRAB_WAIT
-        for connection in self.worker.active_connections:
+        for connection in self.launch_server.worker.active_connections:
             if connection.state == 'GRAB_WAIT':
                 return False
         return True
 
     def areAllBuildsWaiting(self):
-        builds = self.launcher.builds.values()
+        builds = self.launch_client.builds.values()
         for build in builds:
             client_job = None
-            for conn in self.launcher.gearman.active_connections:
+            for conn in self.launch_client.gearman.active_connections:
                 for j in conn.related_jobs.values():
                     if j.unique == build.uuid:
                         client_job = j
@@ -1302,15 +1351,15 @@
                 return False
             if server_job.waiting:
                 continue
-            worker_job = self.worker.gearman_jobs.get(server_job.unique)
-            if worker_job:
-                if build.number is None:
-                    self.log.debug("%s has not reported start" % worker_job)
-                    return False
-                if worker_job.build.isWaiting():
+            if build.url is None:
+                self.log.debug("%s has not reported start" % build)
+                return False
+            worker_build = self.launch_server.job_builds.get(server_job.unique)
+            if worker_build:
+                if worker_build.isWaiting():
                     continue
                 else:
-                    self.log.debug("%s is running" % worker_job)
+                    self.log.debug("%s is running" % worker_build)
                     return False
             else:
                 self.log.debug("%s is unassigned" % server_job)
@@ -1335,47 +1384,52 @@
                     self.log.debug("  %s: %s" % (queue, queue.empty()))
                 self.log.debug("All builds waiting: %s" %
                                (self.areAllBuildsWaiting(),))
+                self.log.debug("All builds reported: %s" %
+                               (self.haveAllBuildsReported(),))
                 raise Exception("Timeout waiting for Zuul to settle")
             # Make sure no new events show up while we're checking
-            self.worker.lock.acquire()
+
+            self.launch_server.lock.acquire()
             # have all build states propogated to zuul?
             if self.haveAllBuildsReported():
                 # Join ensures that the queue is empty _and_ events have been
                 # processed
                 self.eventQueuesJoin()
                 self.sched.run_handler_lock.acquire()
-                if (not self.merge_client.build_sets and
+                if (not self.merge_client.jobs and
                     all(self.eventQueuesEmpty()) and
                     self.haveAllBuildsReported() and
                     self.areAllBuildsWaiting()):
                     self.sched.run_handler_lock.release()
-                    self.worker.lock.release()
+                    self.launch_server.lock.release()
                     self.log.debug("...settled.")
                     return
                 self.sched.run_handler_lock.release()
-            self.worker.lock.release()
+            self.launch_server.lock.release()
             self.sched.wake_event.wait(0.1)
 
     def countJobResults(self, jobs, result):
         jobs = filter(lambda x: x.result == result, jobs)
         return len(jobs)
 
-    def getJobFromHistory(self, name):
-        history = self.worker.build_history
-        for job in history:
-            if job.name == name:
+    def getJobFromHistory(self, name, project=None):
+        for job in self.history:
+            if (job.name == name and
+                (project is None or
+                 job.parameters['ZUUL_PROJECT'] == project)):
                 return job
         raise Exception("Unable to find job %s in history" % name)
 
     def assertEmptyQueues(self):
         # Make sure there are no orphaned jobs
-        for pipeline in self.sched.layout.pipelines.values():
-            for queue in pipeline.queues:
-                if len(queue.queue) != 0:
-                    print('pipeline %s queue %s contents %s' % (
-                        pipeline.name, queue.name, queue.queue))
-                self.assertEqual(len(queue.queue), 0,
-                                 "Pipelines queues should be empty")
+        for tenant in self.sched.abide.tenants.values():
+            for pipeline in tenant.layout.pipelines.values():
+                for queue in pipeline.queues:
+                    if len(queue.queue) != 0:
+                        print('pipeline %s queue %s contents %s' % (
+                            pipeline.name, queue.name, queue.queue))
+                    self.assertEqual(len(queue.queue), 0,
+                                     "Pipelines queues should be empty")
 
     def assertReportedStat(self, key, value=None, kind=None):
         start = time.time()
@@ -1396,3 +1450,125 @@
 
         pprint.pprint(self.statsd.stats)
         raise Exception("Key %s not found in reported stats" % key)
+
+    def assertBuilds(self, builds):
+        """Assert that the running builds are as described.
+
+        The list of running builds is examined and must match exactly
+        the list of builds described by the input.
+
+        :arg list builds: A list of dictionaries.  Each item in the
+            list must match the corresponding build in the list of
+            running builds, and each element of the dictionary must
+            match the corresponding attribute of the build.
+
+        """
+        try:
+            self.assertEqual(len(self.builds), len(builds))
+            for i, d in enumerate(builds):
+                for k, v in d.items():
+                    self.assertEqual(
+                        getattr(self.builds[i], k), v,
+                        "Element %i in builds does not match" % (i,))
+        except Exception:
+            if not self.builds:
+                self.log.error("No running builds")
+            for build in self.builds:
+                self.log.error("Running build: %s" % build)
+            raise
+
+    def assertHistory(self, history):
+        """Assert that the completed builds are as described.
+
+        The list of completed builds is examined and must match
+        exactly the list of builds described by the input.
+
+        :arg list history: A list of dictionaries.  Each item in the
+            list must match the corresponding build in the build
+            history, and each element of the dictionary must match the
+            corresponding attribute of the build.
+
+        """
+        try:
+            self.assertEqual(len(self.history), len(history))
+            for i, d in enumerate(history):
+                for k, v in d.items():
+                    self.assertEqual(
+                        getattr(self.history[i], k), v,
+                        "Element %i in history does not match" % (i,))
+        except Exception:
+            if not self.history:
+                self.log.error("No completed builds")
+            for build in self.history:
+                self.log.error("Completed build: %s" % build)
+            raise
+
+    def getPipeline(self, name):
+        return self.sched.abide.tenants.values()[0].layout.pipelines.get(name)
+
+    def updateConfigLayout(self, path):
+        root = os.path.join(self.test_root, "config")
+        os.makedirs(root)
+        f = tempfile.NamedTemporaryFile(dir=root, delete=False)
+        f.write("""
+tenants:
+  - name: openstack
+    include:
+      - %s
+        """ % os.path.abspath(path))
+        f.close()
+        self.config.set('zuul', 'tenant_config', f.name)
+
+    def addCommitToRepo(self, project, message, files,
+                        branch='master', tag=None):
+        path = os.path.join(self.upstream_root, project)
+        repo = git.Repo(path)
+        repo.head.reference = branch
+        zuul.merger.merger.reset_repo_to_head(repo)
+        for fn, content in files.items():
+            fn = os.path.join(path, fn)
+            with open(fn, 'w') as f:
+                f.write(content)
+            repo.index.add([fn])
+        commit = repo.index.commit(message)
+        repo.heads[branch].commit = commit
+        repo.head.reference = branch
+        repo.git.clean('-x', '-f', '-d')
+        repo.heads[branch].checkout()
+        if tag:
+            repo.create_tag(tag)
+
+    def addEvent(self, connection, event):
+        """Inject a Fake (Gerrit) event.
+
+        This method accepts a JSON-encoded event and simulates Zuul
+        having received it from Gerrit.  It could (and should)
+        eventually apply to any connection type, but is currently only
+        used with Gerrit connections.  The name of the connection is
+        used to look up the corresponding server, and the event is
+        simulated as having been received by all Zuul connections
+        attached to that server.  So if two Gerrit connections in Zuul
+        are connected to the same Gerrit server, and you invoke this
+        method specifying the name of one of them, the event will be
+        received by both.
+
+        .. note::
+
+            "self.fake_gerrit.addEvent" calls should be migrated to
+            this method.
+
+        :arg str connection: The name of the connection corresponding
+            to the gerrit server.
+        :arg str event: The JSON-encoded event.
+
+        """
+        specified_conn = self.connections.connections[connection]
+        for conn in self.connections.connections.values():
+            if (isinstance(conn, specified_conn.__class__) and
+                specified_conn.server == conn.server):
+                conn.addEvent(event)
+
+
+class AnsibleZuulTestCase(ZuulTestCase):
+    """ZuulTestCase but with an actual ansible launcher running"""
+    run_ansible = True
diff --git a/tests/cmd/__init__.py b/tests/cmd/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tests/cmd/__init__.py
+++ /dev/null
diff --git a/tests/fixtures/config/in-repo/git/common-config/zuul.yaml b/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
new file mode 100644
index 0000000..58b2051
--- /dev/null
+++ b/tests/fixtures/config/in-repo/git/common-config/zuul.yaml
@@ -0,0 +1,37 @@
+- pipeline:
+    name: check
+    manager: independent
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- pipeline:
+    name: tenant-one-gate
+    manager: dependent
+    success-message: Build succeeded (tenant-one-gate).
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - approved: 1
+    success:
+      gerrit:
+        verified: 2
+        submit: true
+    failure:
+      gerrit:
+        verified: -2
+    start:
+      gerrit:
+        verified: 0
+    precedence: high
diff --git a/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
new file mode 100644
index 0000000..d6f083d
--- /dev/null
+++ b/tests/fixtures/config/in-repo/git/org_project/.zuul.yaml
@@ -0,0 +1,8 @@
+- job:
+    name: project-test1
+
+- project:
+    name: org/project
+    tenant-one-gate:
+      jobs:
+        - project-test1
diff --git a/tests/fixtures/config/in-repo/git/org_project/README b/tests/fixtures/config/in-repo/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/in-repo/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/in-repo/main.yaml b/tests/fixtures/config/in-repo/main.yaml
new file mode 100644
index 0000000..d9868fa
--- /dev/null
+++ b/tests/fixtures/config/in-repo/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-repos:
+          - common-config
+        project-repos:
+          - org/project
diff --git a/tests/fixtures/config/multi-tenant/git/common-config/zuul.yaml b/tests/fixtures/config/multi-tenant/git/common-config/zuul.yaml
new file mode 100644
index 0000000..08117d6
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant/git/common-config/zuul.yaml
@@ -0,0 +1,21 @@
+- pipeline:
+    name: check
+    manager: independent
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- job:
+    name:
+      python27
+    nodes:
+      - name: controller
+        image: ubuntu-trusty
diff --git a/tests/fixtures/config/multi-tenant/git/org_project1/README b/tests/fixtures/config/multi-tenant/git/org_project1/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/multi-tenant/git/org_project2/README b/tests/fixtures/config/multi-tenant/git/org_project2/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/multi-tenant/git/tenant-one-config/zuul.yaml b/tests/fixtures/config/multi-tenant/git/tenant-one-config/zuul.yaml
new file mode 100644
index 0000000..785f8a5
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant/git/tenant-one-config/zuul.yaml
@@ -0,0 +1,37 @@
+- pipeline:
+    name: tenant-one-gate
+    manager: dependent
+    success-message: Build succeeded (tenant-one-gate).
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - approved: 1
+    success:
+      gerrit:
+        verified: 2
+        submit: true
+    failure:
+      gerrit:
+        verified: -2
+    start:
+      gerrit:
+        verified: 0
+    precedence: high
+
+- job:
+    name: project1-test1
+
+- project:
+    name: org/project1
+    check:
+      jobs:
+        - python27
+        - project1-test1
+    tenant-one-gate:
+      jobs:
+        - python27
+        - project1-test1
diff --git a/tests/fixtures/config/multi-tenant/git/tenant-two-config/zuul.yaml b/tests/fixtures/config/multi-tenant/git/tenant-two-config/zuul.yaml
new file mode 100644
index 0000000..c6127ca
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant/git/tenant-two-config/zuul.yaml
@@ -0,0 +1,36 @@
+- pipeline:
+    name: tenant-two-gate
+    manager: dependent
+    success-message: Build succeeded (tenant-two-gate).
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - approved: 1
+    success:
+      gerrit:
+        verified: 2
+        submit: true
+    failure:
+      gerrit:
+        verified: -2
+    start:
+      gerrit:
+        verified: 0
+    precedence: high
+
+- job:
+    name: project2-test1
+
+- project:
+    name: org/project2
+    check:
+      jobs:
+        - python27
+        - project2-test1
+    tenant-two-gate:
+      jobs:
+        - python27
+        - project2-test1
diff --git a/tests/fixtures/config/multi-tenant/main.yaml b/tests/fixtures/config/multi-tenant/main.yaml
new file mode 100644
index 0000000..b1c47b1
--- /dev/null
+++ b/tests/fixtures/config/multi-tenant/main.yaml
@@ -0,0 +1,17 @@
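+# Both tenants share common-config; each adds a config repo defining its
+# own gate pipeline and jobs.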
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-repos:
+          - common-config
+          - tenant-one-config
+
+- tenant:
+    name: tenant-two
+    source:
+      gerrit:
+        config-repos:
+          - common-config
+          - tenant-two-config
diff --git a/tests/fixtures/config/project-template/git/common-config/zuul.yaml b/tests/fixtures/config/project-template/git/common-config/zuul.yaml
new file mode 100644
index 0000000..c6b237f
--- /dev/null
+++ b/tests/fixtures/config/project-template/git/common-config/zuul.yaml
@@ -0,0 +1,59 @@
+- pipeline:
+    name: check
+    manager: independent
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- pipeline:
+    name: gate
+    manager: dependent
+    success-message: Build succeeded (gate).
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - approved: 1
+    success:
+      gerrit:
+        verified: 2
+        submit: true
+    failure:
+      gerrit:
+        verified: -2
+    start:
+      gerrit:
+        verified: 0
+    precedence: high
+
+- job:
+    name: project-test1
+
+- job:
+    name: project-test2
+
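+# test-template supplies project-test2; org/project below combines the
+# template's jobs with its own project-test1 in the gate pipeline.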
+- project-template:
+    name: test-template
+    gate:
+      jobs:
+        - project-test2
+
+- project:
+    name: org/project
+    templates:
+      - test-template
+    gate:
+      jobs:
+        - project-test1
diff --git a/tests/fixtures/config/project-template/git/org_project/README b/tests/fixtures/config/project-template/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/project-template/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/project-template/main.yaml b/tests/fixtures/config/project-template/main.yaml
new file mode 100644
index 0000000..a22ed5c
--- /dev/null
+++ b/tests/fixtures/config/project-template/main.yaml
@@ -0,0 +1,6 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-repos:
+          - common-config
diff --git a/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
new file mode 100644
index 0000000..3a88863
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/common-config/zuul.yaml
@@ -0,0 +1,58 @@
+- pipeline:
+    name: check
+    manager: independent
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- pipeline:
+    name: gate
+    manager: dependent
+    success-message: Build succeeded (gate).
+    source:
+      gerrit
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - approved: 1
+    success:
+      gerrit:
+        verified: 2
+        submit: true
+    failure:
+      gerrit:
+        verified: -2
+    start:
+      gerrit:
+        verified: 0
+    precedence: high
+
+- job:
+    name: project-merge
+    hold-following-changes: true
+
+- job:
+    name: project-test1
+
+- job:
+    name: project-test2
+
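+# Nesting project-test1 and project-test2 under project-merge forms a
+# job tree: both run only after project-merge succeeds.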
+- project:
+    name: org/project
+    gate:
+      jobs:
+        - project-merge:
+            jobs:
+              - project-test1
+              - project-test2
diff --git a/tests/fixtures/config/single-tenant/git/org_project/README b/tests/fixtures/config/single-tenant/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/single-tenant/main.yaml b/tests/fixtures/config/single-tenant/main.yaml
new file mode 100644
index 0000000..a22ed5c
--- /dev/null
+++ b/tests/fixtures/config/single-tenant/main.yaml
@@ -0,0 +1,6 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-repos:
+          - common-config
diff --git a/tests/fixtures/config/zuul-connections-same-gerrit/git/common-config/zuul.yaml b/tests/fixtures/config/zuul-connections-same-gerrit/git/common-config/zuul.yaml
new file mode 100644
index 0000000..114a4a3
--- /dev/null
+++ b/tests/fixtures/config/zuul-connections-same-gerrit/git/common-config/zuul.yaml
@@ -0,0 +1,28 @@
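+# Exercises two connections to the same Gerrit: success reports through
+# review_gerrit while failure reports through alt_voting_gerrit.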
+- pipeline:
+    name: check
+    manager: independent
+    source: review_gerrit
+    trigger:
+      review_gerrit:
+        - event: patchset-created
+    success:
+      review_gerrit:
+        verified: 1
+    failure:
+      alt_voting_gerrit:
+        verified: -1
+
+- job:
+    name: project-test1
+
+- job:
+    name: project-test2
+
+- project:
+    name: org/project
+    check:
+      jobs:
+        - project-test1
+        - project-test2
diff --git a/tests/fixtures/config/zuul-connections-same-gerrit/git/org_project/README b/tests/fixtures/config/zuul-connections-same-gerrit/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/zuul-connections-same-gerrit/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/zuul-connections-same-gerrit/main.yaml b/tests/fixtures/config/zuul-connections-same-gerrit/main.yaml
new file mode 100644
index 0000000..90297fb
--- /dev/null
+++ b/tests/fixtures/config/zuul-connections-same-gerrit/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+    name: tenant-one
+    source:
+      review_gerrit:
+        config-repos:
+          - common-config
+        project-repos:
+          - org/project
diff --git a/tests/fixtures/layout.yaml b/tests/fixtures/layout.yaml
index 2e48ff1..7d52c17 100644
--- a/tests/fixtures/layout.yaml
+++ b/tests/fixtures/layout.yaml
@@ -3,7 +3,9 @@
 
 pipelines:
   - name: check
-    manager: IndependentPipelineManager
+    manager: independent
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: patchset-created
@@ -15,15 +17,19 @@
         verified: -1
 
   - name: post
-    manager: IndependentPipelineManager
+    manager: independent
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: ref-updated
           ref: ^(?!refs/).*$
 
   - name: gate
-    manager: DependentPipelineManager
+    manager: dependent
     failure-message: Build failed.  For information on how to proceed, see http://wiki.example.org/Test_Failures
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: comment-added
@@ -42,8 +48,10 @@
     precedence: high
 
   - name: unused
-    manager: IndependentPipelineManager
+    manager: independent
     dequeue-on-new-patchset: false
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: comment-added
@@ -51,7 +59,9 @@
             - approved: 1
 
   - name: dup1
-    manager: IndependentPipelineManager
+    manager: independent
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: change-restored
@@ -63,7 +73,9 @@
         verified: -1
 
   - name: dup2
-    manager: IndependentPipelineManager
+    manager: independent
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: change-restored
@@ -75,8 +87,10 @@
         verified: -1
 
   - name: conflict
-    manager: DependentPipelineManager
+    manager: dependent
     failure-message: Build failed.  For information on how to proceed, see http://wiki.example.org/Test_Failures
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: comment-added
@@ -94,7 +108,9 @@
         verified: 0
 
   - name: experimental
-    manager: IndependentPipelineManager
+    manager: independent
+    source:
+      gerrit
     trigger:
       gerrit:
         - event: patchset-created
diff --git a/tests/fixtures/main.yaml b/tests/fixtures/main.yaml
new file mode 100644
index 0000000..f9ec378
--- /dev/null
+++ b/tests/fixtures/main.yaml
@@ -0,0 +1,4 @@
+tenants:
+  - name: openstack
+    include:
+      - layout.yaml
diff --git a/tests/fixtures/zuul-connections-same-gerrit.conf b/tests/fixtures/zuul-connections-same-gerrit.conf
index af31c8a..43109d2 100644
--- a/tests/fixtures/zuul-connections-same-gerrit.conf
+++ b/tests/fixtures/zuul-connections-same-gerrit.conf
@@ -2,7 +2,7 @@
 server=127.0.0.1
 
 [zuul]
-layout_config=layout-connections-multiple-voters.yaml
+tenant_config=config/zuul-connections-same-gerrit/main.yaml
 url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
 job_name_in_report=true
 
diff --git a/tests/fixtures/zuul.conf b/tests/fixtures/zuul.conf
index b250c6d..c08b5ad 100644
--- a/tests/fixtures/zuul.conf
+++ b/tests/fixtures/zuul.conf
@@ -2,7 +2,7 @@
 server=127.0.0.1
 
 [zuul]
-layout_config=layout.yaml
+tenant_config=main.yaml
 url_pattern=http://logs.example.com/{change.number}/{change.patchset}/{pipeline.name}/{job.name}/{build.number}
 job_name_in_report=true
 
diff --git a/tests/print_layout.py b/tests/print_layout.py
new file mode 100644
index 0000000..9afd379
--- /dev/null
+++ b/tests/print_layout.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
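+# Print a test configuration fixture: the main.yaml plus every
+# zuul.yaml / .zuul.yaml found in its git repos.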
+import argparse
+import os
+import sys
+
+import tests.base
+
+CONFIG_DIR = os.path.join(tests.base.FIXTURE_DIR, 'config')
+
+
+def print_file(title, path):
+    print('')
+    print(title)
+    print('-' * 78)
+    with open(path) as f:
+        print(f.read())
+    print('-' * 78)
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Print test layout.')
+    parser.add_argument(dest='config', nargs='?',
+                        help='the test configuration name')
+    args = parser.parse_args()
+    if not args.config:
+        print('Available test configurations:')
+        for d in os.listdir(CONFIG_DIR):
+            print('  ' + d)
+        sys.exit(1)
+    configdir = os.path.join(CONFIG_DIR, args.config)
+
+    title = '   Configuration: %s   ' % args.config
+    print('=' * len(title))
+    print(title)
+    print('=' * len(title))
+    print_file('Main Configuration',
+               os.path.join(configdir, 'main.yaml'))
+
+    gitroot = os.path.join(configdir, 'git')
+    for gitrepo in os.listdir(gitroot):
+        reporoot = os.path.join(gitroot, gitrepo)
+        print('')
+        print('=== Git repo: %s ===' % gitrepo)
+        filenames = os.listdir(reporoot)
+        for fn in filenames:
+            if fn in ['zuul.yaml', '.zuul.yaml']:
+                print_file('File: ' + os.path.join(gitrepo, fn),
+                           os.path.join(reporoot, fn))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/test_cloner.py b/tests/test_cloner.py
index e3576bd..67b5303 100644
--- a/tests/test_cloner.py
+++ b/tests/test_cloner.py
@@ -37,11 +37,13 @@
     workspace_root = None
 
     def setUp(self):
+        self.skip("Disabled for early v3 development")
+
         super(TestCloner, self).setUp()
         self.workspace_root = os.path.join(self.test_root, 'workspace')
 
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-cloner.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-cloner.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -504,8 +506,8 @@
     def test_periodic(self):
         self.worker.hold_jobs_in_build = True
         self.create_branch('org/project', 'stable/havana')
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-timer.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-timer.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -519,8 +521,8 @@
         self.worker.hold_jobs_in_build = False
         # Stop queuing timer triggered jobs so that the assertions
         # below don't race against more jobs being queued.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-no-timer.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-no-timer.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
         self.worker.release()
diff --git a/tests/cmd/test_cloner.py b/tests/test_cloner_cmd.py
similarity index 100%
rename from tests/cmd/test_cloner.py
rename to tests/test_cloner_cmd.py
diff --git a/tests/test_connection.py b/tests/test_connection.py
index c3458ac..b3c133c 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -29,42 +29,44 @@
 
 
 class TestConnections(ZuulTestCase):
-    def setup_config(self, config_file='zuul-connections-same-gerrit.conf'):
-        super(TestConnections, self).setup_config(config_file)
+    config_file = 'zuul-connections-same-gerrit.conf'
+    tenant_config_file = 'config/zuul-connections-same-gerrit/main.yaml'
 
     def test_multiple_connections(self):
         "Test multiple connections to the one gerrit"
 
         A = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'A')
-        self.fake_review_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.addEvent('review_gerrit', A.getPatchsetCreatedEvent(1))
 
         self.waitUntilSettled()
 
         self.assertEqual(len(A.patchsets[-1]['approvals']), 1)
-        self.assertEqual(A.patchsets[-1]['approvals'][0]['type'], 'VRFY')
+        self.assertEqual(A.patchsets[-1]['approvals'][0]['type'], 'verified')
         self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
         self.assertEqual(A.patchsets[-1]['approvals'][0]['by']['username'],
                          'jenkins')
 
         B = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'B')
-        self.worker.addFailTest('project-test2', B)
-        self.fake_review_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+        self.launch_server.failJob('project-test2', B)
+        self.addEvent('review_gerrit', B.getPatchsetCreatedEvent(1))
 
         self.waitUntilSettled()
 
         self.assertEqual(len(B.patchsets[-1]['approvals']), 1)
-        self.assertEqual(B.patchsets[-1]['approvals'][0]['type'], 'VRFY')
+        self.assertEqual(B.patchsets[-1]['approvals'][0]['type'], 'verified')
         self.assertEqual(B.patchsets[-1]['approvals'][0]['value'], '-1')
         self.assertEqual(B.patchsets[-1]['approvals'][0]['by']['username'],
                          'civoter')
 
 
 class TestMultipleGerrits(ZuulTestCase):
+    def setUp(self):
+        self.skip("Disabled for early v3 development")
+
     def setup_config(self,
                      config_file='zuul-connections-multiple-gerrits.conf'):
         super(TestMultipleGerrits, self).setup_config(config_file)
-        self.config.set(
-            'zuul', 'layout_config',
+        self.updateConfigLayout(
             'layout-connections-multiple-gerrits.yaml')
 
     def test_multiple_project_separate_gerrits(self):
diff --git a/tests/test_layoutvalidator.py b/tests/test_layoutvalidator.py
index 46a8c7c..38c8e29 100644
--- a/tests/test_layoutvalidator.py
+++ b/tests/test_layoutvalidator.py
@@ -31,6 +31,9 @@
 
 
 class TestLayoutValidator(testtools.TestCase):
+    def setUp(self):
+        self.skip("Disabled for early v3 development")
+
     def test_layouts(self):
         """Test layout file validation"""
         print()
diff --git a/tests/test_merger_repo.py b/tests/test_merger_repo.py
index 454f3cc..7bf08ee 100644
--- a/tests/test_merger_repo.py
+++ b/tests/test_merger_repo.py
@@ -34,8 +34,11 @@
     workspace_root = None
 
     def setUp(self):
-        super(TestMergerRepo, self).setUp()
-        self.workspace_root = os.path.join(self.test_root, 'workspace')
+        self.skip("Disabled for early v3 development")
+
+    # def setUp(self):
+    #     super(TestMergerRepo, self).setUp()
+    #     self.workspace_root = os.path.join(self.test_root, 'workspace')
 
     def test_ensure_cloned(self):
         parent_path = os.path.join(self.upstream_root, 'org/project1')
diff --git a/tests/test_model.py b/tests/test_model.py
index 6ad0750..fa670a4 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -12,13 +12,15 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+
 import os
 import random
 
 import fixtures
+import testtools
 
-from zuul import change_matcher as cm
 from zuul import model
+from zuul import configloader
 
 from tests.base import BaseTestCase
 
@@ -27,11 +29,12 @@
 
     @property
     def job(self):
-        job = model.Job('job')
-        job.skip_if_matcher = cm.MatchAll([
-            cm.ProjectMatcher('^project$'),
-            cm.MatchAllFiles([cm.FileMatcher('^docs/.*$')]),
-        ])
+        layout = model.Layout()
+        job = configloader.JobParser.fromYaml(layout, {
+            'name': 'job',
+            'irrelevant-files': [
+                '^docs/.*$'
+            ]})
         return job
 
     def test_change_matches_returns_false_for_matched_skip_if(self):
@@ -44,29 +47,245 @@
         change.files = ['/COMMIT_MSG', 'foo']
         self.assertTrue(self.job.changeMatches(change))
 
-    def test_copy_retains_skip_if(self):
-        job = model.Job('job')
-        job.copy(self.job)
-        self.assertTrue(job.skip_if_matcher)
-
-    def _assert_job_booleans_are_not_none(self, job):
-        self.assertIsNotNone(job.voting)
-        self.assertIsNotNone(job.hold_following_changes)
-
     def test_job_sets_defaults_for_boolean_attributes(self):
-        job = model.Job('job')
-        self._assert_job_booleans_are_not_none(job)
+        self.assertIsNotNone(self.job.voting)
 
-    def test_metajob_does_not_set_defaults_for_boolean_attributes(self):
-        job = model.Job('^job')
-        self.assertIsNone(job.voting)
-        self.assertIsNone(job.hold_following_changes)
+    def test_job_inheritance(self):
+        layout = model.Layout()
 
-    def test_metajob_copy_does_not_set_undefined_boolean_attributes(self):
-        job = model.Job('job')
-        metajob = model.Job('^job')
-        job.copy(metajob)
-        self._assert_job_booleans_are_not_none(job)
+        pipeline = model.Pipeline('gate', layout)
+        layout.addPipeline(pipeline)
+        queue = model.ChangeQueue(pipeline)
+        project = model.Project('project')
+
+        base = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'base',
+            'timeout': 30,
+        })
+        layout.addJob(base)
+        python27 = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'python27',
+            'parent': 'base',
+            'timeout': 40,
+        })
+        layout.addJob(python27)
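+        # A second 'python27' definition that matches only stable/diablo;
+        # where its branch matcher applies, its timeout (50) is used.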
+        python27diablo = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'python27',
+            'branches': [
+                'stable/diablo'
+            ],
+            'timeout': 50,
+        })
+        layout.addJob(python27diablo)
+
+        project_config = configloader.ProjectParser.fromYaml(layout, {
+            'name': 'project',
+            'gate': {
+                'jobs': [
+                    'python27'
+                ]
+            }
+        })
+        layout.addProjectConfig(project_config, update_pipeline=False)
+
+        change = model.Change(project)
+        change.branch = 'master'
+        item = queue.enqueueChange(change)
+        item.current_build_set.layout = layout
+
+        self.assertTrue(base.changeMatches(change))
+        self.assertTrue(python27.changeMatches(change))
+        self.assertFalse(python27diablo.changeMatches(change))
+
+        item.freezeJobTree()
+        self.assertEqual(len(item.getJobs()), 1)
+        job = item.getJobs()[0]
+        self.assertEqual(job.name, 'python27')
+        self.assertEqual(job.timeout, 40)
+
+        change.branch = 'stable/diablo'
+        item = queue.enqueueChange(change)
+        item.current_build_set.layout = layout
+
+        self.assertTrue(base.changeMatches(change))
+        self.assertTrue(python27.changeMatches(change))
+        self.assertTrue(python27diablo.changeMatches(change))
+
+        item.freezeJobTree()
+        self.assertEqual(len(item.getJobs()), 1)
+        job = item.getJobs()[0]
+        self.assertEqual(job.name, 'python27')
+        self.assertEqual(job.timeout, 50)
+
+    def test_job_auth_inheritance(self):
+        layout = model.Layout()
+        project = model.Project('project')
+
+        base = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'base',
+            'timeout': 30,
+        })
+        layout.addJob(base)
+        pypi_upload_without_inherit = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'pypi-upload-without-inherit',
+            'parent': 'base',
+            'timeout': 40,
+            'auth': {
+                'password': {
+                    'pypipassword': 'dummypassword'
+                }
+            }
+        })
+        layout.addJob(pypi_upload_without_inherit)
+        pypi_upload_with_inherit = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'pypi-upload-with-inherit',
+            'parent': 'base',
+            'timeout': 40,
+            'auth': {
+                'inherit': True,
+                'password': {
+                    'pypipassword': 'dummypassword'
+                }
+            }
+        })
+        layout.addJob(pypi_upload_with_inherit)
+        pypi_upload_with_inherit_false = configloader.JobParser.fromYaml(
+            layout, {
+                '_source_project': project,
+                'name': 'pypi-upload-with-inherit-false',
+                'parent': 'base',
+                'timeout': 40,
+                'auth': {
+                    'inherit': False,
+                    'password': {
+                        'pypipassword': 'dummypassword'
+                    }
+                }
+            })
+        layout.addJob(pypi_upload_with_inherit_false)
+        in_repo_job_without_inherit = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'in-repo-job-without-inherit',
+            'parent': 'pypi-upload-without-inherit',
+        })
+        layout.addJob(in_repo_job_without_inherit)
+        in_repo_job_with_inherit = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'in-repo-job-with-inherit',
+            'parent': 'pypi-upload-with-inherit',
+        })
+        layout.addJob(in_repo_job_with_inherit)
+        in_repo_job_with_inherit_false = configloader.JobParser.fromYaml(
+            layout, {
+                '_source_project': project,
+                'name': 'in-repo-job-with-inherit-false',
+                'parent': 'pypi-upload-with-inherit-false',
+            })
+        layout.addJob(in_repo_job_with_inherit_false)
+
+        self.assertNotIn('password', in_repo_job_without_inherit.auth)
+        self.assertIn('password', in_repo_job_with_inherit.auth)
+        self.assertEquals(in_repo_job_with_inherit.auth['password'],
+                          {'pypipassword': 'dummypassword'})
+        self.assertNotIn('password', in_repo_job_with_inherit_false.auth)
+
+    def test_job_inheritance_job_tree(self):
+        layout = model.Layout()
+
+        pipeline = model.Pipeline('gate', layout)
+        layout.addPipeline(pipeline)
+        queue = model.ChangeQueue(pipeline)
+        project = model.Project('project')
+
+        base = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'base',
+            'timeout': 30,
+        })
+        layout.addJob(base)
+        python27 = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'python27',
+            'parent': 'base',
+            'timeout': 40,
+        })
+        layout.addJob(python27)
+        python27diablo = configloader.JobParser.fromYaml(layout, {
+            '_source_project': project,
+            'name': 'python27',
+            'branches': [
+                'stable/diablo'
+            ],
+            'timeout': 50,
+        })
+        layout.addJob(python27diablo)
+
+        project_config = configloader.ProjectParser.fromYaml(layout, {
+            'name': 'project',
+            'gate': {
+                'jobs': [
+                    {'python27': {'timeout': 70}}
+                ]
+            }
+        })
+        layout.addProjectConfig(project_config, update_pipeline=False)
+
+        change = model.Change(project)
+        change.branch = 'master'
+        item = queue.enqueueChange(change)
+        item.current_build_set.layout = layout
+
+        self.assertTrue(base.changeMatches(change))
+        self.assertTrue(python27.changeMatches(change))
+        self.assertFalse(python27diablo.changeMatches(change))
+
+        item.freezeJobTree()
+        self.assertEqual(len(item.getJobs()), 1)
+        job = item.getJobs()[0]
+        self.assertEqual(job.name, 'python27')
+        self.assertEqual(job.timeout, 70)
+
+        change.branch = 'stable/diablo'
+        item = queue.enqueueChange(change)
+        item.current_build_set.layout = layout
+
+        self.assertTrue(base.changeMatches(change))
+        self.assertTrue(python27.changeMatches(change))
+        self.assertTrue(python27diablo.changeMatches(change))
+
+        item.freezeJobTree()
+        self.assertEqual(len(item.getJobs()), 1)
+        job = item.getJobs()[0]
+        self.assertEqual(job.name, 'python27')
+        self.assertEqual(job.timeout, 70)
+
+    def test_job_source_project(self):
+        layout = model.Layout()
+        base_project = model.Project('base_project')
+        base = configloader.JobParser.fromYaml(layout, {
+            '_source_project': base_project,
+            'name': 'base',
+        })
+        layout.addJob(base)
+
+        other_project = model.Project('other_project')
+        base2 = configloader.JobParser.fromYaml(layout, {
+            '_source_project': other_project,
+            'name': 'base',
+        })
+        with testtools.ExpectedException(
+                Exception,
+                "Job base in other_project is not permitted "
+                "to shadow job base in base_project"):
+            layout.addJob(base2)
 
 
 class TestJobTimeData(BaseTestCase):
diff --git a/tests/test_requirements.py b/tests/test_requirements.py
index 3ae56ad..1cad659 100644
--- a/tests/test_requirements.py
+++ b/tests/test_requirements.py
@@ -27,6 +27,9 @@
 class TestRequirements(ZuulTestCase):
     """Test pipeline and trigger requirements"""
 
+    def setUp(self):
+        self.skip("Disabled for early v3 development")
+
     def test_pipeline_require_approval_newer_than(self):
         "Test pipeline requirement: approval newer than"
         return self._test_require_approval_newer_than('org/project1',
@@ -38,8 +41,8 @@
                                                       'project2-trigger')
 
     def _test_require_approval_newer_than(self, project, job):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-newer-than.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-newer-than.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -76,8 +79,8 @@
                                                       'project2-trigger')
 
     def _test_require_approval_older_than(self, project, job):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-older-than.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-older-than.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -114,8 +117,8 @@
                                                     'project2-trigger')
 
     def _test_require_approval_username(self, project, job):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-username.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-username.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -145,8 +148,8 @@
                                                  'project2-trigger')
 
     def _test_require_approval_email(self, project, job):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-email.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-email.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -176,8 +179,8 @@
                                                  'project2-trigger')
 
     def _test_require_approval_vote1(self, project, job):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-vote1.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-vote1.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -213,8 +216,8 @@
                                                  'project2-trigger')
 
     def _test_require_approval_vote2(self, project, job):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-vote2.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-vote2.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -261,9 +264,8 @@
 
     def test_pipeline_require_current_patchset(self):
         "Test pipeline requirement: current-patchset"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-'
-                        'current-patchset.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-current-patchset.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
         # Create two patchsets and let their tests settle out. Then
@@ -290,8 +292,8 @@
 
     def test_pipeline_require_open(self):
         "Test pipeline requirement: open"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-open.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-open.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -308,8 +310,8 @@
 
     def test_pipeline_require_status(self):
         "Test pipeline requirement: status"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-requirement-status.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-requirement-status.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -327,8 +329,7 @@
     def _test_require_reject_username(self, project, job):
         "Test negative username's match"
         # Should only trigger if Jenkins hasn't voted.
-        self.config.set(
-            'zuul', 'layout_config',
+        self.updateConfigLayout(
             'tests/fixtures/layout-requirement-reject-username.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
@@ -369,8 +370,7 @@
 
     def _test_require_reject(self, project, job):
         "Test no approval matches a reject param"
-        self.config.set(
-            'zuul', 'layout_config',
+        self.updateConfigLayout(
             'tests/fixtures/layout-requirement-reject.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
diff --git a/tests/test_scheduler.py b/tests/test_scheduler.py
index 335f987..51ece68 100755
--- a/tests/test_scheduler.py
+++ b/tests/test_scheduler.py
@@ -21,6 +21,7 @@
 import shutil
 import time
 import yaml
+from unittest import skip
 
 import git
 from six.moves import urllib
@@ -43,13 +44,14 @@
 
 
 class TestScheduler(ZuulTestCase):
+    tenant_config_file = 'config/single-tenant/main.yaml'
 
     def test_jobs_launched(self):
         "Test that jobs are launched and a change is merged"
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -60,6 +62,7 @@
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+        # TODOv3(jeblair): we may want to report stats by tenant (also?).
         self.assertReportedStat('gerrit.event.comment-added', value='1|c')
         self.assertReportedStat('zuul.pipeline.gate.current_changes',
                                 value='1|g')
@@ -80,12 +83,12 @@
 
     def test_initial_pipeline_gauges(self):
         "Test that each pipeline reported its length on start"
-        pipeline_names = self.sched.layout.pipelines.keys()
-        self.assertNotEqual(len(pipeline_names), 0)
-        for name in pipeline_names:
-            self.assertReportedStat('zuul.pipeline.%s.current_changes' % name,
-                                    value='0|g')
+        self.assertReportedStat('zuul.pipeline.gate.current_changes',
+                                value='0|g')
+        self.assertReportedStat('zuul.pipeline.check.current_changes',
+                                value='0|g')
 
+    @skip("Disabled for early v3 development")
     def test_duplicate_pipelines(self):
         "Test that a change matching multiple pipelines works"
 
@@ -112,69 +115,69 @@
     def test_parallel_changes(self):
         "Test that changes are tested in parallel and merged in series"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 1)
         self.assertEqual(self.builds[0].name, 'project-merge')
-        self.assertTrue(self.job_has_changes(self.builds[0], A))
+        self.assertTrue(self.builds[0].hasChanges(A))
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 3)
         self.assertEqual(self.builds[0].name, 'project-test1')
-        self.assertTrue(self.job_has_changes(self.builds[0], A))
+        self.assertTrue(self.builds[0].hasChanges(A))
         self.assertEqual(self.builds[1].name, 'project-test2')
-        self.assertTrue(self.job_has_changes(self.builds[1], A))
+        self.assertTrue(self.builds[1].hasChanges(A))
         self.assertEqual(self.builds[2].name, 'project-merge')
-        self.assertTrue(self.job_has_changes(self.builds[2], A, B))
+        self.assertTrue(self.builds[2].hasChanges(A, B))
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 5)
         self.assertEqual(self.builds[0].name, 'project-test1')
-        self.assertTrue(self.job_has_changes(self.builds[0], A))
+        self.assertTrue(self.builds[0].hasChanges(A))
         self.assertEqual(self.builds[1].name, 'project-test2')
-        self.assertTrue(self.job_has_changes(self.builds[1], A))
+        self.assertTrue(self.builds[1].hasChanges(A))
 
         self.assertEqual(self.builds[2].name, 'project-test1')
-        self.assertTrue(self.job_has_changes(self.builds[2], A, B))
+        self.assertTrue(self.builds[2].hasChanges(A, B))
         self.assertEqual(self.builds[3].name, 'project-test2')
-        self.assertTrue(self.job_has_changes(self.builds[3], A, B))
+        self.assertTrue(self.builds[3].hasChanges(A, B))
 
         self.assertEqual(self.builds[4].name, 'project-merge')
-        self.assertTrue(self.job_has_changes(self.builds[4], A, B, C))
+        self.assertTrue(self.builds[4].hasChanges(A, B, C))
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 6)
         self.assertEqual(self.builds[0].name, 'project-test1')
-        self.assertTrue(self.job_has_changes(self.builds[0], A))
+        self.assertTrue(self.builds[0].hasChanges(A))
         self.assertEqual(self.builds[1].name, 'project-test2')
-        self.assertTrue(self.job_has_changes(self.builds[1], A))
+        self.assertTrue(self.builds[1].hasChanges(A))
 
         self.assertEqual(self.builds[2].name, 'project-test1')
-        self.assertTrue(self.job_has_changes(self.builds[2], A, B))
+        self.assertTrue(self.builds[2].hasChanges(A, B))
         self.assertEqual(self.builds[3].name, 'project-test2')
-        self.assertTrue(self.job_has_changes(self.builds[3], A, B))
+        self.assertTrue(self.builds[3].hasChanges(A, B))
 
         self.assertEqual(self.builds[4].name, 'project-test1')
-        self.assertTrue(self.job_has_changes(self.builds[4], A, B, C))
+        self.assertTrue(self.builds[4].hasChanges(A, B, C))
         self.assertEqual(self.builds[5].name, 'project-test2')
-        self.assertTrue(self.job_has_changes(self.builds[5], A, B, C))
+        self.assertTrue(self.builds[5].hasChanges(A, B, C))
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 0)
 
@@ -188,48 +191,78 @@
 
     def test_failed_changes(self):
         "Test that a change behind a failed change is retested"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
-        self.worker.addFailTest('project-test1', A)
+        self.launch_server.failJob('project-test1', A)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.waitUntilSettled()
+        self.assertBuilds([dict(name='project-merge', changes='1,1')])
+
+        self.launch_server.release('.*-merge')
+        self.waitUntilSettled()
+        # A/project-merge is complete
+        self.assertBuilds([
+            dict(name='project-test1', changes='1,1'),
+            dict(name='project-test2', changes='1,1'),
+            dict(name='project-merge', changes='1,1 2,1'),
+        ])
+
+        self.launch_server.release('.*-merge')
+        self.waitUntilSettled()
+        # A/project-merge is complete
+        # B/project-merge is complete
+        self.assertBuilds([
+            dict(name='project-test1', changes='1,1'),
+            dict(name='project-test2', changes='1,1'),
+            dict(name='project-test1', changes='1,1 2,1'),
+            dict(name='project-test2', changes='1,1 2,1'),
+        ])
+
+        # Release project-test1 for A which will fail.  This will
+        # abort both running B jobs and relaunch project-merge for B.
+        self.builds[0].release()
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
-        self.waitUntilSettled()
+        self.orderedRelease()
+        self.assertHistory([
+            dict(name='project-merge', result='SUCCESS', changes='1,1'),
+            dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
+            dict(name='project-test1', result='FAILURE', changes='1,1'),
+            dict(name='project-test1', result='ABORTED', changes='1,1 2,1'),
+            dict(name='project-test2', result='ABORTED', changes='1,1 2,1'),
+            dict(name='project-test2', result='SUCCESS', changes='1,1'),
+            dict(name='project-merge', result='SUCCESS', changes='2,1'),
+            dict(name='project-test1', result='SUCCESS', changes='2,1'),
+            dict(name='project-test2', result='SUCCESS', changes='2,1'),
+        ])
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
-
-        self.waitUntilSettled()
-        # It's certain that the merge job for change 2 will run, but
-        # the test1 and test2 jobs may or may not run.
-        self.assertTrue(len(self.history) > 6)
         self.assertEqual(A.data['status'], 'NEW')
         self.assertEqual(B.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
         self.assertEqual(B.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_independent_queues(self):
         "Test that changes end up in the right queues"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -241,17 +274,17 @@
         self.assertTrue(self.job_has_changes(self.builds[1], B))
 
         # Release the current merge builds
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
         # Release the merge job for project2 which is behind project1
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # All the test builds should be running:
         # project1 (3) + project2 (3) + project (2) = 8
         self.assertEqual(len(self.builds), 8)
 
-        self.worker.release()
+        self.launch_server.release()
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 0)
 
@@ -263,22 +296,23 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_failed_change_at_head(self):
         "Test that if a change at the head fails, jobs behind it are canceled"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.worker.addFailTest('project-test1', A)
+        self.launch_server.failJob('project-test1', A)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -286,11 +320,11 @@
         self.assertEqual(self.builds[0].name, 'project-merge')
         self.assertTrue(self.job_has_changes(self.builds[0], A))
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 6)
@@ -308,8 +342,8 @@
         self.assertEqual(len(self.builds), 2)
         self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 4)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 0)
@@ -321,30 +355,31 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_failed_change_in_middle(self):
         "Test a failed change in the middle of the queue"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.worker.addFailTest('project-test1', B)
+        self.launch_server.failJob('project-test1', B)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 6)
@@ -364,7 +399,7 @@
         self.assertEqual(len(self.builds), 4)
         self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 2)
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # project-test1 and project-test2 for A
@@ -384,8 +419,8 @@
         self.assertEqual(self.countJobResults(builds, 'SUCCESS'), 1)
         self.assertEqual(self.countJobResults(builds, None), 2)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 0)
@@ -397,6 +432,7 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_failed_change_at_head_with_queue(self):
         "Test that if a change at the head fails, queued jobs are canceled"
 
@@ -404,15 +440,15 @@
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.worker.addFailTest('project-test1', A)
+        self.launch_server.failJob('project-test1', A)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
         queue = self.gearman_server.getQueue()
@@ -459,11 +495,12 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def _test_time_database(self, iteration):
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         time.sleep(2)
 
@@ -489,40 +526,42 @@
             self.assertTrue(found_job['estimated_time'] >= 2)
             self.assertIsNotNone(found_job['remaining_time'])
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
+    @skip("Disabled for early v3 development")
     def test_time_database(self):
         "Test the time database"
 
         self._test_time_database(1)
         self._test_time_database(2)
 
+    @skip("Disabled for early v3 development")
     def test_two_failed_changes_at_head(self):
         "Test that changes are reparented correctly if 2 fail at head"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.worker.addFailTest('project-test1', A)
-        self.worker.addFailTest('project-test1', B)
+        self.launch_server.failJob('project-test1', A)
+        self.launch_server.failJob('project-test1', B)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 6)
@@ -545,7 +584,7 @@
         self.waitUntilSettled()
 
         # restart of C after B failure
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 5)
@@ -570,9 +609,9 @@
         self.waitUntilSettled()
 
         # restart of B,C after A failure
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 4)
@@ -591,8 +630,8 @@
         self.assertTrue(self.job_has_changes(self.builds[2], B))
         self.assertTrue(self.job_has_changes(self.builds[2], C))
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 0)
@@ -604,6 +643,7 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_parse_skip_if(self):
         job_yaml = """
 jobs:
@@ -634,14 +674,15 @@
         matcher = self.sched._parseSkipIf(config_job)
         self.assertEqual(expected, matcher)
 
+    @skip("Disabled for early v3 development")
     def test_patch_order(self):
         "Test that dependent patches are tested in the right order"
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
         M2 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M2')
         M1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M1')
@@ -658,7 +699,7 @@
         A.setDependsOn(M1, 1)
         M1.setDependsOn(M2, 1)
 
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -666,8 +707,8 @@
         self.assertEqual(B.data['status'], 'NEW')
         self.assertEqual(C.data['status'], 'NEW')
 
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
 
         self.waitUntilSettled()
         self.assertEqual(M2.queried, 0)
@@ -678,6 +719,7 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_needed_changes_enqueue(self):
         "Test that a needed change is enqueued ahead"
         #          A      Given a git tree like this, if we enqueue
@@ -702,14 +744,14 @@
         F.setDependsOn(B, 1)
         G.setDependsOn(A, 1)
 
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        D.addApproval('CRVW', 2)
-        E.addApproval('CRVW', 2)
-        F.addApproval('CRVW', 2)
-        G.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        D.addApproval('code-review', 2)
+        E.addApproval('code-review', 2)
+        F.addApproval('code-review', 2)
+        G.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -730,20 +772,20 @@
         for connection in self.connections.values():
             connection.maintainCache([])
 
-        self.worker.hold_jobs_in_build = True
-        A.addApproval('APRV', 1)
-        B.addApproval('APRV', 1)
-        D.addApproval('APRV', 1)
-        E.addApproval('APRV', 1)
-        F.addApproval('APRV', 1)
-        G.addApproval('APRV', 1)
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.launch_server.hold_jobs_in_build = True
+        A.addApproval('approved', 1)
+        B.addApproval('approved', 1)
+        D.addApproval('approved', 1)
+        E.addApproval('approved', 1)
+        F.addApproval('approved', 1)
+        G.addApproval('approved', 1)
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         for x in range(8):
-            self.worker.release('.*-merge')
+            self.launch_server.release('.*-merge')
             self.waitUntilSettled()
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -763,15 +805,16 @@
         self.assertEqual(self.history[6].changes,
                          '1,1 2,1 3,1 4,1 5,1 6,1 7,1')
 
+    @skip("Disabled for early v3 development")
     def test_source_cache(self):
         "Test that the source cache operates correctly"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         X = self.fake_gerrit.addFakeChange('org/project', 'master', 'X')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
         M1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M1')
         M1.setMerged()
@@ -779,7 +822,7 @@
         B.setDependsOn(A, 1)
         A.setDependsOn(M1, 1)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.fake_gerrit.addEvent(X.getPatchsetCreatedEvent(1))
 
         self.waitUntilSettled()
@@ -793,15 +836,15 @@
                 build.release()
         self.waitUntilSettled()
 
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.log.debug("len %s" % self.fake_gerrit._change_cache.keys())
         # there should still be changes in the cache
         self.assertNotEqual(len(self.fake_gerrit._change_cache.keys()), 0)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -809,6 +852,7 @@
         self.assertEqual(A.queried, 2)  # Initial and isMerged
         self.assertEqual(B.queried, 3)  # Initial A, refresh from B, isMerged
 
+    @skip("Disabled for early v3 development")
     def test_can_merge(self):
         "Test whether a change is ready to merge"
         # TODO: move to test_gerrit (this is a unit test!)
@@ -818,14 +862,15 @@
         mgr = self.sched.layout.pipelines['gate'].manager
         self.assertFalse(source.canMerge(a, mgr.getSubmitAllowNeeds()))
 
-        A.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
         a = source._getChange(1, 2, refresh=True)
         self.assertFalse(source.canMerge(a, mgr.getSubmitAllowNeeds()))
 
-        A.addApproval('APRV', 1)
+        A.addApproval('approved', 1)
         a = source._getChange(1, 2, refresh=True)
         self.assertTrue(source.canMerge(a, mgr.getSubmitAllowNeeds()))
 
+    @skip("Disabled for early v3 development")
     def test_build_configuration(self):
         "Test that zuul merges the right commits for testing"
 
@@ -833,12 +878,12 @@
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.gearman_server.release('.*-merge')
@@ -860,6 +905,7 @@
         correct_messages = ['initial commit', 'A-1', 'B-1', 'C-1']
         self.assertEqual(repo_messages, correct_messages)
 
+    @skip("Disabled for early v3 development")
     def test_build_configuration_conflict(self):
         "Test that merge conflicts are handled"
 
@@ -872,12 +918,12 @@
         B.addPatchset(['conflict'])
         C = self.fake_gerrit.addFakeChange('org/conflict-project',
                                            'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.reported, 1)
@@ -905,6 +951,7 @@
         self.assertEqual(C.reported, 2)
         self.assertEqual(len(self.history), 6)
 
+    @skip("Disabled for early v3 development")
     def test_post(self):
         "Test that post jobs run"
 
@@ -927,6 +974,7 @@
         self.assertEqual(len(self.history), 1)
         self.assertIn('project-post', job_names)
 
+    @skip("Disabled for early v3 development")
     def test_post_ignore_deletes(self):
         "Test that deleting refs does not trigger post jobs"
 
@@ -949,11 +997,12 @@
         self.assertEqual(len(self.history), 0)
         self.assertNotIn('project-post', job_names)
 
+    @skip("Disabled for early v3 development")
     def test_post_ignore_deletes_negative(self):
         "Test that deleting refs does trigger post jobs"
 
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-dont-ignore-deletes.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-dont-ignore-deletes.yaml')
         self.sched.reconfigure(self.config)
 
         e = {
@@ -975,6 +1024,7 @@
         self.assertEqual(len(self.history), 1)
         self.assertIn('project-post', job_names)
 
+    @skip("Disabled for early v3 development")
     def test_build_configuration_branch(self):
         "Test that the right commits are on alternate branches"
 
@@ -982,12 +1032,12 @@
         A = self.fake_gerrit.addFakeChange('org/project', 'mp', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'mp', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'mp', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.gearman_server.release('.*-merge')
@@ -1009,6 +1059,7 @@
         correct_messages = ['initial commit', 'mp commit', 'A-1', 'B-1', 'C-1']
         self.assertEqual(repo_messages, correct_messages)
 
+    @skip("Disabled for early v3 development")
     def test_build_configuration_branch_interaction(self):
         "Test that switching between branches works"
         self.test_build_configuration()
@@ -1019,6 +1070,7 @@
         repo.heads.master.commit = repo.commit('init')
         self.test_build_configuration()
 
+    @skip("Disabled for early v3 development")
     def test_build_configuration_multi_branch(self):
         "Test that dependent changes on multiple branches are merged"
 
@@ -1026,12 +1078,12 @@
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'mp', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
         queue = self.gearman_server.getQueue()
         job_A = None
@@ -1101,16 +1153,17 @@
         self.assertNotEqual(ref_A, ref_B)
         self.assertNotEqual(ref_B, ref_C)
         self.assertNotEqual(commit_A, commit_B)
         self.assertNotEqual(commit_B, commit_C)
 
+    @skip("Disabled for early v3 development")
     def test_one_job_project(self):
         "Test that queueing works with one job"
         A = self.fake_gerrit.addFakeChange('org/one-job-project',
                                            'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/one-job-project',
                                            'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -1118,6 +1171,7 @@
         self.assertEqual(B.data['status'], 'MERGED')
         self.assertEqual(B.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_job_from_templates_launched(self):
         "Test whether a job generated via a template can be launched"
 
@@ -1131,6 +1185,7 @@
         self.assertEqual(self.getJobFromHistory('project-test2').result,
                          'SUCCESS')
 
+    @skip("Disabled for early v3 development")
     def test_layered_templates(self):
         "Test whether a job generated via a template can be launched"
 
@@ -1152,15 +1207,16 @@
         self.assertEqual(self.getJobFromHistory('project-test6').result,
                          'SUCCESS')
 
+    @skip("Disabled for early v3 development")
     def test_dependent_changes_dequeue(self):
         "Test that dependent patches are not needlessly tested"
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
         M1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M1')
         M1.setMerged()
@@ -1171,11 +1227,11 @@
         B.setDependsOn(A, 1)
         A.setDependsOn(M1, 1)
 
-        self.worker.addFailTest('project-merge', A)
+        self.launch_server.failJob('project-merge', A)
 
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -1187,52 +1243,53 @@
         self.assertEqual(C.reported, 2)
         self.assertEqual(len(self.history), 1)
 
+    @skip("Disabled for early v3 development")
     def test_failing_dependent_changes(self):
         "Test that failing dependent patches are taken out of stream"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
         D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
         E = self.fake_gerrit.addFakeChange('org/project', 'master', 'E')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        D.addApproval('CRVW', 2)
-        E.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        D.addApproval('code-review', 2)
+        E.addApproval('code-review', 2)
 
         # E, D -> C -> B, A
 
         D.setDependsOn(C, 1)
         C.setDependsOn(B, 1)
 
-        self.worker.addFailTest('project-test1', B)
+        self.launch_server.failJob('project-test1', B)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(E.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(D.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(E.addApproval('approved', 1))
 
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
+        self.launch_server.hold_jobs_in_build = False
         for build in self.builds:
             if build.parameters['ZUUL_CHANGE'] != '1':
                 build.release()
                 self.waitUntilSettled()
 
-        self.worker.release()
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -1252,26 +1309,27 @@
         self.assertIn('Build succeeded', E.messages[1])
         self.assertEqual(len(self.history), 18)
 
+    @skip("Disabled for early v3 development")
     def test_head_is_dequeued_once(self):
         "Test that if a change at the head fails it is dequeued only once"
         # If it's dequeued more than once, we should see extra
         # aborted jobs.
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.worker.addFailTest('project1-test1', A)
-        self.worker.addFailTest('project1-test2', A)
-        self.worker.addFailTest('project1-project2-integration', A)
+        self.launch_server.failJob('project1-test1', A)
+        self.launch_server.failJob('project1-test2', A)
+        self.launch_server.failJob('project1-project2-integration', A)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -1279,11 +1337,11 @@
         self.assertEqual(self.builds[0].name, 'project1-merge')
         self.assertTrue(self.job_has_changes(self.builds[0], A))
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 9)
@@ -1303,8 +1361,8 @@
         self.assertEqual(len(self.builds), 3)  # test2, integration, merge for B
         self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 6)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 0)
@@ -1317,14 +1375,15 @@
         self.assertEqual(B.reported, 2)
         self.assertEqual(C.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_nonvoting_job(self):
         "Test that non-voting jobs don't vote."
 
         A = self.fake_gerrit.addFakeChange('org/nonvoting-project',
                                            'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.worker.addFailTest('nonvoting-project-test2', A)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.launch_server.failJob('nonvoting-project-test2', A)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -1343,6 +1402,7 @@
         for build in self.builds:
             self.assertEqual(build.parameters['ZUUL_VOTING'], '0')
 
+    @skip("Disabled for early v3 development")
     def test_check_queue_success(self):
         "Test successful check queue jobs."
 
@@ -1360,11 +1420,12 @@
         self.assertEqual(self.getJobFromHistory('project-test2').result,
                          'SUCCESS')
 
+    @skip("Disabled for early v3 development")
     def test_check_queue_failure(self):
         "Test failed check queue jobs."
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        self.worker.addFailTest('project-test2', A)
+        self.launch_server.failJob('project-test2', A)
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
 
         self.waitUntilSettled()
@@ -1378,12 +1439,13 @@
         self.assertEqual(self.getJobFromHistory('project-test2').result,
                          'FAILURE')
 
+    @skip("Disabled for early v3 development")
     def test_dependent_behind_dequeue(self):
         "test that dependent changes behind dequeued changes work"
         # This complicated test is a reproduction of a real life bug
         self.sched.reconfigure(self.config)
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
@@ -1392,44 +1454,44 @@
         F = self.fake_gerrit.addFakeChange('org/project3', 'master', 'F')
         D.setDependsOn(C, 1)
         E.setDependsOn(D, 1)
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        D.addApproval('CRVW', 2)
-        E.addApproval('CRVW', 2)
-        F.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        D.addApproval('code-review', 2)
+        E.addApproval('code-review', 2)
+        F.addApproval('code-review', 2)
 
         A.fail_merge = True
 
         # Change object re-use in the gerrit trigger is hidden if
         # changes are added in quick succession; waiting makes it more
         # like real life.
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
-        self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(D.addApproval('approved', 1))
         self.waitUntilSettled()
-        self.fake_gerrit.addEvent(E.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(E.addApproval('approved', 1))
         self.waitUntilSettled()
-        self.fake_gerrit.addEvent(F.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(F.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # all jobs running
@@ -1443,8 +1505,8 @@
         c.release()
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -1464,12 +1526,13 @@
         self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 15)
         self.assertEqual(len(self.history), 44)
 
+    @skip("Disabled for early v3 development")
     def test_merger_repack(self):
         "Test that the merger works after a repack"
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -1486,8 +1549,8 @@
         print(repack_repo(path))
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -1498,6 +1561,7 @@
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_merger_repack_large_change(self):
         "Test that the merger works with large changes after a repack"
         # https://bugs.launchpad.net/zuul/+bug/1078946
@@ -1512,8 +1576,8 @@
         path = os.path.join(self.git_root, "org/project1")
         print(repack_repo(path))
 
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project1-merge').result,
                          'SUCCESS')
@@ -1524,6 +1588,7 @@
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_nonexistent_job(self):
         "Test launching a job that doesn't exist"
         # Set to the state immediately after a restart
@@ -1531,8 +1596,8 @@
         self.launcher.negative_function_cache_ttl = 0
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         # There may be a thread about to report a lost change
         while A.reported < 2:
             self.waitUntilSettled()
@@ -1545,8 +1610,8 @@
         # Make sure things still work:
         self.registerJobs()
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -1557,6 +1622,7 @@
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_single_nonexistent_post_job(self):
         "Test launching a single post job that doesn't exist"
         e = {
@@ -1580,10 +1646,11 @@
 
         self.assertEqual(len(self.history), 0)
 
+    @skip("Disabled for early v3 development")
     def test_new_patchset_dequeues_old(self):
         "Test that a new patchset causes the old to be dequeued"
         # D -> C (depends on B) -> B (depends on A) -> A -> M
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         M = self.fake_gerrit.addFakeChange('org/project', 'master', 'M')
         M.setMerged()
 
@@ -1591,27 +1658,27 @@
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
         D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        D.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        D.addApproval('code-review', 2)
 
         C.setDependsOn(B, 1)
         B.setDependsOn(A, 1)
         A.setDependsOn(M, 1)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(D.addApproval('approved', 1))
         self.waitUntilSettled()
 
         B.addPatchset()
         self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -1624,10 +1691,11 @@
         self.assertEqual(D.reported, 2)
         self.assertEqual(len(self.history), 9)  # 3 each for A, B, D.
 
+    @skip("Disabled for early v3 development")
     def test_new_patchset_check(self):
         "Test a new patchset in check"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
@@ -1704,8 +1772,8 @@
         self.waitUntilSettled()
         self.builds[0].release()
         self.waitUntilSettled()
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.reported, 1)
@@ -1719,14 +1787,15 @@
         self.assertEqual(self.history[3].result, 'SUCCESS')
         self.assertEqual(self.history[3].changes, '1,1 2,2')
 
+    @skip("Disabled for early v3 development")
     def test_abandoned_gate(self):
         "Test that an abandoned change is dequeued from gate"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 1, "One job being built (on hold)")
         self.assertEqual(self.builds[0].name, 'project-merge')
@@ -1734,7 +1803,7 @@
         self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 0, "No job running")
@@ -1744,10 +1813,11 @@
         self.assertEqual(A.reported, 1,
                          "Abandoned gate change should report only start")
 
+    @skip("Disabled for early v3 development")
     def test_abandoned_check(self):
         "Test that an abandoned change is dequeued from check"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
@@ -1787,8 +1857,8 @@
         self.assertEqual(items[1].change.number, '2')
         self.assertTrue(items[1].live)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(len(self.history), 4)
@@ -1797,14 +1867,15 @@
         self.assertEqual(A.reported, 0, "Abandoned change should not report")
         self.assertEqual(B.reported, 1, "Change should report")
 
+    @skip("Disabled for early v3 development")
     def test_abandoned_not_timer(self):
         "Test that an abandoned change does not cancel timer jobs"
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         # Start timer trigger - also org/project
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-idle.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-idle.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
         # The pipeline triggers every second, so we should have seen
@@ -1813,8 +1884,8 @@
         self.waitUntilSettled()
         # Stop queuing timer-triggered jobs so that the assertions
         # below don't race against more jobs being queued.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-no-timer.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-no-timer.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
         self.assertEqual(len(self.builds), 2, "Two timer jobs")
@@ -1829,58 +1900,60 @@
 
         self.assertEqual(len(self.builds), 2, "Two timer jobs remain")
 
-        self.worker.release()
+        self.launch_server.release()
         self.waitUntilSettled()
 
+    @skip("Disabled for early v3 development")
     def test_zuul_url_return(self):
         "Test if ZUUL_URL is returning when zuul_url is set in zuul.conf"
         self.assertTrue(self.sched.config.has_option('merger', 'zuul_url'))
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 1)
         for build in self.builds:
             self.assertTrue('ZUUL_URL' in build.parameters)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
+    @skip("Disabled for early v3 development")
     def test_new_patchset_dequeues_old_on_head(self):
         "Test that a new patchset causes the old to be dequeued (at head)"
         # D -> C (depends on B) -> B (depends on A) -> A -> M
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         M = self.fake_gerrit.addFakeChange('org/project', 'master', 'M')
         M.setMerged()
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
         D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        D.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        D.addApproval('code-review', 2)
 
         C.setDependsOn(B, 1)
         B.setDependsOn(A, 1)
         A.setDependsOn(M, 1)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(D.addApproval('approved', 1))
         self.waitUntilSettled()
 
         A.addPatchset()
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -1893,27 +1966,28 @@
         self.assertEqual(D.reported, 2)
         self.assertEqual(len(self.history), 7)
 
+    @skip("Disabled for early v3 development")
     def test_new_patchset_dequeues_old_without_dependents(self):
         "Test that a new patchset causes only the old to be dequeued"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         B.addPatchset()
         self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -1924,9 +1998,10 @@
         self.assertEqual(C.reported, 2)
         self.assertEqual(len(self.history), 9)
 
+    @skip("Disabled for early v3 development")
     def test_new_patchset_dequeues_old_independent_queue(self):
         "Test that a new patchset causes the old to be dequeued (independent)"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
@@ -1939,8 +2014,8 @@
         self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -1952,11 +2027,12 @@
         self.assertEqual(len(self.history), 10)
         self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
 
+    @skip("Disabled for early v3 development")
     def test_noop_job(self):
         "Test that the internal noop job works"
         A = self.fake_gerrit.addFakeChange('org/noop-project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(len(self.gearman_server.getQueue()), 0)
@@ -1965,6 +2041,7 @@
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_no_job_project(self):
         "Test that reports with no jobs don't get sent"
         A = self.fake_gerrit.addFakeChange('org/no-jobs-project',
@@ -1982,9 +2059,10 @@
 
         self.assertEqual(len(self.history), 0)
 
+    @skip("Disabled for early v3 development")
     def test_zuul_refs(self):
         "Test that zuul refs exist and have the right changes"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         M1 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'M1')
         M1.setMerged()
         M2 = self.fake_gerrit.addFakeChange('org/project2', 'master', 'M2')
@@ -1994,23 +2072,23 @@
         B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
         D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        D.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        D.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(D.addApproval('approved', 1))
 
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         a_zref = b_zref = c_zref = d_zref = None
@@ -2056,8 +2134,8 @@
         self.assertTrue(self.ref_has_change(d_zref, C))
         self.assertTrue(self.ref_has_change(d_zref, D))
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -2069,21 +2147,23 @@
         self.assertEqual(D.data['status'], 'MERGED')
         self.assertEqual(D.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_rerun_on_error(self):
         "Test that if a worker fails to run a job, it is run again"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.builds[0].run_error = True
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
         self.assertEqual(self.countJobResults(self.history, 'RUN_ERROR'), 1)
         self.assertEqual(self.countJobResults(self.history, 'SUCCESS'), 3)
 
+    @skip("Disabled for early v3 development")
     def test_statsd(self):
         "Test each of the statsd methods used in the scheduler"
         import extras
@@ -2095,6 +2175,7 @@
         self.assertReportedStat('test-timing', '3|ms')
         self.assertReportedStat('test-gauge', '12|g')
 
+    @skip("Disabled for early v3 development")
     def test_stuck_job_cleanup(self):
         "Test that pending jobs are cleaned up if removed from layout"
         # This job won't be registered at startup because it is not in
@@ -2104,13 +2185,13 @@
         self.worker.registerFunction('build:gate-noop')
         self.gearman_server.hold_jobs_in_queue = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(len(self.gearman_server.getQueue()), 1)
 
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-no-jobs.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-no-jobs.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
 
@@ -2123,6 +2204,7 @@
         self.assertEqual(self.history[0].name, 'gate-noop')
         self.assertEqual(self.history[0].result, 'SUCCESS')
 
+    @skip("Disabled for early v3 development")
     def test_file_head(self):
         # This is a regression test for an observed bug.  A change
         # with a file named "HEAD" in the root directory of the repo
@@ -2147,15 +2229,16 @@
         self.assertIn('Build succeeded', A.messages[0])
         self.assertIn('Build succeeded', B.messages[0])
 
+    @skip("Disabled for early v3 development")
     def test_file_jobs(self):
         "Test that file jobs run only when appropriate"
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         A.addPatchset(['pip-requires'])
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
         testfile_jobs = [x for x in self.history
@@ -2168,10 +2251,11 @@
         self.assertEqual(B.data['status'], 'MERGED')
         self.assertEqual(B.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def _test_skip_if_jobs(self, branch, should_skip):
         "Test that jobs with a skip-if filter run only when appropriate"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-skip-if.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-skip-if.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -2189,35 +2273,21 @@
         else:
             self.assertIn(change.data['number'], tested_change_ids)
 
+    @skip("Disabled for early v3 development")
     def test_skip_if_match_skips_job(self):
         self._test_skip_if_jobs(branch='master', should_skip=True)
 
+    @skip("Disabled for early v3 development")
     def test_skip_if_no_match_runs_job(self):
         self._test_skip_if_jobs(branch='mp', should_skip=False)
 
+    @skip("Disabled for early v3 development")
     def test_test_config(self):
         "Test that we can test the config"
-        self.sched.testConfig(self.config.get('zuul', 'layout_config'),
+        self.sched.testConfig(self.config.get('zuul', 'tenant_config'),
                               self.connections)
 
-    def test_build_description(self):
-        "Test that build descriptions update"
-        self.worker.registerFunction('set_description:' +
-                                     self.worker.worker_id)
-
-        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.waitUntilSettled()
-        desc = self.history[0].description
-        self.log.debug("Description: %s" % desc)
-        self.assertTrue(re.search("Branch.*master", desc))
-        self.assertTrue(re.search("Pipeline.*gate", desc))
-        self.assertTrue(re.search("project-merge.*SUCCESS", desc))
-        self.assertTrue(re.search("project-test1.*SUCCESS", desc))
-        self.assertTrue(re.search("project-test2.*SUCCESS", desc))
-        self.assertTrue(re.search("Reported result.*SUCCESS", desc))
-
+    @skip("Disabled for early v3 development")
     def test_queue_names(self):
         "Test shared change queue names"
         project1 = self.sched.layout.projects['org/project1']
@@ -2227,21 +2297,22 @@
         self.assertEqual(q1.name, 'integration')
         self.assertEqual(q2.name, 'integration')
 
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-bad-queue.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-bad-queue.yaml')
         with testtools.ExpectedException(
             Exception, "More than one name assigned to change queue"):
             self.sched.reconfigure(self.config)
 
+    @skip("Disabled for early v3 development")
     def test_queue_precedence(self):
         "Test that queue precedence works"
 
         self.gearman_server.hold_jobs_in_queue = True
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
 
         self.waitUntilSettled()
         self.gearman_server.hold_jobs_in_queue = False
@@ -2250,7 +2321,7 @@
 
         # Run one build at a time to ensure non-race order:
         self.orderedRelease()
-        self.worker.hold_jobs_in_build = False
+        self.launch_server.hold_jobs_in_build = False
         self.waitUntilSettled()
 
         self.log.debug(self.history)
@@ -2261,15 +2332,16 @@
         self.assertEqual(self.history[4].pipeline, 'check')
         self.assertEqual(self.history[5].pipeline, 'check')
 
+    @skip("Disabled for early v3 development")
     def test_json_status(self):
         "Test that we can retrieve JSON status info"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('project-merge')
+        self.launch_server.release('project-merge')
         self.waitUntilSettled()
 
         port = self.webapp.server.socket.getsockname()[1]
@@ -2287,8 +2359,8 @@
         self.assertIn('Expires', headers)
         data = f.read()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         data = json.loads(data)
@@ -2323,20 +2395,22 @@
         self.assertEqual('http://logs.example.com/1/1/gate/project-test2/2',
                          status_jobs[2]['report_url'])
 
+    @skip("Disabled for early v3 development")
     def test_merging_queues(self):
         "Test that transitively-connected change queues are merged"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-merge-queues.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-merge-queues.yaml')
         self.sched.reconfigure(self.config)
         self.assertEqual(len(self.sched.layout.pipelines['gate'].queues), 1)
 
+    @skip("Disabled for early v3 development")
     def test_mutex(self):
         "Test job mutexes"
         self.config.set('zuul', 'layout_config',
                         'tests/fixtures/layout-mutex.yaml')
         self.sched.reconfigure(self.config)
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
@@ -2349,7 +2423,7 @@
         self.assertEqual(self.builds[1].name, 'mutex-one')
         self.assertEqual(self.builds[2].name, 'project-test1')
 
-        self.worker.release('mutex-one')
+        self.launch_server.release('mutex-one')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 3)
@@ -2358,7 +2432,7 @@
         self.assertEqual(self.builds[2].name, 'mutex-two')
         self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
 
-        self.worker.release('mutex-two')
+        self.launch_server.release('mutex-two')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 3)
@@ -2367,7 +2441,7 @@
         self.assertEqual(self.builds[2].name, 'mutex-one')
         self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
 
-        self.worker.release('mutex-one')
+        self.launch_server.release('mutex-one')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 3)
@@ -2376,7 +2450,7 @@
         self.assertEqual(self.builds[2].name, 'mutex-two')
         self.assertTrue('test-mutex' in self.sched.mutex.mutexes)
 
-        self.worker.release('mutex-two')
+        self.launch_server.release('mutex-two')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 2)
@@ -2384,8 +2458,8 @@
         self.assertEqual(self.builds[1].name, 'project-test1')
         self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
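         # Both mutex jobs have completed, so the mutex is free again
         # even though the remaining builds are still held.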
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
 
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 0)
@@ -2394,13 +2468,14 @@
         self.assertEqual(B.reported, 1)
         self.assertFalse('test-mutex' in self.sched.mutex.mutexes)
 
+    @skip("Disabled for early v3 development")
     def test_node_label(self):
         "Test that a job runs on a specific node label"
         self.worker.registerFunction('build:node-project-test1:debian')
 
         A = self.fake_gerrit.addFakeChange('org/node-project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertIsNone(self.getJobFromHistory('node-project-merge').node)
@@ -2408,18 +2483,19 @@
                          'debian')
         self.assertIsNone(self.getJobFromHistory('node-project-test2').node)
 
+    @skip("Disabled for early v3 development")
     def test_live_reconfiguration(self):
         "Test that live reconfiguration works"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.sched.reconfigure(self.config)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -2430,13 +2506,14 @@
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_live_reconfiguration_merge_conflict(self):
         # A real-world bug: a change in a gate queue has a merge
         # conflict and a job is added to its project while it's
         # sitting in the queue.  The job gets added to the change and
         # enqueued, and the change gets stuck.
         self.worker.registerFunction('build:project-test3')
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         # This change is fine.  It's here to stop the queue long
         # enough for the next change to be subject to the
@@ -2444,17 +2521,17 @@
         # next change.  This change will succeed and merge.
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         A.addPatchset(['conflict'])
-        A.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
 
         # This change will be in merge conflict.  During the
         # reconfiguration, we will add a job.  We want to make sure
         # that doesn't cause it to get stuck.
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         B.addPatchset(['conflict'])
-        B.addApproval('CRVW', 2)
+        B.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -2466,14 +2543,13 @@
         self.assertEqual(len(self.history), 0)
 
         # Add the "project-test3" job.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-live-'
-                        'reconfiguration-add-job.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-live-reconfiguration-add-job.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -2490,31 +2566,32 @@
                          'SUCCESS')
         self.assertEqual(len(self.history), 4)
 
+    @skip("Disabled for early v3 development")
     def test_live_reconfiguration_failed_root(self):
         # An extrapolation of test_live_reconfiguration_merge_conflict
         # that tests that a job added to a job tree with a failed root does
         # not run.
         self.worker.registerFunction('build:project-test3')
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         # This change is fine.  It's here to stop the queue long
         # enough for the next change to be subject to the
         # reconfiguration.  This change will succeed and merge.
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         A.addPatchset(['conflict'])
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
-        self.worker.addFailTest('project-merge', B)
-        B.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.launch_server.failJob('project-merge', B)
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # Both -merge jobs have run, but no others.
@@ -2529,14 +2606,13 @@
         self.assertEqual(len(self.history), 2)
 
         # Add the "project-test3" job.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-live-'
-                        'reconfiguration-add-job.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-live-reconfiguration-add-job.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -2552,6 +2628,7 @@
         self.assertEqual(self.history[4].result, 'SUCCESS')
         self.assertEqual(len(self.history), 5)
 
+    @skip("Disabled for early v3 development")
     def test_live_reconfiguration_failed_job(self):
         # Test that a change with a removed failing job does not
         # disrupt reconfiguration.  If a change has a failed job and
@@ -2559,18 +2636,18 @@
         # bug where the code to re-set build statuses would run on
         # that build and raise an exception because the job no longer
         # existed.
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
 
         # This change will fail and later be removed by the reconfiguration.
-        self.worker.addFailTest('project-test1', A)
+        self.launch_server.failJob('project-test1', A)
 
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('project-test1')
+        self.launch_server.release('project-test1')
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -2583,14 +2660,13 @@
         self.assertEqual(len(self.history), 2)
 
         # Remove the test1 job.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-live-'
-                        'reconfiguration-failed-job.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-live-reconfiguration-failed-job.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(self.getJobFromHistory('project-test2').result,
@@ -2605,22 +2681,23 @@
         # Ensure the removed job was not included in the report.
         self.assertNotIn('project-test1', A.messages[0])
 
+    @skip("Disabled for early v3 development")
     def test_live_reconfiguration_shared_queue(self):
         # Test that a change with a failing job which was removed from
         # this project but otherwise still exists in the system does
         # not disrupt reconfiguration.
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
 
-        self.worker.addFailTest('project1-project2-integration', A)
+        self.launch_server.failJob('project1-project2-integration', A)
 
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('project1-project2-integration')
+        self.launch_server.release('project1-project2-integration')
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -2633,14 +2710,13 @@
         self.assertEqual(len(self.history), 2)
 
         # Remove the integration job.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-live-'
-                        'reconfiguration-shared-queue.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-live-reconfiguration-shared-queue.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(self.getJobFromHistory('project1-merge').result,
@@ -2659,6 +2735,7 @@
         # Ensure the removed job was not included in the report.
         self.assertNotIn('project1-project2-integration', A.messages[0])
 
+    @skip("Disabled for early v3 development")
     def test_double_live_reconfiguration_shared_queue(self):
         # This was a real-world regression.  A change is added to
         # gate; a reconfigure happens, a second change which depends
@@ -2667,18 +2744,18 @@
 
         # A failure may indicate incorrect caching or cleaning up of
         # references during a reconfiguration.
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
         B.setDependsOn(A, 1)
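         # B is a git child of A, so the two changes must stay together
         # in the shared queue across both reconfigurations.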
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
         # Add the parent change.
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # Reconfigure (with only one change in the pipeline).
@@ -2686,17 +2763,17 @@
         self.waitUntilSettled()
 
         # Add the child change.
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # Reconfigure (with both in the pipeline).
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(len(self.history), 8)
@@ -2706,11 +2783,12 @@
         self.assertEqual(B.data['status'], 'MERGED')
         self.assertEqual(B.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_live_reconfiguration_del_project(self):
         # Test project deletion from layout
         # while changes are enqueued
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
@@ -2718,19 +2796,18 @@
         # A Depends-On: B
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
             A.subject, B.data['id'])
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
 
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
         self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
         self.assertEqual(len(self.builds), 5)
 
         # This layout defines only org/project, not org/project1
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-live-'
-                        'reconfiguration-del-project.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-live-reconfiguration-del-project.yaml')
         self.sched.reconfigure(self.config)
         self.waitUntilSettled()
 
@@ -2740,8 +2817,8 @@
         self.assertEqual(job_c.changes, '3,1')
         self.assertEqual(job_c.result, 'ABORTED')
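         # C's build is aborted because org/project1 no longer exists
         # in the reloaded layout.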
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(self.getJobFromHistory('project-test1').changes,
@@ -2757,13 +2834,14 @@
         self.assertEqual(len(self.sched.layout.pipelines['check'].queues), 0)
         self.assertIn('Build succeeded', A.messages[0])
 
+    @skip("Disabled for early v3 development")
     def test_live_reconfiguration_functions(self):
         "Test live reconfiguration with a custom function"
         self.worker.registerFunction('build:node-project-test1:debian')
         self.worker.registerFunction('build:node-project-test1:wheezy')
         A = self.fake_gerrit.addFakeChange('org/node-project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertIsNone(self.getJobFromHistory('node-project-merge').node)
@@ -2771,15 +2849,14 @@
                          'debian')
         self.assertIsNone(self.getJobFromHistory('node-project-test2').node)
 
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-live-'
-                        'reconfiguration-functions.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-live-reconfiguration-functions.yaml')
         self.sched.reconfigure(self.config)
         self.worker.build_history = []
 
         B = self.fake_gerrit.addFakeChange('org/node-project', 'master', 'B')
-        B.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertIsNone(self.getJobFromHistory('node-project-merge').node)
@@ -2787,16 +2864,17 @@
                          'wheezy')
         self.assertIsNone(self.getJobFromHistory('node-project-test2').node)
 
+    @skip("Disabled for early v3 development")
     def test_delayed_repo_init(self):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-delayed-repo-init.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-delayed-repo-init.yaml')
         self.sched.reconfigure(self.config)
 
         self.init_repo("org/new-project")
         A = self.fake_gerrit.addFakeChange('org/new-project', 'master', 'A')
 
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -2807,16 +2885,17 @@
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_repo_deleted(self):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-repo-deleted.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-repo-deleted.yaml')
         self.sched.reconfigure(self.config)
 
         self.init_repo("org/delete-project")
         A = self.fake_gerrit.addFakeChange('org/delete-project', 'master', 'A')
 
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -2832,8 +2911,8 @@
 
         B = self.fake_gerrit.addFakeChange('org/delete-project', 'master', 'B')
 
-        B.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
         self.assertEqual(self.getJobFromHistory('project-merge').result,
                          'SUCCESS')
@@ -2844,6 +2923,7 @@
         self.assertEqual(B.data['status'], 'MERGED')
         self.assertEqual(B.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_tags(self):
         "Test job tags"
         self.config.set('zuul', 'layout_config',
@@ -2863,11 +2943,12 @@
             self.assertEqual(results.get(build.name, ''),
                              build.parameters.get('BUILD_TAGS'))
 
+    @skip("Disabled for early v3 development")
     def test_timer(self):
         "Test that a periodic job is triggered"
-        self.worker.hold_jobs_in_build = True
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-timer.yaml')
+        self.launch_server.hold_jobs_in_build = True
+        self.updateConfigLayout(
+            'tests/fixtures/layout-timer.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -2884,14 +2965,14 @@
         f = urllib.request.urlopen(req)
         data = f.read()
 
-        self.worker.hold_jobs_in_build = False
+        self.launch_server.hold_jobs_in_build = False
         # Stop queuing timer-triggered jobs so that the assertions
         # below don't race against more jobs being queued.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-no-timer.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-no-timer.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
-        self.worker.release()
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(self.getJobFromHistory(
@@ -2911,16 +2992,17 @@
         self.assertIn('project-bitrot-stable-old', status_jobs)
         self.assertIn('project-bitrot-stable-older', status_jobs)
 
+    @skip("Disabled for early v3 development")
     def test_idle(self):
         "Test that frequent periodic jobs work"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         for x in range(1, 3):
             # Test that timer triggers periodic jobs even across
             # layout config reloads.
             # Start timer trigger
-            self.config.set('zuul', 'layout_config',
-                            'tests/fixtures/layout-idle.yaml')
+            self.updateConfigLayout(
+                'tests/fixtures/layout-idle.yaml')
             self.sched.reconfigure(self.config)
             self.registerJobs()
             self.waitUntilSettled()
@@ -2931,21 +3013,22 @@
 
             # Stop queuing timer-triggered jobs so that the assertions
             # below don't race against more jobs being queued.
-            self.config.set('zuul', 'layout_config',
-                            'tests/fixtures/layout-no-timer.yaml')
+            self.updateConfigLayout(
+                'tests/fixtures/layout-no-timer.yaml')
             self.sched.reconfigure(self.config)
             self.registerJobs()
             self.waitUntilSettled()
 
             self.assertEqual(len(self.builds), 2)
-            self.worker.release('.*')
+            self.launch_server.release('.*')
             self.waitUntilSettled()
             self.assertEqual(len(self.builds), 0)
             self.assertEqual(len(self.history), x * 2)
 
+    @skip("Disabled for early v3 development")
     def test_check_smtp_pool(self):
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-smtp.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-smtp.yaml')
         self.sched.reconfigure(self.config)
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -2974,11 +3057,12 @@
         self.assertEqual(A.messages[0],
                          self.smtp_messages[1]['body'])
 
+    @skip("Disabled for early v3 development")
     def test_timer_smtp(self):
         "Test that a periodic job is triggered"
-        self.worker.hold_jobs_in_build = True
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-timer-smtp.yaml')
+        self.launch_server.hold_jobs_in_build = True
+        self.updateConfigLayout(
+            'tests/fixtures/layout-timer-smtp.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -2988,7 +3072,7 @@
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 2)
-        self.worker.release('.*')
+        self.launch_server.release('.*')
         self.waitUntilSettled()
         self.assertEqual(len(self.history), 2)
 
@@ -3012,19 +3096,20 @@
 
         # Stop queuing timer-triggered jobs and let any that may have
         # queued through so that end-of-test assertions pass.
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-no-timer.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-no-timer.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
         self.waitUntilSettled()
-        self.worker.release('.*')
+        self.launch_server.release('.*')
         self.waitUntilSettled()
 
+    @skip("Disabled for early v3 development")
     def test_client_enqueue_change(self):
         "Test that the RPC client can enqueue a change"
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        A.addApproval('APRV', 1)
+        A.addApproval('code-review', 2)
+        A.addApproval('approved', 1)
 
         client = zuul.rpcclient.RPCClient('127.0.0.1',
                                           self.gearman_server.port)
@@ -3043,6 +3128,7 @@
         self.assertEqual(A.reported, 2)
         self.assertEqual(r, True)
 
+    @skip("Disabled for early v3 development")
     def test_client_enqueue_ref(self):
         "Test that the RPC client can enqueue a ref"
 
@@ -3061,6 +3147,7 @@
         self.assertIn('project-post', job_names)
         self.assertEqual(r, True)
 
+    @skip("Disabled for early v3 development")
     def test_client_enqueue_negative(self):
         "Test that the RPC client returns errors"
         client = zuul.rpcclient.RPCClient('127.0.0.1',
@@ -3105,19 +3192,20 @@
         self.assertEqual(len(self.history), 0)
         self.assertEqual(len(self.builds), 0)
 
+    @skip("Disabled for early v3 development")
     def test_client_promote(self):
         "Test that the RPC client can promote a change"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -3138,11 +3226,11 @@
                 enqueue_times[str(item.change)], item.enqueue_time)
 
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 6)
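         # Six builds are held: a project-test1 and a project-test2
         # run for each of the three changes.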
@@ -3165,7 +3253,7 @@
         self.assertTrue(self.job_has_changes(self.builds[4], C))
         self.assertTrue(self.job_has_changes(self.builds[4], A))
 
-        self.worker.release()
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -3178,24 +3266,25 @@
         client.shutdown()
         self.assertEqual(r, True)
 
+    @skip("Disabled for early v3 development")
     def test_client_promote_dependent(self):
         "Test that the RPC client can promote a dependent change"
         # C (depends on B) -> B -> A; then promote C to get:
         # A -> C (depends on B) -> B
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
 
         C.setDependsOn(B, 1)
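         # C's git history contains B, so promoting C has to pull B
         # ahead of A as well.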
 
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
 
         self.waitUntilSettled()
 
@@ -3205,11 +3294,11 @@
                            change_ids=['3,1'])
 
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         self.assertEqual(len(self.builds), 6)
@@ -3232,7 +3321,7 @@
         self.assertTrue(self.job_has_changes(self.builds[4], C))
         self.assertTrue(self.job_has_changes(self.builds[4], A))
 
-        self.worker.release()
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -3245,12 +3334,13 @@
         client.shutdown()
         self.assertEqual(r, True)
 
+    @skip("Disabled for early v3 development")
     def test_client_promote_negative(self):
         "Test that the RPC client returns errors for promotion"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         client = zuul.rpcclient.RPCClient('127.0.0.1',
@@ -3268,30 +3358,31 @@
             client.shutdown()
             self.assertEqual(r, False)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
+    @skip("Disabled for early v3 development")
     def test_queue_rate_limiting(self):
         "Test that DependentPipelines are rate limited with dep across window"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-rate-limit.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-rate-limit.yaml')
         self.sched.reconfigure(self.config)
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
 
         C.setDependsOn(B, 1)
-        self.worker.addFailTest('project-test1', A)
+        self.launch_server.failJob('project-test1', A)
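         # A's project-test1 build is rigged to fail; the failure will
         # shrink the window back down to its floor.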
 
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
 
         # Only A and B will have their merge jobs queued because
@@ -3300,9 +3391,9 @@
         self.assertEqual(self.builds[0].name, 'project-merge')
         self.assertEqual(self.builds[1].name, 'project-merge')
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # Only A and B will have their test jobs queued because
@@ -3313,7 +3404,7 @@
         self.assertEqual(self.builds[2].name, 'project-test1')
         self.assertEqual(self.builds[3].name, 'project-test2')
 
-        self.worker.release('project-.*')
+        self.launch_server.release('project-.*')
         self.waitUntilSettled()
 
         queue = self.sched.layout.pipelines['gate'].queues[0]
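         # Grab the shared change queue so the assertions below can
         # inspect the rate limiter's window bookkeeping directly.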
@@ -3327,7 +3418,7 @@
         self.assertEqual(len(self.builds), 1)
         self.assertEqual(self.builds[0].name, 'project-merge')
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # Only B's test jobs are queued because window is still 1.
@@ -3335,7 +3426,7 @@
         self.assertEqual(self.builds[0].name, 'project-test1')
         self.assertEqual(self.builds[1].name, 'project-test2')
 
-        self.worker.release('project-.*')
+        self.launch_server.release('project-.*')
         self.waitUntilSettled()
 
         # B was successfully merged so window is increased to 2.
@@ -3347,7 +3438,7 @@
         self.assertEqual(len(self.builds), 1)
         self.assertEqual(self.builds[0].name, 'project-merge')
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # After successful merge job the test jobs for C are queued.
@@ -3355,7 +3446,7 @@
         self.assertEqual(self.builds[0].name, 'project-test1')
         self.assertEqual(self.builds[1].name, 'project-test2')
 
-        self.worker.release('project-.*')
+        self.launch_server.release('project-.*')
         self.waitUntilSettled()
 
         # C successfully merged so window is bumped to 3.
@@ -3363,27 +3454,28 @@
         self.assertEqual(queue.window_floor, 1)
         self.assertEqual(C.data['status'], 'MERGED')
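         # Each successful merge widens the window again (to 3 here),
         # much like TCP slow-start, while the floor of 1 keeps at
         # least one change active after failures.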
 
+    @skip("Disabled for early v3 development")
     def test_queue_rate_limiting_dependent(self):
         "Test that DependentPipelines are rate limited with dep in window"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-rate-limit.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-rate-limit.yaml')
         self.sched.reconfigure(self.config)
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
 
         B.setDependsOn(A, 1)
 
-        self.worker.addFailTest('project-test1', A)
+        self.launch_server.failJob('project-test1', A)
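         # Here the failing change A sits inside the dependency chain:
         # B is its git child, so B cannot merge once A fails.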
 
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
 
         # Only A and B will have their merge jobs queued because
@@ -3392,9 +3484,9 @@
         self.assertEqual(self.builds[0].name, 'project-merge')
         self.assertEqual(self.builds[1].name, 'project-merge')
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # Only A and B will have their test jobs queued because
@@ -3405,7 +3497,7 @@
         self.assertEqual(self.builds[2].name, 'project-test1')
         self.assertEqual(self.builds[3].name, 'project-test2')
 
-        self.worker.release('project-.*')
+        self.launch_server.release('project-.*')
         self.waitUntilSettled()
 
         queue = self.sched.layout.pipelines['gate'].queues[0]
@@ -3420,7 +3512,7 @@
         self.assertEqual(len(self.builds), 1)
         self.assertEqual(self.builds[0].name, 'project-merge')
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
 
         # Only C's test jobs are queued because window is still 1.
@@ -3428,7 +3520,7 @@
         self.assertEqual(self.builds[0].name, 'project-test1')
         self.assertEqual(self.builds[1].name, 'project-test2')
 
-        self.worker.release('project-.*')
+        self.launch_server.release('project-.*')
         self.waitUntilSettled()
 
         # C was successfully merged so window is increased to 2.
@@ -3436,13 +3528,14 @@
         self.assertEqual(queue.window_floor, 1)
         self.assertEqual(C.data['status'], 'MERGED')
 
+    @skip("Disabled for early v3 development")
     def test_worker_update_metadata(self):
         "Test if a worker can send back metadata about itself"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(len(self.launcher.builds), 1)
@@ -3470,26 +3563,27 @@
         self.assertEqual("v1.1", build.worker.version)
         self.assertEqual({'something': 'else'}, build.worker.extra)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
+    @skip("Disabled for early v3 development")
     def test_footer_message(self):
         "Test a pipeline's footer message is correctly added to the report."
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-footer-message.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-footer-message.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.worker.addFailTest('test1', A)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.launch_server.failJob('test1', A)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
-        B.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(2, len(self.smtp_messages))
@@ -3514,11 +3608,12 @@
         self.assertEqual(failure_body, self.smtp_messages[0]['body'])
         self.assertEqual(success_body, self.smtp_messages[1]['body'])
 
+    @skip("Disabled for early v3 development")
     def test_merge_failure_reporters(self):
         """Check that the config is set up correctly"""
 
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-merge-failure.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-merge-failure.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -3559,19 +3654,20 @@
             )
         )
 
+    @skip("Disabled for early v3 development")
     def test_merge_failure_reports(self):
         """Check that when a change fails to merge the correct message is sent
         to the correct reporter"""
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-merge-failure.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-merge-failure.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
         # Check a test failure isn't reported to SMTP
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.worker.addFailTest('project-test1', A)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.launch_server.failJob('project-test1', A)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(3, len(self.history))  # 3 jobs
@@ -3583,10 +3679,10 @@
         B.addPatchset(['conflict'])
         C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
         C.addPatchset(['conflict'])
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(C.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(6, len(self.history))  # A and B jobs
@@ -3594,6 +3690,7 @@
         self.assertEqual('The merge failed! For more information...',
                          self.smtp_messages[0]['body'])
 
+    @skip("Disabled for early v3 development")
     def test_default_merge_failure_reports(self):
         """Check that the default merge failure reports are correct."""
 
@@ -3602,10 +3699,10 @@
         A.addPatchset(['conflict'])
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
         B.addPatchset(['conflict'])
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(3, len(self.history))  # A jobs
@@ -3619,18 +3716,19 @@
         self.assertNotIn('logs.example.com', B.messages[1])
         self.assertNotIn('SKIPPED', B.messages[1])
 
+    @skip("Disabled for early v3 development")
     def test_swift_instructions(self):
         "Test that the correct swift instructions are sent to the workers"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-swift.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-swift.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
 
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(
@@ -3660,16 +3758,17 @@
                              parameters['SWIFT_MOSTLY_HMAC_BODY'].split('\n')))
         self.assertIn('SWIFT_MOSTLY_SIGNATURE', self.builds[1].parameters)
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
+    @skip("Disabled for early v3 development")
     def test_client_get_running_jobs(self):
         "Test that the RPC client can get a list of running jobs"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
-        A.addApproval('CRVW', 2)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         client = zuul.rpcclient.RPCClient('127.0.0.1',
@@ -3715,13 +3814,14 @@
                 self.assertEqual('gate', job['pipeline'])
                 break
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         running_items = client.get_running_jobs()
         self.assertEqual(0, len(running_items))
 
+    @skip("Disabled for early v3 development")
     def test_nonvoting_pipeline(self):
         "Test that a nonvoting pipeline (experimental) can still report"
 
@@ -3734,12 +3834,13 @@
             'SUCCESS')
         self.assertEqual(A.reported, 1)
 
+    @skip("Disabled for early v3 development")
     def test_crd_gate(self):
         "Test cross-repo dependencies"
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
         AM2 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'AM2')
         AM1 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'AM1')
@@ -3767,7 +3868,7 @@
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
             A.subject, B.data['id'])
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -3776,17 +3877,17 @@
         for connection in self.connections.values():
             connection.maintainCache([])
 
-        self.worker.hold_jobs_in_build = True
-        B.addApproval('APRV', 1)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.launch_server.hold_jobs_in_build = True
+        B.addApproval('approved', 1)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(AM2.queried, 0)
@@ -3799,34 +3900,35 @@
         self.assertEqual(self.getJobFromHistory('project1-merge').changes,
                          '2,1 1,1')
 
+    @skip("Disabled for early v3 development")
     def test_crd_branch(self):
         "Test cross-repo dependencies in multiple branches"
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project2', 'mp', 'C')
         C.data['id'] = B.data['id']
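         # B and C share a Change-Id on different branches, so a single
         # Depends-On footer resolves to both of them.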
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
         # A Depends-On: B+C
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
             A.subject, B.data['id'])
 
-        self.worker.hold_jobs_in_build = True
-        B.addApproval('APRV', 1)
-        C.addApproval('APRV', 1)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.launch_server.hold_jobs_in_build = True
+        B.addApproval('approved', 1)
+        C.addApproval('approved', 1)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -3839,33 +3941,34 @@
         self.assertEqual(self.getJobFromHistory('project1-merge').changes,
                          '2,1 3,1 1,1')
 
+    @skip("Disabled for early v3 development")
     def test_crd_multiline(self):
         "Test multiple depends-on lines in commit"
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
         C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
-        C.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
+        C.addApproval('code-review', 2)
 
         # A Depends-On: B+C
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\nDepends-On: %s\n' % (
             A.subject, B.data['id'], C.data['id'])
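         # Both Depends-On footers are honored: B and C must each merge
         # ahead of A.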
 
-        self.worker.hold_jobs_in_build = True
-        B.addApproval('APRV', 1)
-        C.addApproval('APRV', 1)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.launch_server.hold_jobs_in_build = True
+        B.addApproval('approved', 1)
+        C.addApproval('approved', 1)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -3878,12 +3981,13 @@
         self.assertEqual(self.getJobFromHistory('project1-merge').changes,
                          '2,1 3,1 1,1')
 
+    @skip("Disabled for early v3 development")
     def test_crd_unshared_gate(self):
         "Test cross-repo dependencies in unshared gate queues"
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
         # A Depends-On: B
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
@@ -3891,8 +3995,8 @@
 
         # A and B do not share a queue; make sure that A is unable to
         # enqueue B (and therefore, A is unable to be enqueued).
-        B.addApproval('APRV', 1)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        B.addApproval('approved', 1)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -3902,7 +4006,7 @@
         self.assertEqual(len(self.history), 0)
 
         # Enqueue and merge B alone.
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(B.data['status'], 'MERGED')
@@ -3910,41 +4014,42 @@
 
         # Now that B is merged, A should be able to be enqueued and
         # merged.
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
         self.assertEqual(A.reported, 2)
 
+    @skip("Disabled for early v3 development")
     def test_crd_gate_reverse(self):
         "Test reverse cross-repo dependencies"
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
         # A Depends-On: B
 
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
             A.subject, B.data['id'])
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
         self.assertEqual(B.data['status'], 'NEW')
 
-        self.worker.hold_jobs_in_build = True
-        A.addApproval('APRV', 1)
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.launch_server.hold_jobs_in_build = True
+        A.addApproval('approved', 1)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         self.waitUntilSettled()
 
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.release('.*-merge')
+        self.launch_server.release('.*-merge')
         self.waitUntilSettled()
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -3955,12 +4060,13 @@
         self.assertEqual(self.getJobFromHistory('project1-merge').changes,
                          '2,1 1,1')
 
+    @skip("Disabled for early v3 development")
     def test_crd_cycle(self):
         "Test cross-repo dependency cycles"
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
         # A -> B -> A (via commit-depends)
 
@@ -3969,7 +4075,7 @@
         B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
             B.subject, A.data['id'])
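         # Each change now depends on the other; the scheduler must
         # detect the cycle and refuse to enqueue either change.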
 
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.reported, 0)
@@ -3977,20 +4083,21 @@
         self.assertEqual(A.data['status'], 'NEW')
         self.assertEqual(B.data['status'], 'NEW')
 
+    @skip("Disabled for early v3 development")
     def test_crd_gate_unknown(self):
         "Test unknown projects in dependent pipeline"
         self.init_repo("org/unknown")
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/unknown', 'master', 'B')
-        A.addApproval('CRVW', 2)
-        B.addApproval('CRVW', 2)
+        A.addApproval('code-review', 2)
+        B.addApproval('code-review', 2)
 
         # A Depends-On: B
         A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
             A.subject, B.data['id'])
 
-        B.addApproval('APRV', 1)
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        B.addApproval('approved', 1)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         # Unknown projects cannot share a queue with any other
@@ -4004,14 +4111,14 @@
         self.assertEqual(len(self.history), 0)
 
         # Simulate change B being gated outside this layout
-        self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
         B.setMerged()
         self.waitUntilSettled()
         self.assertEqual(len(self.history), 0)
 
         # Now that B is merged, A should be able to be enqueued and
         # merged.
-        self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'MERGED')
@@ -4019,6 +4126,7 @@
         self.assertEqual(B.data['status'], 'MERGED')
         self.assertEqual(B.reported, 0)
 
+    @skip("Disabled for early v3 development")
     def test_crd_check(self):
         "Test cross-repo dependencies in independent pipelines"
 
@@ -4061,6 +4169,7 @@
         self.assertEqual(self.history[0].changes, '2,1 1,1')
         self.assertEqual(len(self.sched.layout.pipelines['check'].queues), 0)
 
+    @skip("Disabled for early v3 development")
     def test_crd_check_git_depends(self):
         "Test single-repo dependencies in independent pipelines"
         self.gearman_server.hold_jobs_in_build = True
@@ -4091,9 +4200,10 @@
         self.assertIn('Build succeeded', A.messages[0])
         self.assertIn('Build succeeded', B.messages[0])
 
+    @skip("Disabled for early v3 development")
     def test_crd_check_duplicate(self):
         "Test duplicate check in independent pipelines"
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
         B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
         check_pipeline = self.sched.layout.pipelines['check']
@@ -4117,8 +4227,8 @@
         # Release jobs in order to avoid races with change A jobs
         # finishing before change B jobs.
         self.orderedRelease()
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(A.data['status'], 'NEW')
@@ -4133,6 +4243,7 @@
         self.assertIn('Build succeeded', A.messages[0])
         self.assertIn('Build succeeded', B.messages[0])
 
+    @skip("Disabled for early v3 development")
     def _test_crd_check_reconfiguration(self, project1, project2):
         "Test cross-repo dependencies re-enqueued in independent pipelines"
 
@@ -4171,9 +4282,11 @@
         self.assertEqual(self.history[0].changes, '2,1 1,1')
         self.assertEqual(len(self.sched.layout.pipelines['check'].queues), 0)
 
+    @skip("Disabled for early v3 development")
     def test_crd_check_reconfiguration(self):
         self._test_crd_check_reconfiguration('org/project1', 'org/project2')
 
+    @skip("Disabled for early v3 development")
     def test_crd_undefined_project(self):
         """Test that undefined projects in dependencies are handled for
         independent pipelines"""
@@ -4182,10 +4295,11 @@
         self.init_repo("org/unknown")
         self._test_crd_check_reconfiguration('org/project1', 'org/unknown')
 
+    @skip("Disabled for early v3 development")
     def test_crd_check_ignore_dependencies(self):
         "Test cross-repo dependencies can be ignored"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-ignore-dependencies.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-ignore-dependencies.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -4227,6 +4341,7 @@
         for job in self.history:
             self.assertEqual(len(job.changes.split()), 1)
 
+    @skip("Disabled for early v3 development")
     def test_crd_check_transitive(self):
         "Test transitive cross-repo dependencies"
         # Specifically, if A -> B -> C, and C gets a new patchset and
@@ -4267,6 +4382,7 @@
         self.waitUntilSettled()
         self.assertEqual(self.history[-1].changes, '3,2 2,1 1,2')
 
+    @skip("Disabled for early v3 development")
     def test_crd_check_unknown(self):
         "Test unknown projects in independent pipeline"
         self.init_repo("org/unknown")
@@ -4286,6 +4402,7 @@
         self.assertEqual(B.data['status'], 'NEW')
         self.assertEqual(B.reported, 0)
 
+    @skip("Disabled for early v3 development")
     def test_crd_cycle_join(self):
         "Test an updated change creates a cycle"
         A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
@@ -4325,11 +4442,12 @@
         source._getChange(u'1', u'2', True)
         source._getChange(u'2', u'2', True)
 
+    @skip("Disabled for early v3 development")
     def test_disable_at(self):
         "Test a pipeline will only report to the disabled trigger when failing"
 
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-disable-at.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-disable-at.yaml')
         self.sched.reconfigure(self.config)
 
         self.assertEqual(3, self.sched.layout.pipelines['check'].disable_at)
@@ -4349,17 +4467,17 @@
         J = self.fake_gerrit.addFakeChange('org/project', 'master', 'J')
         K = self.fake_gerrit.addFakeChange('org/project', 'master', 'K')
 
-        self.worker.addFailTest('project-test1', A)
-        self.worker.addFailTest('project-test1', B)
+        self.launch_server.failJob('project-test1', A)
+        self.launch_server.failJob('project-test1', B)
         # Let C pass, resetting the counter
-        self.worker.addFailTest('project-test1', D)
-        self.worker.addFailTest('project-test1', E)
-        self.worker.addFailTest('project-test1', F)
-        self.worker.addFailTest('project-test1', G)
-        self.worker.addFailTest('project-test1', H)
+        self.launch_server.failJob('project-test1', D)
+        self.launch_server.failJob('project-test1', E)
+        self.launch_server.failJob('project-test1', F)
+        self.launch_server.failJob('project-test1', G)
+        self.launch_server.failJob('project-test1', H)
         # Change I also passes but should only report to the disabled reporters
-        self.worker.addFailTest('project-test1', J)
-        self.worker.addFailTest('project-test1', K)
+        self.launch_server.failJob('project-test1', J)
+        self.launch_server.failJob('project-test1', K)
 
         self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
         self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
@@ -4447,6 +4565,7 @@
         # No more messages reported via smtp
         self.assertEqual(3, len(self.smtp_messages))
 
+    @skip("Disabled for early v3 development")
     def test_success_pattern(self):
         "Ensure bad build params are ignored"
 
@@ -4455,7 +4574,7 @@
         self.config.set('zuul', 'layout_config',
                         'tests/fixtures/layout-success-pattern.yaml')
         self.sched.reconfigure(self.config)
-        self.worker.hold_jobs_in_build = True
+        self.launch_server.hold_jobs_in_build = True
         self.registerJobs()
 
         A = self.fake_gerrit.addFakeChange('org/docs', 'master', 'A')
@@ -4466,8 +4585,8 @@
         self.assertEqual(len(self.builds), 1)
         uuid = self.builds[0].unique[:7]
 
-        self.worker.hold_jobs_in_build = False
-        self.worker.release()
+        self.launch_server.hold_jobs_in_build = False
+        self.launch_server.release()
         self.waitUntilSettled()
 
         self.assertEqual(len(self.smtp_messages), 1)
diff --git a/tests/test_v3.py b/tests/test_v3.py
new file mode 100644
index 0000000..8f4e27e
--- /dev/null
+++ b/tests/test_v3.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import textwrap
+
+from tests.base import AnsibleZuulTestCase
+
+logging.basicConfig(level=logging.DEBUG,
+                    format='%(asctime)s %(name)-32s '
+                    '%(levelname)-8s %(message)s')
+
+
+class TestMultipleTenants(AnsibleZuulTestCase):
+    # A temporary class to hold new tests while others are disabled
+
+    tenant_config_file = 'config/multi-tenant/main.yaml'
+
+    def test_multiple_tenants(self):
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.waitUntilSettled()
+        self.assertEqual(self.getJobFromHistory('project1-test1').result,
+                         'SUCCESS')
+        self.assertEqual(self.getJobFromHistory('python27').result,
+                         'SUCCESS')
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(A.reported, 2,
+                         "A should report start and success")
+        self.assertIn('tenant-one-gate', A.messages[1],
+                      "A should transit tenant-one gate")
+        self.assertNotIn('tenant-two-gate', A.messages[1],
+                         "A should *not* transit tenant-two gate")
+
+        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.waitUntilSettled()
+        self.assertEqual(self.getJobFromHistory('python27',
+                                                'org/project2').result,
+                         'SUCCESS')
+        self.assertEqual(self.getJobFromHistory('project2-test1').result,
+                         'SUCCESS')
+        self.assertEqual(B.data['status'], 'MERGED')
+        self.assertEqual(B.reported, 2,
+                         "B should report start and success")
+        self.assertIn('tenant-two-gate', B.messages[1],
+                      "B should transit tenant-two gate")
+        self.assertNotIn('tenant-one-gate', B.messages[1],
+                         "B should *not* transit tenant-one gate")
+
+        self.assertEqual(A.reported, 2, "Activity in tenant two should"
+                         "not affect tenant one")
+
+
+class TestInRepoConfig(AnsibleZuulTestCase):
+    # A temporary class to hold new tests while others are disabled
+
+    tenant_config_file = 'config/in-repo/main.yaml'
+
+    def test_in_repo_config(self):
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.waitUntilSettled()
+        self.assertEqual(self.getJobFromHistory('project-test1').result,
+                         'SUCCESS')
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(A.reported, 2,
+                         "A should report start and success")
+        self.assertIn('tenant-one-gate', A.messages[1],
+                      "A should transit tenant-one gate")
+
+    def test_dynamic_config(self):
+        in_repo_conf = textwrap.dedent(
+            """
+            - job:
+                name: project-test2
+
+            - project:
+                name: org/project
+                tenant-one-gate:
+                  jobs:
+                    - project-test2
+            """)
+
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+                                           files={'.zuul.yaml': in_repo_conf})
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.waitUntilSettled()
+        self.assertEqual(self.getJobFromHistory('project-test2').result,
+                         'SUCCESS')
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(A.reported, 2,
+                         "A should report start and success")
+        self.assertIn('tenant-one-gate', A.messages[1],
+                      "A should transit tenant-one gate")
+
+
+class TestProjectTemplate(AnsibleZuulTestCase):
+    tenant_config_file = 'config/project-template/main.yaml'
+
+    def test(self):
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.waitUntilSettled()
+        self.assertEqual(self.getJobFromHistory('project-test1').result,
+                         'SUCCESS')
+        self.assertEqual(self.getJobFromHistory('project-test2').result,
+                         'SUCCESS')
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(A.reported, 2,
+                         "A should report start and success")
+        self.assertIn('gate', A.messages[1],
+                      "A should transit gate")
diff --git a/tests/test_webapp.py b/tests/test_webapp.py
index 94f097a..555c08f 100644
--- a/tests/test_webapp.py
+++ b/tests/test_webapp.py
@@ -30,6 +30,8 @@
         self.waitUntilSettled()
 
     def setUp(self):
+        self.skip("Disabled for early v3 development")
+
         super(TestWebapp, self).setUp()
         self.addCleanup(self._cleanup)
         self.worker.hold_jobs_in_build = True
diff --git a/tests/test_zuultrigger.py b/tests/test_zuultrigger.py
index 0d52fc9..0442c2f 100644
--- a/tests/test_zuultrigger.py
+++ b/tests/test_zuultrigger.py
@@ -26,10 +26,13 @@
 class TestZuulTrigger(ZuulTestCase):
     """Test Zuul Trigger"""
 
+    def setUp(self):
+        self.skip("Disabled for early v3 development")
+
     def test_zuul_trigger_parent_change_enqueued(self):
         "Test Zuul trigger event: parent-change-enqueued"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-zuultrigger-enqueued.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-zuultrigger-enqueued.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
@@ -74,8 +77,8 @@
 
     def test_zuul_trigger_project_change_merged(self):
         "Test Zuul trigger event: project-change-merged"
-        self.config.set('zuul', 'layout_config',
-                        'tests/fixtures/layout-zuultrigger-merged.yaml')
+        self.updateConfigLayout(
+            'tests/fixtures/layout-zuultrigger-merged.yaml')
         self.sched.reconfigure(self.config)
         self.registerJobs()
 
diff --git a/tox.ini b/tox.ini
index 06ccbcd..1f6b39e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,7 +10,7 @@
          VIRTUAL_ENV={envdir}
          OS_TEST_TIMEOUT=30
          OS_LOG_DEFAULTS={env:OS_LOG_DEFAULTS:gear.Server=INFO,gear.Client=INFO}
-passenv = ZUUL_TEST_ROOT
+passenv = ZUUL_TEST_ROOT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE
 usedevelop = True
 install_command = pip install {opts} {packages}
 deps = -r{toxinidir}/requirements.txt
diff --git a/zuul/cmd/__init__.py b/zuul/cmd/__init__.py
index 5ffd431..3102f3b 100644
--- a/zuul/cmd/__init__.py
+++ b/zuul/cmd/__init__.py
@@ -91,5 +91,5 @@
             logging.basicConfig(level=logging.DEBUG)
 
     def configure_connections(self):
-        self.connections = zuul.lib.connections.configure_connections(
-            self.config)
+        self.connections = zuul.lib.connections.ConnectionRegistry()
+        self.connections.configure(self.config)
diff --git a/zuul/cmd/server.py b/zuul/cmd/server.py
index 0b7538d..ff9f2d9 100755
--- a/zuul/cmd/server.py
+++ b/zuul/cmd/server.py
@@ -147,8 +147,9 @@
     def main(self):
         # See comment at top of file about zuul imports
         import zuul.scheduler
-        import zuul.launcher.gearman
+        import zuul.launcher.client
         import zuul.merger.client
+        import zuul.nodepool
         import zuul.lib.swift
         import zuul.webapp
         import zuul.rpclistener
@@ -165,9 +166,10 @@
         # TODO(jhesketh): Move swift into a connection?
         self.swift = zuul.lib.swift.Swift(self.config)
 
-        gearman = zuul.launcher.gearman.Gearman(self.config, self.sched,
-                                                self.swift)
+        gearman = zuul.launcher.client.LaunchClient(self.config, self.sched,
+                                                    self.swift)
         merger = zuul.merger.client.MergeClient(self.config, self.sched)
+        nodepool = zuul.nodepool.Nodepool(self.sched)
 
         if self.config.has_option('zuul', 'status_expiry'):
             cache_expiry = self.config.getint('zuul', 'status_expiry')
@@ -192,6 +194,7 @@
         self.configure_connections()
         self.sched.setLauncher(gearman)
         self.sched.setMerger(merger)
+        self.sched.setNodepool(nodepool)
 
         self.log.info('Starting scheduler')
         self.sched.start()
diff --git a/zuul/configloader.py b/zuul/configloader.py
new file mode 100644
index 0000000..bc2f7fc
--- /dev/null
+++ b/zuul/configloader.py
@@ -0,0 +1,646 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import logging
+import six
+import yaml
+
+import voluptuous as vs
+
+from zuul import model
+import zuul.manager.dependent
+import zuul.manager.independent
+from zuul import change_matcher
+
+
+# Several forms accept either a single item or a list; this makes
+# specifying that in the schema easy (and explicit).
+def to_list(x):
+    return vs.Any([x], x)
+
+
+def as_list(item):
+    if not item:
+        return []
+    if isinstance(item, list):
+        return item
+    return [item]
+
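
A minimal sketch of how these two helpers behave under voluptuous; the
schema and values here are illustrative only:

    import voluptuous as vs

    def to_list(x):
        return vs.Any([x], x)

    # Either a bare string or a list of strings validates:
    schema = vs.Schema({'tags': to_list(str)})
    schema({'tags': 'fast'})
    schema({'tags': ['fast', 'py27']})

    # as_list then normalizes the validated value for consumers:
    # as_list('fast') == ['fast']; as_list(None) == []
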
+
+class JobParser(object):
+    @staticmethod
+    def getSchema():
+        swift_tmpurl = {vs.Required('name'): str,
+                        'container': str,
+                        'expiry': int,
+                        'max_file_size': int,
+                        'max-file-size': int,
+                        'max_file_count': int,
+                        'max-file-count': int,
+                        'logserver_prefix': str,
+                        'logserver-prefix': str,
+                        }
+
+        password = {str: str}
+
+        auth = {'password': to_list(password),
+                'inherit': bool,
+                'swift-tmpurl': to_list(swift_tmpurl),
+                }
+
+        node = {vs.Required('name'): str,
+                vs.Required('image'): str,
+                }
+
+        job = {vs.Required('name'): str,
+               'parent': str,
+               'queue-name': str,
+               'failure-message': str,
+               'success-message': str,
+               'failure-url': str,
+               'success-url': str,
+               'hold-following-changes': bool,
+               'voting': bool,
+               'mutex': str,
+               'tags': to_list(str),
+               'branches': to_list(str),
+               'files': to_list(str),
+               'auth': to_list(auth),
+               'irrelevant-files': to_list(str),
+               'nodes': [node],
+               'timeout': int,
+               '_source_project': model.Project,
+               }
+
+        return vs.Schema(job)
+
+    @staticmethod
+    def fromYaml(layout, conf):
+        JobParser.getSchema()(conf)
+        job = model.Job(conf['name'])
+        if 'auth' in conf:
+            job.auth = conf.get('auth')
+        if 'parent' in conf:
+            parent = layout.getJob(conf['parent'])
+            job.inheritFrom(parent)
+        job.timeout = conf.get('timeout', job.timeout)
+        job.workspace = conf.get('workspace', job.workspace)
+        job.pre_run = as_list(conf.get('pre-run', job.pre_run))
+        job.post_run = as_list(conf.get('post-run', job.post_run))
+        job.voting = conf.get('voting', True)
+        job.hold_following_changes = conf.get('hold-following-changes', False)
+        job.mutex = conf.get('mutex', None)
+        tags = conf.get('tags')
+        if tags:
+            # Tags are merged via a union rather than a
+            # destructive copy because they are intended to
+            # accumulate onto any previously applied tags from
+            # metajobs.
+            job.tags = job.tags.union(set(tags))
+        # This attribute may not be overridden -- it is always
+        # supplied by the config loader and is the Project instance of
+        # the repo where it originated.
+        job.source_project = conf.get('_source_project')
+        job.failure_message = conf.get('failure-message', job.failure_message)
+        job.success_message = conf.get('success-message', job.success_message)
+        job.failure_url = conf.get('failure-url', job.failure_url)
+        job.success_url = conf.get('success-url', job.success_url)
+
+        if 'branches' in conf:
+            matchers = []
+            for branch in as_list(conf['branches']):
+                matchers.append(change_matcher.BranchMatcher(branch))
+            job.branch_matcher = change_matcher.MatchAny(matchers)
+        if 'files' in conf:
+            matchers = []
+            for fn in as_list(conf['files']):
+                matchers.append(change_matcher.FileMatcher(fn))
+            job.file_matcher = change_matcher.MatchAny(matchers)
+        if 'irrelevant-files' in conf:
+            matchers = []
+            for fn in as_list(conf['irrelevant-files']):
+                matchers.append(change_matcher.FileMatcher(fn))
+            job.irrelevant_file_matcher = change_matcher.MatchAllFiles(
+                matchers)
+        return job
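
For orientation, a hedged example of a job definition this parser
accepts, written in the textwrap style the new tests use; the job and
parent names are assumptions:

    import textwrap
    import yaml

    job_conf = yaml.load(textwrap.dedent(
        """
        name: project-test1
        parent: base
        voting: true
        branches: master
        irrelevant-files:
          - ^docs/.*$
        """))
    JobParser.getSchema()(job_conf)  # raises voluptuous.Invalid on bad input
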
+
+
+class ProjectTemplateParser(object):
+    log = logging.getLogger("zuul.ProjectTemplateParser")
+
+    @staticmethod
+    def getSchema(layout):
+        project_template = {vs.Required('name'): str}
+        for p in layout.pipelines.values():
+            project_template[p.name] = {'queue': str,
+                                        'jobs': [vs.Any(str, dict)]}
+        return vs.Schema(project_template)
+
+    @staticmethod
+    def fromYaml(layout, conf):
+        ProjectTemplateParser.getSchema(layout)(conf)
+        project_template = model.ProjectConfig(conf['name'])
+        for pipeline in layout.pipelines.values():
+            conf_pipeline = conf.get(pipeline.name)
+            if not conf_pipeline:
+                continue
+            project_pipeline = model.ProjectPipelineConfig()
+            project_template.pipelines[pipeline.name] = project_pipeline
+            project_pipeline.queue_name = conf_pipeline.get('queue')
+            project_pipeline.job_tree = ProjectTemplateParser._parseJobTree(
+                layout, conf_pipeline.get('jobs'))
+        return project_template
+
+    @staticmethod
+    def _parseJobTree(layout, conf, tree=None):
+        if not tree:
+            tree = model.JobTree(None)
+        for conf_job in conf:
+            if isinstance(conf_job, six.string_types):
+                tree.addJob(model.Job(conf_job))
+            elif isinstance(conf_job, dict):
+                # A dictionary in a job tree may override params, or
+                # be the root of a sub job tree, or both.
+                jobname, attrs = conf_job.items()[0]
+                jobs = attrs.pop('jobs', None)
+                if attrs:
+                    # We are overriding params, so make a new job def
+                    attrs['name'] = jobname
+                    subtree = tree.addJob(JobParser.fromYaml(layout, attrs))
+                else:
+                    # Not overriding, so get existing job
+                    subtree = tree.addJob(layout.getJob(jobname))
+
+                if jobs:
+                    # This is the root of a sub tree
+                    ProjectTemplateParser._parseJobTree(layout, jobs, subtree)
+            else:
+                raise Exception("Job must be a string or dictionary")
+        return tree
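
A sketch of the two entry forms handled here (job names assumed): a
plain string references an existing job, while a one-key dictionary can
override parameters and/or root a subtree via a nested 'jobs' list:

    import textwrap
    import yaml

    tree_conf = yaml.load(textwrap.dedent(
        """
        - python27
        - project-merge:
            voting: false
            jobs:
              - project-test1
              - project-test2
        """))
    # tree_conf[1] == {'project-merge': {...}}: 'jobs' is popped off as
    # the subtree and the remaining keys become parameter overrides.
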
+
+
+class ProjectParser(object):
+    log = logging.getLogger("zuul.ProjectParser")
+
+    @staticmethod
+    def getSchema(layout):
+        project = {vs.Required('name'): str,
+                   'templates': [str]}
+        for p in layout.pipelines.values():
+            project[p.name] = {'queue': str,
+                               'jobs': [vs.Any(str, dict)]}
+        return vs.Schema(project)
+
+    @staticmethod
+    def fromYaml(layout, conf):
+        ProjectParser.getSchema(layout)(conf)
+        conf_templates = conf.pop('templates', [])
+        # The way we construct a project definition is by parsing the
+        # definition as a template, then applying all of the
+        # templates, including the newly parsed one, in order.
+        project_template = ProjectTemplateParser.fromYaml(layout, conf)
+        configs = [layout.project_templates[name] for name in conf_templates]
+        configs.append(project_template)
+        project = model.ProjectConfig(conf['name'])
+        for pipeline in layout.pipelines.values():
+            project_pipeline = model.ProjectPipelineConfig()
+            project_pipeline.job_tree = model.JobTree(None)
+            queue_name = None
+            # For every template, iterate over the job tree and replace or
+            # create the jobs in the final definition as needed.
+            pipeline_defined = False
+            for template in configs:
+                ProjectParser.log.debug("Applying template %s to pipeline %s" %
+                                        (template.name, pipeline.name))
+                if pipeline.name in template.pipelines:
+                    pipeline_defined = True
+                    template_pipeline = template.pipelines[pipeline.name]
+                    project_pipeline.job_tree.inheritFrom(
+                        template_pipeline.job_tree)
+                    if template_pipeline.queue_name:
+                        queue_name = template_pipeline.queue_name
+            if queue_name:
+                project_pipeline.queue_name = queue_name
+            if pipeline_defined:
+                project.pipelines[pipeline.name] = project_pipeline
+        return project
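
Illustratively (template, queue, and project names assumed), a project
definition is parsed as one more template and merged after its named
templates, so its own entries are applied last:

    import textwrap
    import yaml

    project_conf = yaml.load(textwrap.dedent(
        """
        name: org/project
        templates:
          - python-jobs
        gate:
          queue: integrated
          jobs:
            - project-merge
        """))
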
+
+
+class PipelineParser(object):
+    log = logging.getLogger("zuul.PipelineParser")
+
+    # A mapping of reporter configuration keys to action attributes
+    reporter_actions = {
+        'start': 'start_actions',
+        'success': 'success_actions',
+        'failure': 'failure_actions',
+        'merge-failure': 'merge_failure_actions',
+        'disabled': 'disabled_actions',
+    }
+
+    @staticmethod
+    def getDriverSchema(dtype, connections):
+        # TODO(jhesketh): Make the driver discovery dynamic
+        connection_drivers = {
+            'trigger': {
+                'gerrit': 'zuul.trigger.gerrit',
+            },
+            'reporter': {
+                'gerrit': 'zuul.reporter.gerrit',
+                'smtp': 'zuul.reporter.smtp',
+            },
+        }
+        standard_drivers = {
+            'trigger': {
+                'timer': 'zuul.trigger.timer',
+                'zuul': 'zuul.trigger.zuultrigger',
+            }
+        }
+
+        schema = {}
+        # Add the configured connections as available layout options
+        for connection_name, connection in connections.connections.items():
+            for dname, dmod in connection_drivers.get(dtype, {}).items():
+                if connection.driver_name == dname:
+                    schema[connection_name] = to_list(__import__(
+                        connection_drivers[dtype][dname],
+                        fromlist=['']).getSchema())
+
+        # Standard drivers are always available and don't require a unique
+        # (connection) name
+        for dname, dmod in standard_drivers.get(dtype, {}).items():
+            schema[dname] = to_list(__import__(
+                standard_drivers[dtype][dname], fromlist=['']).getSchema())
+
+        return schema
+
+    @staticmethod
+    def getSchema(layout, connections):
+        manager = vs.Any('independent',
+                         'dependent')
+
+        precedence = vs.Any('normal', 'low', 'high')
+
+        approval = vs.Schema({'username': str,
+                              'email-filter': str,
+                              'email': str,
+                              'older-than': str,
+                              'newer-than': str,
+                              }, extra=True)
+
+        require = {'approval': to_list(approval),
+                   'open': bool,
+                   'current-patchset': bool,
+                   'status': to_list(str)}
+
+        reject = {'approval': to_list(approval)}
+
+        window = vs.All(int, vs.Range(min=0))
+        window_floor = vs.All(int, vs.Range(min=1))
+        window_type = vs.Any('linear', 'exponential')
+        window_factor = vs.All(int, vs.Range(min=1))
+
+        pipeline = {vs.Required('name'): str,
+                    vs.Required('manager'): manager,
+                    'source': str,
+                    'precedence': precedence,
+                    'description': str,
+                    'require': require,
+                    'reject': reject,
+                    'success-message': str,
+                    'failure-message': str,
+                    'merge-failure-message': str,
+                    'footer-message': str,
+                    'dequeue-on-new-patchset': bool,
+                    'ignore-dependencies': bool,
+                    'disable-after-consecutive-failures':
+                        vs.All(int, vs.Range(min=1)),
+                    'window': window,
+                    'window-floor': window_floor,
+                    'window-increase-type': window_type,
+                    'window-increase-factor': window_factor,
+                    'window-decrease-type': window_type,
+                    'window-decrease-factor': window_factor,
+                    }
+        pipeline['trigger'] = vs.Required(
+            PipelineParser.getDriverSchema('trigger', connections))
+        for action in ['start', 'success', 'failure', 'merge-failure',
+                       'disabled']:
+            pipeline[action] = PipelineParser.getDriverSchema('reporter',
+                                                              connections)
+        return vs.Schema(pipeline)
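
A hedged sketch of a pipeline definition this schema accepts; the
bodies under 'trigger' and the reporter actions are validated by the
driver schemas, so the gerrit keys shown here are assumptions:

    import textwrap
    import yaml

    pipeline_conf = yaml.load(textwrap.dedent(
        """
        name: check
        manager: independent
        source: gerrit
        trigger:
          gerrit:
            - event: patchset-created
        success:
          gerrit:
            verified: 1
        failure:
          gerrit:
            verified: -1
        """))
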
+
+    @staticmethod
+    def fromYaml(layout, connections, scheduler, conf):
+        PipelineParser.getSchema(layout, connections)(conf)
+        pipeline = model.Pipeline(conf['name'], layout)
+        pipeline.description = conf.get('description')
+
+        pipeline.source = connections.getSource(conf['source'])
+
+        precedence = model.PRECEDENCE_MAP[conf.get('precedence')]
+        pipeline.precedence = precedence
+        pipeline.failure_message = conf.get('failure-message',
+                                            "Build failed.")
+        pipeline.merge_failure_message = conf.get(
+            'merge-failure-message', "Merge Failed.\n\nThis change or one "
+            "of its cross-repo dependencies was unable to be "
+            "automatically merged with the current state of its "
+            "repository. Please rebase the change and upload a new "
+            "patchset.")
+        pipeline.success_message = conf.get('success-message',
+                                            "Build succeeded.")
+        pipeline.footer_message = conf.get('footer-message', "")
+        pipeline.start_message = conf.get('start-message',
+                                          "Starting {pipeline.name} jobs.")
+        pipeline.dequeue_on_new_patchset = conf.get(
+            'dequeue-on-new-patchset', True)
+        pipeline.ignore_dependencies = conf.get(
+            'ignore-dependencies', False)
+
+        for conf_key, action in PipelineParser.reporter_actions.items():
+            reporter_set = []
+            if conf.get(conf_key):
+                for reporter_name, params in conf.get(conf_key).items():
+                    reporter = connections.getReporter(reporter_name,
+                                                       params)
+                    reporter.setAction(conf_key)
+                    reporter_set.append(reporter)
+            setattr(pipeline, action, reporter_set)
+
+        # If merge-failure actions aren't explicit, use the failure actions
+        if not pipeline.merge_failure_actions:
+            pipeline.merge_failure_actions = pipeline.failure_actions
+
+        pipeline.disable_at = conf.get(
+            'disable-after-consecutive-failures', None)
+
+        pipeline.window = conf.get('window', 20)
+        pipeline.window_floor = conf.get('window-floor', 3)
+        pipeline.window_increase_type = conf.get(
+            'window-increase-type', 'linear')
+        pipeline.window_increase_factor = conf.get(
+            'window-increase-factor', 1)
+        pipeline.window_decrease_type = conf.get(
+            'window-decrease-type', 'exponential')
+        pipeline.window_decrease_factor = conf.get(
+            'window-decrease-factor', 2)
+
+        manager_name = conf['manager']
+        if manager_name == 'dependent':
+            manager = zuul.manager.dependent.DependentPipelineManager(
+                scheduler, pipeline)
+        elif manager_name == 'independent':
+            manager = zuul.manager.independent.IndependentPipelineManager(
+                scheduler, pipeline)
+
+        pipeline.setManager(manager)
+        layout.pipelines[conf['name']] = pipeline
+
+        if 'require' in conf or 'reject' in conf:
+            require = conf.get('require', {})
+            reject = conf.get('reject', {})
+            f = model.ChangeishFilter(
+                open=require.get('open'),
+                current_patchset=require.get('current-patchset'),
+                statuses=as_list(require.get('status')),
+                required_approvals=as_list(require.get('approval')),
+                reject_approvals=as_list(reject.get('approval'))
+            )
+            manager.changeish_filters.append(f)
+
+        for trigger_name, trigger_config in conf.get('trigger').items():
+            trigger = connections.getTrigger(trigger_name, trigger_config)
+            pipeline.triggers.append(trigger)
+
+            # TODO: move
+            manager.event_filters += trigger.getEventFilters(
+                conf['trigger'][trigger_name])
+
+        return pipeline
+
+
+class TenantParser(object):
+    log = logging.getLogger("zuul.TenantParser")
+
+    tenant_source = vs.Schema({'config-repos': [str],
+                               'project-repos': [str]})
+
+    @staticmethod
+    def validateTenantSources(connections):
+        def v(value, path=[]):
+            if isinstance(value, dict):
+                for k, val in value.items():
+                    connections.getSource(k)
+                    TenantParser.validateTenantSource(val, path + [k])
+            else:
+                raise vs.Invalid("Invalid tenant source", path)
+        return v
+
+    @staticmethod
+    def validateTenantSource(value, path=[]):
+        TenantParser.tenant_source(value)
+
+    @staticmethod
+    def getSchema(connections=None):
+        tenant = {vs.Required('name'): str,
+                  'source': TenantParser.validateTenantSources(connections)}
+        return vs.Schema(tenant)
+
+    @staticmethod
+    def fromYaml(base, connections, scheduler, merger, conf):
+        TenantParser.getSchema(connections)(conf)
+        tenant = model.Tenant(conf['name'])
+        unparsed_config = model.UnparsedTenantConfig()
+        tenant.config_repos, tenant.project_repos = \
+            TenantParser._loadTenantConfigRepos(connections, conf)
+        tenant.config_repos_config, tenant.project_repos_config = \
+            TenantParser._loadTenantInRepoLayouts(
+                merger, connections, tenant.config_repos, tenant.project_repos)
+        unparsed_config.extend(tenant.config_repos_config)
+        unparsed_config.extend(tenant.project_repos_config)
+        tenant.layout = TenantParser._parseLayout(base, unparsed_config,
+                                                  scheduler, connections)
+        tenant.layout.tenant = tenant
+        return tenant
+
+    @staticmethod
+    def _loadTenantConfigRepos(connections, conf_tenant):
+        config_repos = []
+        project_repos = []
+
+        for source_name, conf_source in conf_tenant.get('source', {}).items():
+            source = connections.getSource(source_name)
+
+            for conf_repo in conf_source.get('config-repos', []):
+                project = source.getProject(conf_repo)
+                config_repos.append((source, project))
+
+            for conf_repo in conf_source.get('project-repos', []):
+                project = source.getProject(conf_repo)
+                project_repos.append((source, project))
+
+        return config_repos, project_repos
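
For reference, a tenant entry matching getSchema and the tenant_source
schema above; 'gerrit' stands for a configured connection name and the
repo names are assumptions:

    import textwrap
    import yaml

    tenant_conf = yaml.load(textwrap.dedent(
        """
        name: tenant-one
        source:
          gerrit:
            config-repos:
              - common-config
            project-repos:
              - org/project1
              - org/project2
        """))
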
+
+    @staticmethod
+    def _loadTenantInRepoLayouts(merger, connections, config_repos,
+                                 project_repos):
+        config_repos_config = model.UnparsedTenantConfig()
+        project_repos_config = model.UnparsedTenantConfig()
+        jobs = []
+
+        for (source, project) in config_repos:
+            # Get main config files.  These files are permitted the
+            # full range of configuration.
+            url = source.getGitUrl(project)
+            job = merger.getFiles(project.name, url, 'master',
+                                  files=['zuul.yaml', '.zuul.yaml'])
+            job.project = project
+            job.config_repo = True
+            jobs.append(job)
+
+        for (source, project) in project_repos:
+            # Get in-project-repo config files which have a restricted
+            # set of options.
+            url = source.getGitUrl(project)
+            # TODOv3(jeblair): config should be branch specific
+            job = merger.getFiles(project.name, url, 'master',
+                                  files=['.zuul.yaml'])
+            job.project = project
+            job.config_repo = False
+            jobs.append(job)
+
+        for job in jobs:
+            # Note: this is an ordered list -- we wait for cat jobs to
+            # complete in the order they were launched which is the
+            # same order they were defined in the main config file.
+            # This is important for correct inheritance.
+            TenantParser.log.debug("Waiting for cat job %s" % (job,))
+            job.wait()
+            for fn in ['zuul.yaml', '.zuul.yaml']:
+                if job.files.get(fn):
+                    TenantParser.log.info(
+                        "Loading configuration from %s/%s" %
+                        (job.project, fn))
+                    if job.config_repo:
+                        incdata = TenantParser._parseConfigRepoLayout(
+                            job.files[fn], job.project)
+                        config_repos_config.extend(incdata)
+                    else:
+                        incdata = TenantParser._parseProjectRepoLayout(
+                            job.files[fn], job.project)
+                        project_repos_config.extend(incdata)
+                    job.project.unparsed_config = incdata
+        return config_repos_config, project_repos_config
+
+    @staticmethod
+    def _parseConfigRepoLayout(data, project):
+        # This is the top-level configuration for a tenant.
+        config = model.UnparsedTenantConfig()
+        config.extend(yaml.load(data), project)
+
+        return config
+
+    @staticmethod
+    def _parseProjectRepoLayout(data, project):
+        # TODOv3(jeblair): this should implement some rules to protect
+        # aspects of the config that should not be changed in-repo
+        config = model.UnparsedTenantConfig()
+        config.extend(yaml.load(data), project)
+
+        return config
+
+    @staticmethod
+    def _parseLayout(base, data, scheduler, connections):
+        layout = model.Layout()
+
+        for config_pipeline in data.pipelines:
+            layout.addPipeline(PipelineParser.fromYaml(layout, connections,
+                                                       scheduler,
+                                                       config_pipeline))
+
+        for config_job in data.jobs:
+            layout.addJob(JobParser.fromYaml(layout, config_job))
+
+        for config_template in data.project_templates:
+            layout.addProjectTemplate(ProjectTemplateParser.fromYaml(
+                layout, config_template))
+
+        for config_project in data.projects:
+            layout.addProjectConfig(ProjectParser.fromYaml(
+                layout, config_project))
+
+        for pipeline in layout.pipelines.values():
+            pipeline.manager._postConfig(layout)
+
+        return layout
+
+
+class ConfigLoader(object):
+    log = logging.getLogger("zuul.ConfigLoader")
+
+    def expandConfigPath(self, config_path):
+        if config_path:
+            config_path = os.path.expanduser(config_path)
+        if not os.path.exists(config_path):
+            raise Exception("Unable to read tenant config file at %s" %
+                            config_path)
+        return config_path
+
+    def loadConfig(self, config_path, scheduler, merger, connections):
+        abide = model.Abide()
+
+        config_path = self.expandConfigPath(config_path)
+        with open(config_path) as config_file:
+            self.log.info("Loading configuration from %s" % (config_path,))
+            data = yaml.load(config_file)
+        config = model.UnparsedAbideConfig()
+        config.extend(data)
+        base = os.path.dirname(os.path.realpath(config_path))
+
+        for conf_tenant in config.tenants:
+            tenant = TenantParser.fromYaml(base, connections, scheduler,
+                                           merger, conf_tenant)
+            abide.tenants[tenant.name] = tenant
+        return abide
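
A minimal usage sketch, assuming a scheduler, merger, and connection
registry are already constructed and a main.yaml exists at the given
path:

    loader = ConfigLoader()
    abide = loader.loadConfig('/etc/zuul/main.yaml',
                              scheduler, merger, connections)
    tenant = abide.tenants['tenant-one']
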
+
+    def createDynamicLayout(self, tenant, files):
+        config = tenant.config_repos_config.copy()
+        for source, project in tenant.project_repos:
+            # TODOv3(jeblair): config should be branch specific
+            data = files.getFile(project.name, 'master', '.zuul.yaml')
+            if not data:
+                data = project.unparsed_config
+            if not data:
+                continue
+            incdata = TenantParser._parseProjectRepoLayout(data, project)
+            config.extend(incdata)
+
+        layout = model.Layout()
+        # TODOv3(jeblair): copying the pipelines could be dangerous/confusing.
+        layout.pipelines = tenant.layout.pipelines
+
+        for config_job in config.jobs:
+            layout.addJob(JobParser.fromYaml(layout, config_job))
+
+        for config_template in config.project_templates:
+            layout.addProjectTemplate(ProjectTemplateParser.fromYaml(
+                layout, config_template))
+
+        for config_project in config.projects:
+            layout.addProjectConfig(ProjectParser.fromYaml(
+                layout, config_project), update_pipeline=False)
+
+        return layout
diff --git a/zuul/connection/__init__.py b/zuul/connection/__init__.py
index 066b4db..9b439a9 100644
--- a/zuul/connection/__init__.py
+++ b/zuul/connection/__init__.py
@@ -43,14 +43,6 @@
         self.connection_name = connection_name
         self.connection_config = connection_config
 
-        # Keep track of the sources, triggers and reporters using this
-        # connection
-        self.attached_to = {
-            'source': [],
-            'trigger': [],
-            'reporter': [],
-        }
-
     def onLoad(self):
         pass
 
@@ -60,9 +52,6 @@
     def registerScheduler(self, sched):
         self.sched = sched
 
-    def registerUse(self, what, instance):
-        self.attached_to[what].append(instance)
-
     def maintainCache(self, relevant):
         """Make cache contain relevant changes.
 
diff --git a/zuul/connection/gerrit.py b/zuul/connection/gerrit.py
index 62891cd..bf77bff 100644
--- a/zuul/connection/gerrit.py
+++ b/zuul/connection/gerrit.py
@@ -13,9 +13,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import threading
-import select
 import json
+import re
+import select
+import threading
 import time
 from six.moves import queue as Queue
 from six.moves import urllib
@@ -25,7 +26,22 @@
 import voluptuous as v
 
 from zuul.connection import BaseConnection
-from zuul.model import TriggerEvent
+from zuul.model import TriggerEvent, Project, Change, Ref, NullChange
+from zuul import exceptions
+
+
+# Walk the change dependency tree to find a cycle
+def detect_cycle(change, history=None):
+    if history is None:
+        history = []
+    else:
+        history = history[:]
+    history.append(change.number)
+    for dep in change.needs_changes:
+        if dep.number in history:
+            raise Exception("Dependency cycle detected: %s in %s" % (
+                dep.number, history))
+        detect_cycle(dep, history)
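
A minimal sketch of detect_cycle on stand-in objects (real changes come
from zuul.model; these stubs are illustrative only):

    class FakeChange(object):
        def __init__(self, number):
            self.number = number
            self.needs_changes = []

    a = FakeChange('1')
    b = FakeChange('2')
    a.needs_changes = [b]
    b.needs_changes = [a]
    # detect_cycle(a) raises:
    # Exception: Dependency cycle detected: 1 in ['1', '2']
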
 
 
 class GerritEventConnector(threading.Thread):
@@ -98,24 +114,22 @@
                     Can not get account information." % event.type)
             event.account = None
 
-        if (event.change_number and
-            self.connection.sched.getProject(event.project_name)):
+        if event.change_number:
+            # TODO(jhesketh): Check if the project exists?
+            # and self.connection.sched.getProject(event.project_name):
+
             # Call _getChange for the side effect of updating the
             # cache.  Note that this modifies Change objects outside
             # the main thread.
             # NOTE(jhesketh): Ideally we'd just remove the change from the
             # cache to denote that it needs updating. However the change
-            # object is already used by Item's and hence BuildSet's etc. and
+            # object is already used by Items and hence BuildSets etc. and
             # we need to update those objects by reference so that they have
             # the correct/new information and also avoid hitting gerrit
             # multiple times.
-            if self.connection.attached_to['source']:
-                self.connection.attached_to['source'][0]._getChange(
-                    event.change_number, event.patch_number, refresh=True)
-                # We only need to do this once since the connection maintains
-                # the cache (which is shared between all the sources)
-                # NOTE(jhesketh): We may couple sources and connections again
-                # at which point this becomes more sensible.
+            self.connection._getChange(event.change_number,
+                                       event.patch_number,
+                                       refresh=True)
         self.connection.sched.addEvent(event)
 
     def run(self):
@@ -209,6 +223,10 @@
 class GerritConnection(BaseConnection):
     driver_name = 'gerrit'
     log = logging.getLogger("connection.gerrit")
+    depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
+                               re.MULTILINE | re.IGNORECASE)
+    replication_timeout = 300
+    replication_retry_interval = 5
 
     def __init__(self, connection_name, connection_config):
         super(GerritConnection, self).__init__(connection_name,
@@ -225,26 +243,20 @@
         self.port = int(self.connection_config.get('port', 29418))
         self.keyfile = self.connection_config.get('sshkey', None)
         self.watcher_thread = None
-        self.event_queue = None
+        self.event_queue = Queue.Queue()
         self.client = None
 
         self.baseurl = self.connection_config.get('baseurl',
                                                   'https://%s' % self.server)
 
         self._change_cache = {}
+        self.projects = {}
         self.gerrit_event_connector = None
 
-    def getCachedChange(self, key):
-        if key in self._change_cache:
-            return self._change_cache.get(key)
-        return None
-
-    def updateChangeCache(self, key, value):
-        self._change_cache[key] = value
-
-    def deleteCachedChange(self, key):
-        if key in self._change_cache:
-            del self._change_cache[key]
+    def getProject(self, name):
+        if name not in self.projects:
+            self.projects[name] = Project(name)
+        return self.projects[name]
 
     def maintainCache(self, relevant):
         # This lets the user supply a list of change objects that are
@@ -257,6 +269,309 @@
         for key in remove:
             del self._change_cache[key]
 
+    def getChange(self, event):
+        if event.change_number:
+            refresh = False
+            change = self._getChange(event.change_number, event.patch_number,
+                                     refresh=refresh)
+        elif event.ref:
+            project = self.getProject(event.project_name)
+            change = Ref(project)
+            change.ref = event.ref
+            change.oldrev = event.oldrev
+            change.newrev = event.newrev
+            change.url = self._getGitwebUrl(project, sha=event.newrev)
+        else:
+            # TODOv3(jeblair): we need to get the project from the event;
+            # assume it carries a project_name, as the other cases do.
+            project = self.getProject(event.project_name)
+            change = NullChange(project)
+        return change
+
+    def _getChange(self, number, patchset, refresh=False, history=None):
+        key = '%s,%s' % (number, patchset)
+        change = self._change_cache.get(key)
+        if change and not refresh:
+            return change
+        if not change:
+            change = Change(None)
+            change.number = number
+            change.patchset = patchset
+        key = '%s,%s' % (change.number, change.patchset)
+        self._change_cache[key] = change
+        try:
+            self._updateChange(change, history)
+        except Exception:
+            if key in self._change_cache:
+                del self._change_cache[key]
+            raise
+        return change
+
+    def _getDependsOnFromCommit(self, message, change):
+        records = []
+        seen = set()
+        for match in self.depends_on_re.findall(message):
+            if match in seen:
+                self.log.debug("Ignoring duplicate Depends-On: %s" %
+                               (match,))
+                continue
+            seen.add(match)
+            query = "change:%s" % (match,)
+            self.log.debug("Updating %s: Running query %s "
+                           "to find needed changes" %
+                           (change, query,))
+            records.extend(self.simpleQuery(query))
+        return records
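
To make the footer format concrete, a small sketch of what
depends_on_re matches; the Change-Id value is made up:

    import re

    depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
                               re.MULTILINE | re.IGNORECASE)
    msg = ("Fix the widget\n\n"
           "Depends-On: I0123456789abcdef0123456789abcdef01234567\n")
    assert depends_on_re.findall(msg) == [
        'I0123456789abcdef0123456789abcdef01234567']
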
+
+    def _getNeededByFromCommit(self, change_id, change):
+        records = []
+        seen = set()
+        query = 'message:%s' % change_id
+        self.log.debug("Updating %s: Running query %s "
+                       "to find changes needed-by" %
+                       (change, query,))
+        results = self.simpleQuery(query)
+        for result in results:
+            for match in self.depends_on_re.findall(
+                result['commitMessage']):
+                if match != change_id:
+                    continue
+                key = (result['number'], result['currentPatchSet']['number'])
+                if key in seen:
+                    continue
+                self.log.debug("Updating %s: Found change %s,%s "
+                               "needs %s from commit" %
+                               (change, key[0], key[1], change_id))
+                seen.add(key)
+                records.append(result)
+        return records
+
+    def _updateChange(self, change, history=None):
+        self.log.info("Updating %s" % (change,))
+        data = self.query(change.number)
+        change._data = data
+
+        if change.patchset is None:
+            change.patchset = data['currentPatchSet']['number']
+
+        if 'project' not in data:
+            raise exceptions.ChangeNotFound(change.number, change.patchset)
+        change.project = self.getProject(data['project'])
+        change.branch = data['branch']
+        change.url = data['url']
+        max_ps = 0
+        files = []
+        for ps in data['patchSets']:
+            if ps['number'] == change.patchset:
+                change.refspec = ps['ref']
+                for f in ps.get('files', []):
+                    files.append(f['file'])
+            if int(ps['number']) > int(max_ps):
+                max_ps = ps['number']
+        if max_ps == change.patchset:
+            change.is_current_patchset = True
+        else:
+            change.is_current_patchset = False
+        change.files = files
+
+        change.is_merged = self._isMerged(change)
+        change.approvals = data['currentPatchSet'].get('approvals', [])
+        change.open = data['open']
+        change.status = data['status']
+        change.owner = data['owner']
+
+        if change.is_merged:
+            # This change is merged, so we don't need to look any further
+            # for dependencies.
+            self.log.debug("Updating %s: change is merged" % (change,))
+            return change
+
+        if history is None:
+            history = []
+        else:
+            history = history[:]
+        history.append(change.number)
+
+        needs_changes = []
+        if 'dependsOn' in data:
+            parts = data['dependsOn'][0]['ref'].split('/')
+            dep_num, dep_ps = parts[3], parts[4]
+            if dep_num in history:
+                raise Exception("Dependency cycle detected: %s in %s" % (
+                    dep_num, history))
+            self.log.debug("Updating %s: Getting git-dependent change %s,%s" %
+                           (change, dep_num, dep_ps))
+            dep = self._getChange(dep_num, dep_ps, history=history)
+            # Because we are not forcing a refresh in _getChange, it
+            # may return without executing this code, so if we are
+            # updating our change to add ourselves to a dependency
+            # cycle, we won't detect it.  By explicitly performing a
+            # walk of the dependency tree, we will.
+            detect_cycle(dep, history)
+            if (not dep.is_merged) and dep not in needs_changes:
+                needs_changes.append(dep)
+
+        for record in self._getDependsOnFromCommit(data['commitMessage'],
+                                                   change):
+            dep_num = record['number']
+            dep_ps = record['currentPatchSet']['number']
+            if dep_num in history:
+                raise Exception("Dependency cycle detected: %s in %s" % (
+                    dep_num, history))
+            self.log.debug("Updating %s: Getting commit-dependent "
+                           "change %s,%s" %
+                           (change, dep_num, dep_ps))
+            dep = self._getChange(dep_num, dep_ps, history=history)
+            # Because we are not forcing a refresh in _getChange, it
+            # may return without executing this code, so if we are
+            # updating our change to add ourselves to a dependency
+            # cycle, we won't detect it.  By explicitly performing a
+            # walk of the dependency tree, we will.
+            detect_cycle(dep, history)
+            if (not dep.is_merged) and dep not in needs_changes:
+                needs_changes.append(dep)
+        change.needs_changes = needs_changes
+
+        needed_by_changes = []
+        if 'neededBy' in data:
+            for needed in data['neededBy']:
+                parts = needed['ref'].split('/')
+                dep_num, dep_ps = parts[3], parts[4]
+                self.log.debug("Updating %s: Getting git-needed change %s,%s" %
+                               (change, dep_num, dep_ps))
+                dep = self._getChange(dep_num, dep_ps)
+                if (not dep.is_merged) and dep.is_current_patchset:
+                    needed_by_changes.append(dep)
+
+        for record in self._getNeededByFromCommit(data['id'], change):
+            dep_num = record['number']
+            dep_ps = record['currentPatchSet']['number']
+            self.log.debug("Updating %s: Getting commit-needed change %s,%s" %
+                           (change, dep_num, dep_ps))
+            # Because a commit needed-by may be a cross-repo
+            # dependency, cause that change to refresh so that it will
+            # reference the latest patchset of its Depends-On (this
+            # change).
+            dep = self._getChange(dep_num, dep_ps, refresh=True)
+            if (not dep.is_merged) and dep.is_current_patchset:
+                needed_by_changes.append(dep)
+        change.needed_by_changes = needed_by_changes
+
+        return change
+
+    def isMerged(self, change, head=None):
+        self.log.debug("Checking if change %s is merged" % change)
+        if not change.number:
+            self.log.debug("Change has no number; considering it merged")
+            # A change without a number is probably a ref-updated
+            # event (e.g. a direct branch update), which means it is
+            # already merged.
+            return True
+
+        data = self.query(change.number)
+        change._data = data
+        change.is_merged = self._isMerged(change)
+        if change.is_merged:
+            self.log.debug("Change %s is merged" % (change,))
+        else:
+            self.log.debug("Change %s is not merged" % (change,))
+        if not head:
+            return change.is_merged
+        if not change.is_merged:
+            return False
+
+        ref = 'refs/heads/' + change.branch
+        self.log.debug("Waiting for %s to appear in git repo" % (change))
+        if self._waitForRefSha(change.project, ref, change._ref_sha):
+            self.log.debug("Change %s is in the git repo" %
+                           (change))
+            return True
+        self.log.debug("Change %s did not appear in the git repo" %
+                       (change))
+        return False
+
+    def _isMerged(self, change):
+        data = change._data
+        if not data:
+            return False
+        status = data.get('status')
+        if not status:
+            return False
+        if status == 'MERGED':
+            return True
+        return False
+
+    def _waitForRefSha(self, project, ref, old_sha=''):
+        # Wait for the ref to show up in the repo
+        start = time.time()
+        while time.time() - start < self.replication_timeout:
+            sha = self.getRefSha(project.name, ref)
+            if old_sha != sha:
+                return True
+            time.sleep(self.replication_retry_interval)
+        return False
+
+    def getRefSha(self, project, ref):
+        refs = {}
+        try:
+            refs = self.getInfoRefs(project)
+        except:
+            self.log.exception("Exception looking for ref %s" %
+                               ref)
+        sha = refs.get(ref, '')
+        return sha
+
+    def canMerge(self, change, allow_needs):
+        if not change.number:
+            self.log.debug("Change has no number; considering it merged")
+            # A change without a number is probably a ref-updated
+            # event (e.g. a direct branch update), which means it is
+            # already merged.
+            return True
+        data = change._data
+        if not data:
+            return False
+        if 'submitRecords' not in data:
+            return False
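+        # Each Gerrit submit record looks roughly like (illustrative):
+        #   {'status': 'NOT_READY',
+        #    'labels': [{'label': 'Verified', 'status': 'NEED'}, ...]}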
+        try:
+            for sr in data['submitRecords']:
+                if sr['status'] == 'OK':
+                    return True
+                elif sr['status'] == 'NOT_READY':
+                    for label in sr['labels']:
+                        if label['status'] in ['OK', 'MAY']:
+                            continue
+                        elif label['status'] in ['NEED', 'REJECT']:
+                            # It may be our own rejection, so we ignore it
+                            if label['label'].lower() not in allow_needs:
+                                return False
+                            continue
+                        else:
+                            # IMPOSSIBLE
+                            return False
+                else:
+                    # CLOSED, RULE_ERROR
+                    return False
+        except:
+            self.log.exception("Exception determining whether change "
+                               "%s can merge:" % change)
+            return False
+        return True
+
+    def getProjectOpenChanges(self, project):
+        # This is a best-effort function in case Gerrit is unable to return
+        # a particular change.  It happens.
+        query = "project:%s status:open" % (project.name,)
+        self.log.debug("Running query %s to get project open changes" %
+                       (query,))
+        data = self.simpleQuery(query)
+        changes = []
+        for record in data:
+            try:
+                changes.append(
+                    self._getChange(record['number'],
+                                    record['currentPatchSet']['number']))
+            except Exception:
+                self.log.exception("Unable to query change %s" %
+                                   (record.get('number'),))
+        return changes
+
     def addEvent(self, data):
         return self.event_queue.put((time.time(), data))
 
@@ -455,7 +770,6 @@
             self.watcher_thread.join()
 
     def _start_watcher_thread(self):
-        self.event_queue = Queue.Queue()
         self.watcher_thread = GerritWatcher(
             self,
             self.user,
diff --git a/zuul/launcher/gearman.py b/zuul/launcher/client.py
similarity index 87%
rename from zuul/launcher/gearman.py
rename to zuul/launcher/client.py
index 02f78fd..cd6dcfd 100644
--- a/zuul/launcher/gearman.py
+++ b/zuul/launcher/client.py
@@ -26,6 +26,35 @@
 from zuul.model import Build
 
 
+def make_merger_item(item):
+    # Create a dictionary with all info about the item needed by
+    # the merger.
+    number = None
+    patchset = None
+    oldrev = None
+    newrev = None
+    if hasattr(item.change, 'number'):
+        number = item.change.number
+        patchset = item.change.patchset
+    elif hasattr(item.change, 'newrev'):
+        oldrev = item.change.oldrev
+        newrev = item.change.newrev
+    connection_name = item.pipeline.source.connection.connection_name
+    return dict(project=item.change.project.name,
+                url=item.pipeline.source.getGitUrl(
+                    item.change.project),
+                connection_name=connection_name,
+                merge_mode=item.change.project.merge_mode,
+                refspec=item.change.refspec,
+                branch=item.change.branch,
+                ref=item.current_build_set.ref,
+                number=number,
+                patchset=patchset,
+                oldrev=oldrev,
+                newrev=newrev,
+                )
+
+
 class GearmanCleanup(threading.Thread):
     """ A thread that checks to see if outstanding builds have
     completed without reporting back. """
@@ -149,8 +178,8 @@
         self.log.info("Done waiting for Gearman server")
 
 
-class Gearman(object):
-    log = logging.getLogger("zuul.Gearman")
+class LaunchClient(object):
+    log = logging.getLogger("zuul.LaunchClient")
     negative_function_cache_ttl = 5
 
     def __init__(self, config, sched, swift):
@@ -165,11 +194,6 @@
             port = config.get('gearman', 'port')
         else:
             port = 4730
-        if config.has_option('gearman', 'check_job_registration'):
-            self.job_registration = config.getboolean(
-                'gearman', 'check_job_registration')
-        else:
-            self.job_registration = True
 
         self.gearman = ZuulGearmanClient(self)
         self.gearman.addServer(server, port)
@@ -230,20 +254,7 @@
         # NOTE(jhesketh): The params need to stay in a key=value data pair
         # as workers cannot necessarily handle lists.
 
-        if callable(job.parameter_function):
-            pargs = inspect.getargspec(job.parameter_function)
-            if len(pargs.args) == 2:
-                job.parameter_function(item, params)
-            else:
-                job.parameter_function(item, job, params)
-            self.log.debug("Custom parameter function used for job %s, "
-                           "change: %s, params: %s" % (job, item.change,
-                                                       params))
-
-        # NOTE(mmedvede): Swift parameter creation should remain after the call
-        # to job.parameter_function to make it possible to update LOG_PATH for
-        # swift upload url using parameter_function mechanism.
-        if job.swift and self.swift.connection:
+        if 'swift' in job.auth and self.swift.connection:
 
             for name, s in job.swift.items():
                 swift_instructions = {}
@@ -273,6 +284,16 @@
                 for key, value in swift_instructions.items():
                     params['_'.join(['SWIFT', name, key])] = value
 
+        if callable(job.parameter_function):
+            pargs = inspect.getargspec(job.parameter_function)
+            if len(pargs.args) == 2:
+                job.parameter_function(item, params)
+            else:
+                job.parameter_function(item, job, params)
+            self.log.debug("Custom parameter function used for job %s, "
+                           "change: %s, params: %s" % (job, item.change,
+                                                       params))
+
     def launch(self, job, item, pipeline, dependent_items=[]):
         uuid = str(uuid4().hex)
         self.log.info(
@@ -313,7 +334,7 @@
             params['ZUUL_REF'] = item.change.ref
             params['ZUUL_COMMIT'] = item.change.newrev
 
-        # The destination_path is a unqiue path for this build request
+        # The destination_path is a unique path for this build request
         # and generally where the logs are expected to be placed
         destination_path = os.path.join(item.change.getBasePath(),
                                         pipeline.name, job.name, uuid[:7])
@@ -344,10 +365,21 @@
         # ZUUL_OLDREV
         # ZUUL_NEWREV
 
-        if 'ZUUL_NODE' in params:
-            name = "build:%s:%s" % (job.name, params['ZUUL_NODE'])
-        else:
-            name = "build:%s" % job.name
+        all_items = dependent_items + [item]
+        merger_items = map(make_merger_item, all_items)
+
+        params['job'] = job.name
+        params['items'] = merger_items
+        params['projects'] = []
+        projects = set()
+        for item in all_items:
+            if item.change.project not in projects:
+                params['projects'].append(
+                    dict(name=item.change.project.name,
+                         url=item.pipeline.source.getGitUrl(
+                             item.change.project)))
+                projects.add(item.change.project)
+
         build = Build(job, uuid)
         build.parameters = params
 
@@ -355,18 +387,12 @@
             self.sched.onBuildCompleted(build, 'SUCCESS')
             return build
 
-        gearman_job = gear.Job(name, json.dumps(params),
+        gearman_job = gear.Job('launcher:launch', json.dumps(params),
                                unique=uuid)
         build.__gearman_job = gearman_job
+        build.__gearman_manager = None
         self.builds[uuid] = build
 
-        if self.job_registration and not self.isJobRegistered(
-                gearman_job.name):
-            self.log.error("Job %s is not registered with Gearman" %
-                           gearman_job)
-            self.onBuildCompleted(gearman_job, 'NOT_REGISTERED')
-            return build
-
         if pipeline.precedence == zuul.model.PRECEDENCE_NORMAL:
             precedence = gear.PRECEDENCE_NORMAL
         elif pipeline.precedence == zuul.model.PRECEDENCE_HIGH:
@@ -403,7 +429,8 @@
             self.log.debug("Build %s has no associated gearman job" % build)
             return
 
-        if build.number is not None:
+        # TODOv3(jeblair): make a nicer way of recording build start.
+        if build.url is not None:
             self.log.debug("Build %s has already started" % build)
             self.cancelRunningBuild(build)
             self.log.debug("Canceled running build %s" % build)
@@ -419,7 +446,7 @@
         time.sleep(1)
 
         self.log.debug("Still unable to find build %s to cancel" % build)
-        if build.number:
+        if build.url:
             self.log.debug("Build %s has just started" % build)
             self.log.debug("Canceled running build %s" % build)
             self.cancelRunningBuild(build)
@@ -448,7 +475,7 @@
             # internal dict after it's added to the report queue.
             del self.builds[job.unique]
         else:
-            if not job.name.startswith("stop:"):
+            if not job.name.startswith("launcher:stop:"):
                 self.log.error("Unable to find build %s" % job.unique)
 
     def onWorkStatus(self, job):
@@ -456,14 +483,14 @@
         self.log.debug("Build %s update %s" % (job, data))
         build = self.builds.get(job.unique)
         if build:
+            started = (build.url is not None)
             # Allow URL to be updated
-            build.url = data.get('url') or build.url
+            build.url = data.get('url', build.url)
             # Update information about worker
             build.worker.updateFromData(data)
 
-            if build.number is None:
+            if not started:
                 self.log.info("Build %s started" % job)
-                build.number = data.get('number')
                 build.__gearman_manager = data.get('manager')
                 self.sched.onBuildStarted(build)
         else:
@@ -493,10 +520,12 @@
         return False
 
     def cancelRunningBuild(self, build):
+        if not build.__gearman_manager:
+            self.log.error("Build %s has no manager while canceling" %
+                           (build,))
         stop_uuid = str(uuid4().hex)
-        data = dict(name=build.job.name,
-                    number=build.number)
-        stop_job = gear.Job("stop:%s" % build.__gearman_manager,
+        data = dict(uuid=build.__gearman_job.unique)
+        stop_job = gear.Job("launcher:stop:%s" % build.__gearman_manager,
                             json.dumps(data), unique=stop_uuid)
         self.meta_jobs[stop_uuid] = stop_job
         self.log.debug("Submitting stop job: %s", stop_job)
@@ -504,28 +533,6 @@
                                timeout=300)
         return True
 
-    def setBuildDescription(self, build, desc):
-        try:
-            name = "set_description:%s" % build.__gearman_manager
-        except AttributeError:
-            # We haven't yet received the first data packet that tells
-            # us where the job is running.
-            return False
-
-        if self.job_registration and not self.isJobRegistered(name):
-            return False
-
-        desc_uuid = str(uuid4().hex)
-        data = dict(name=build.job.name,
-                    number=build.number,
-                    html_description=desc)
-        desc_job = gear.Job(name, json.dumps(data), unique=desc_uuid)
-        self.meta_jobs[desc_uuid] = desc_job
-        self.log.debug("Submitting describe job: %s", desc_job)
-        self.gearman.submitJob(desc_job, precedence=gear.PRECEDENCE_LOW,
-                               timeout=300)
-        return True
-
     def lookForLostBuilds(self):
         self.log.debug("Looking for lost builds")
         for build in self.builds.values():
diff --git a/zuul/launcher/server.py b/zuul/launcher/server.py
new file mode 100644
index 0000000..2a988b0
--- /dev/null
+++ b/zuul/launcher/server.py
@@ -0,0 +1,341 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import json
+import logging
+import os
+import shutil
+import socket
+import subprocess
+import tempfile
+import threading
+import traceback
+
+import gear
+import yaml
+
+import zuul.merger
+
+
+# TODOv3(mordred): put git repos in a hierarchy that includes source
+# hostname, eg: git.openstack.org/openstack/nova.  Also, configure
+# sources to have an alias, so that the review.openstack.org source
+# repos end up in git.openstack.org.
+
+class JobDir(object):
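+    # Temporary on-disk layout for a single job run.  JobDir is used
+    # as a context manager (see LaunchServer._launch below) so the
+    # directory is always cleaned up:
+    #     with JobDir() as jobdir:
+    #         ...  # jobdir.git_root, jobdir.ansible_root, etc.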
+    def __init__(self):
+        self.root = tempfile.mkdtemp()
+        self.git_root = os.path.join(self.root, 'git')
+        os.makedirs(self.git_root)
+        self.ansible_root = os.path.join(self.root, 'ansible')
+        os.makedirs(self.ansible_root)
+        self.inventory = os.path.join(self.ansible_root, 'inventory')
+        self.playbook = os.path.join(self.ansible_root, 'playbook')
+        self.config = os.path.join(self.ansible_root, 'ansible.cfg')
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, etype, value, tb):
+        shutil.rmtree(self.root)
+
+
+class UpdateTask(object):
+    def __init__(self, project, url):
+        self.project = project
+        self.url = url
+        self.event = threading.Event()
+
+    def __eq__(self, other):
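+        # Tasks are equal if they update the same project, which lets
+        # DeduplicateQueue coalesce redundant update requests.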
+        if other.project == self.project:
+            return True
+        return False
+
+    def wait(self):
+        self.event.wait()
+
+    def setComplete(self):
+        self.event.set()
+
+
+class DeduplicateQueue(object):
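+    # A FIFO queue that coalesces equal items: put() returns an
+    # already-enqueued equivalent item instead of adding a duplicate.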
+    def __init__(self):
+        self.queue = collections.deque()
+        self.condition = threading.Condition()
+
+    def qsize(self):
+        return len(self.queue)
+
+    def put(self, item):
+        # Returns the original item if added, or an equivalent item if
+        # already enqueued.
+        self.condition.acquire()
+        ret = None
+        try:
+            for x in self.queue:
+                if item == x:
+                    ret = x
+            if ret is None:
+                ret = item
+                self.queue.append(item)
+                self.condition.notify()
+        finally:
+            self.condition.release()
+        return ret
+
+    def get(self):
+        self.condition.acquire()
+        try:
+            while True:
+                try:
+                    ret = self.queue.popleft()
+                    return ret
+                except IndexError:
+                    pass
+                self.condition.wait()
+        finally:
+            self.condition.release()
+
+
+class LaunchServer(object):
+    log = logging.getLogger("zuul.LaunchServer")
+
+    def __init__(self, config, connections={}):
+        self.config = config
+        # TODOv3(mordred): make the launcher name more unique --
+        # perhaps hostname+pid.
+        self.hostname = socket.gethostname()
+        self.zuul_url = config.get('merger', 'zuul_url')
+
+        if self.config.has_option('merger', 'git_dir'):
+            self.merge_root = self.config.get('merger', 'git_dir')
+        else:
+            self.merge_root = '/var/lib/zuul/git'
+
+        if self.config.has_option('merger', 'git_user_email'):
+            self.merge_email = self.config.get('merger', 'git_user_email')
+        else:
+            self.merge_email = None
+
+        if self.config.has_option('merger', 'git_user_name'):
+            self.merge_name = self.config.get('merger', 'git_user_name')
+        else:
+            self.merge_name = None
+
+        self.connections = connections
+        self.merger = self._getMerger(self.merge_root)
+        self.update_queue = DeduplicateQueue()
+
+    def _getMerger(self, root):
+        return zuul.merger.merger.Merger(root, self.connections,
+                                         self.merge_email, self.merge_name)
+
+    def start(self):
+        self._running = True
+        server = self.config.get('gearman', 'server')
+        if self.config.has_option('gearman', 'port'):
+            port = self.config.get('gearman', 'port')
+        else:
+            port = 4730
+        self.worker = gear.Worker('Zuul Launch Server')
+        self.worker.addServer(server, port)
+        self.log.debug("Waiting for server")
+        self.worker.waitForServer()
+        self.log.debug("Registering")
+        self.register()
+        self.log.debug("Starting worker")
+        self.update_thread = threading.Thread(target=self._updateLoop)
+        self.update_thread.daemon = True
+        self.update_thread.start()
+        self.thread = threading.Thread(target=self.run)
+        self.thread.daemon = True
+        self.thread.start()
+
+    def register(self):
+        self.worker.registerFunction("launcher:launch")
+        self.worker.registerFunction("launcher:stop:%s" % self.hostname)
+        self.worker.registerFunction("merger:merge")
+        self.worker.registerFunction("merger:cat")
+
+    def stop(self):
+        self.log.debug("Stopping")
+        self._running = False
+        self.worker.shutdown()
+        self.log.debug("Stopped")
+
+    def join(self):
+        self.update_thread.join()
+        self.thread.join()
+
+    def _updateLoop(self):
+        while self._running:
+            try:
+                self._innerUpdateLoop()
+            except:
+                self.log.exception("Exception in update thread:")
+
+    def _innerUpdateLoop(self):
+        # Body of the update loop; keeps the git repositories up to date.
+        task = self.update_queue.get()
+        self.log.info("Updating repo %s from %s" % (task.project, task.url))
+        self.merger.updateRepo(task.project, task.url)
+        self.log.debug("Finished updating repo %s from %s" %
+                       (task.project, task.url))
+        task.setComplete()
+
+    def update(self, project, url):
+        task = UpdateTask(project, url)
+        task = self.update_queue.put(task)
+        return task
+
+    def run(self):
+        self.log.debug("Starting launch listener")
+        while self._running:
+            try:
+                job = self.worker.getJob()
+                try:
+                    if job.name == 'launcher:launch':
+                        self.log.debug("Got launch job: %s" % job.unique)
+                        self.launchJob(job)
+                    elif job.name.startswith('launcher:stop'):
+                        self.log.debug("Got stop job: %s" % job.unique)
+                        self.stopJob(job)
+                    elif job.name == 'merger:cat':
+                        self.log.debug("Got cat job: %s" % job.unique)
+                        self.cat(job)
+                    elif job.name == 'merger:merge':
+                        self.log.debug("Got merge job: %s" % job.unique)
+                        self.merge(job)
+                    else:
+                        self.log.error("Unable to handle job %s" % job.name)
+                        job.sendWorkFail()
+                except Exception:
+                    self.log.exception("Exception while running job")
+                    job.sendWorkException(traceback.format_exc())
+            except Exception:
+                self.log.exception("Exception while getting job")
+
+    def launchJob(self, job):
+        thread = threading.Thread(target=self._launch, args=(job,))
+        thread.start()
+
+    def _launch(self, job):
+        self.log.debug("Job %s: beginning" % (job.unique,))
+        with JobDir() as jobdir:
+            self.log.debug("Job %s: job root at %s" %
+                           (job.unique, jobdir.root))
+            args = json.loads(job.arguments)
+            tasks = []
+            for project in args['projects']:
+                self.log.debug("Job %s: updating project %s" %
+                               (job.unique, project['name']))
+                tasks.append(self.update(project['name'], project['url']))
+            for task in tasks:
+                task.wait()
+            self.log.debug("Job %s: git updates complete" % (job.unique,))
+            merger = self._getMerger(jobdir.git_root)
+            commit = merger.mergeChanges(args['items'])  # noqa
+
+            # TODOv3: Ansible the ansible thing here.
+            self.prepareAnsibleFiles(jobdir, args)
+
+            data = {
+                'manager': self.hostname,
+                'url': 'https://server/job',
+            }
+
+            # TODOv3:
+            # 'name': self.name,
+            # 'manager': self.launch_server.hostname,
+            # 'worker_name': 'My Worker',
+            # 'worker_hostname': 'localhost',
+            # 'worker_ips': ['127.0.0.1', '192.168.1.1'],
+            # 'worker_fqdn': 'zuul.example.org',
+            # 'worker_program': 'FakeBuilder',
+            # 'worker_version': 'v1.1',
+            # 'worker_extra': {'something': 'else'}
+
+            job.sendWorkData(json.dumps(data))
+            job.sendWorkStatus(0, 100)
+
+            result = self.runAnsible(jobdir, job)
+
+            result = dict(result=result)
+            job.sendWorkComplete(json.dumps(result))
+
+    def stopJob(self, job):
+        # TODOv3: implement.
+        job.sendWorkComplete()
+
+    def getHostList(self, args):
+        # TODOv3: This should get the appropriate nodes from nodepool,
+        # or in the unit tests, be overridden to return localhost.
+        return [('localhost', dict(ansible_connection='local'))]
+
+    def prepareAnsibleFiles(self, jobdir, args):
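+        # The inventory is written one host per line; with the default
+        # getHostList above it contains a single line like:
+        #   localhost ansible_connection=local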
+        with open(jobdir.inventory, 'w') as inventory:
+            for host_name, host_vars in self.getHostList(args):
+                inventory.write(host_name)
+                inventory.write(' ')
+                for k, v in host_vars.items():
+                    inventory.write('%s=%s ' % (k, v))
+                inventory.write('\n')
+        with open(jobdir.playbook, 'w') as playbook:
+            play = dict(hosts='localhost',
+                        tasks=[dict(name='test',
+                                    shell='echo Hello world')])
+            playbook.write(yaml.dump([play]))
+        with open(jobdir.config, 'w') as config:
+            config.write('[defaults]\n')
+            config.write('hostfile = %s\n' % jobdir.inventory)
+
+    def runAnsible(self, jobdir, job):
+        # Job is included here for the benefit of the test framework.
+        proc = subprocess.Popen(
+            ['ansible-playbook', jobdir.playbook],
+            cwd=jobdir.ansible_root,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        (out, err) = proc.communicate()
+        ret = proc.wait()
+        print(out)
+        print(err)
+        if ret == 0:
+            return 'SUCCESS'
+        else:
+            return 'FAILURE'
+
+    def cat(self, job):
+        args = json.loads(job.arguments)
+        task = self.update(args['project'], args['url'])
+        task.wait()
+        files = self.merger.getFiles(args['project'], args['url'],
+                                     args['branch'], args['files'])
+        result = dict(updated=True,
+                      files=files,
+                      zuul_url=self.zuul_url)
+        job.sendWorkComplete(json.dumps(result))
+
+    def merge(self, job):
+        args = json.loads(job.arguments)
+        ret = self.merger.mergeChanges(args['items'], args.get('files'))
+        result = dict(merged=(ret is not None),
+                      zuul_url=self.zuul_url)
+        if args.get('files'):
+            result['commit'], result['files'] = ret
+        else:
+            result['commit'] = ret
+        job.sendWorkComplete(json.dumps(result))
diff --git a/zuul/layoutvalidator.py b/zuul/layoutvalidator.py
index e1e8ac6..1f008c3 100644
--- a/zuul/layoutvalidator.py
+++ b/zuul/layoutvalidator.py
@@ -25,6 +25,30 @@
     return v.Any([x], x)
 
 
+class ConfigSchema(object):
+    tenant_source = v.Schema({'repos': [str]})
+
+    def validateTenantSources(self, value, path=[]):
+        if isinstance(value, dict):
+            for k, val in value.items():
+                self.validateTenantSource(val, path + [k])
+        else:
+            raise v.Invalid("Invalid tenant source", path)
+
+    def validateTenantSource(self, value, path=[]):
+        # TODOv3(jeblair): validate against connections
+        self.tenant_source(value)
+
+    def getSchema(self, data, connections=None):
+        tenant = {v.Required('name'): str,
+                  'include': toList(str),
+                  'source': self.validateTenantSources}
+
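+        # An illustrative tenant config that this schema accepts:
+        #   tenants:
+        #     - name: openstack
+        #       source:
+        #         gerrit:
+        #           repos:
+        #             - openstack-infra/zuul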
+        schema = v.Schema({'tenants': [tenant]})
+
+        return schema
+
+
 class LayoutSchema(object):
     include = {'python-file': str}
     includes = [include]
@@ -342,3 +366,9 @@
                 if action in pipeline:
                     self.extraDriverValidation('reporter', pipeline[action],
                                                connections)
+
+
+class ConfigValidator(object):
+    def validate(self, data, connections=None):
+        schema = ConfigSchema().getSchema(data, connections)
+        schema(data)
diff --git a/zuul/lib/connections.py b/zuul/lib/connections.py
index 92ddb0f..a37907a 100644
--- a/zuul/lib/connections.py
+++ b/zuul/lib/connections.py
@@ -18,49 +18,113 @@
 import zuul.connection.smtp
 
 
-def configure_connections(config):
-    # Register connections from the config
+class ConnectionRegistry(object):
+    """A registry of connections"""
 
-    # TODO(jhesketh): import connection modules dynamically
-    connections = {}
+    def __init__(self):
+        self.connections = {}
 
-    for section_name in config.sections():
-        con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
-                             section_name, re.I)
-        if not con_match:
-            continue
-        con_name = con_match.group(2)
-        con_config = dict(config.items(section_name))
+    def registerScheduler(self, sched, load=True):
+        for connection_name, connection in self.connections.items():
+            connection.registerScheduler(sched)
+            if load:
+                connection.onLoad()
 
-        if 'driver' not in con_config:
-            raise Exception("No driver specified for connection %s."
-                            % con_name)
+    def stop(self):
+        for connection_name, connection in self.connections.items():
+            connection.onStop()
 
-        con_driver = con_config['driver']
+    def configure(self, config):
+        # Register connections from the config
+        # TODO(jhesketh): import connection modules dynamically
+        connections = {}
 
-        # TODO(jhesketh): load the required class automatically
-        if con_driver == 'gerrit':
-            connections[con_name] = \
-                zuul.connection.gerrit.GerritConnection(con_name,
-                                                        con_config)
-        elif con_driver == 'smtp':
-            connections[con_name] = \
-                zuul.connection.smtp.SMTPConnection(con_name, con_config)
+        for section_name in config.sections():
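+            # Connection sections look like [connection gerrit] or
+            # [connection "gerrit"]; group 2 captures the name.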
+            con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
+                                 section_name, re.I)
+            if not con_match:
+                continue
+            con_name = con_match.group(2)
+            con_config = dict(config.items(section_name))
+
+            if 'driver' not in con_config:
+                raise Exception("No driver specified for connection %s."
+                                % con_name)
+
+            con_driver = con_config['driver']
+
+            # TODO(jhesketh): load the required class automatically
+            if con_driver == 'gerrit':
+                connections[con_name] = \
+                    zuul.connection.gerrit.GerritConnection(con_name,
+                                                            con_config)
+            elif con_driver == 'smtp':
+                connections[con_name] = \
+                    zuul.connection.smtp.SMTPConnection(con_name, con_config)
+            else:
+                raise Exception("Unknown driver, %s, for connection %s"
+                                % (con_config['driver'], con_name))
+
+        # If the [gerrit] or [smtp] sections still exist, load them in as a
+        # connection named 'gerrit' or 'smtp' respectively.
+
+        if 'gerrit' in config.sections():
+            connections['gerrit'] = \
+                zuul.connection.gerrit.GerritConnection(
+                    'gerrit', dict(config.items('gerrit')))
+
+        if 'smtp' in config.sections():
+            connections['smtp'] = \
+                zuul.connection.smtp.SMTPConnection(
+                    'smtp', dict(config.items('smtp')))
+
+        self.connections = connections
+
+    def _getDriver(self, dtype, connection_name, driver_config={}):
+        # Instantiate a driver such as a trigger, source or reporter
+        # TODO(jhesketh): Make this list dynamic or use entrypoints etc.
+        # Stevedore was not a good fit here due to the nature of triggers.
+        # Specifically, we don't want to load a trigger per pipeline, as one
+        # trigger can listen to a stream (from gerrit, for example) and the
+        # scheduler decides which eventfilter to use. As such we want to load
+        # trigger+connection pairs uniquely.
+        drivers = {
+            'source': {
+                'gerrit': 'zuul.source.gerrit:GerritSource',
+            },
+            'trigger': {
+                'gerrit': 'zuul.trigger.gerrit:GerritTrigger',
+                'timer': 'zuul.trigger.timer:TimerTrigger',
+                'zuul': 'zuul.trigger.zuultrigger:ZuulTrigger',
+            },
+            'reporter': {
+                'gerrit': 'zuul.reporter.gerrit:GerritReporter',
+                'smtp': 'zuul.reporter.smtp:SMTPReporter',
+            },
+        }
+
+        # TODO(jhesketh): Check the connection_name exists
+        if connection_name in self.connections.keys():
+            driver_name = self.connections[connection_name].driver_name
+            connection = self.connections[connection_name]
         else:
-            raise Exception("Unknown driver, %s, for connection %s"
-                            % (con_config['driver'], con_name))
+            # In some cases a driver may not be related to a connection. For
+            # example, the 'timer' or 'zuul' triggers.
+            driver_name = connection_name
+            connection = None
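+        # Entries take the form 'module.path:ClassName'; import the
+        # module and instantiate the class with the driver config and
+        # connection.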
+        driver = drivers[dtype][driver_name].split(':')
+        driver_instance = getattr(
+            __import__(driver[0], fromlist=['']), driver[1])(
+                driver_config, connection
+        )
 
-    # If the [gerrit] or [smtp] sections still exist, load them in as a
-    # connection named 'gerrit' or 'smtp' respectfully
+        return driver_instance
 
-    if 'gerrit' in config.sections():
-        connections['gerrit'] = \
-            zuul.connection.gerrit.GerritConnection(
-                'gerrit', dict(config.items('gerrit')))
+    def getSource(self, connection_name):
+        return self._getDriver('source', connection_name)
 
-    if 'smtp' in config.sections():
-        connections['smtp'] = \
-            zuul.connection.smtp.SMTPConnection(
-                'smtp', dict(config.items('smtp')))
+    def getReporter(self, connection_name, driver_config={}):
+        return self._getDriver('reporter', connection_name, driver_config)
 
-    return connections
+    def getTrigger(self, connection_name, driver_config={}):
+        return self._getDriver('trigger', connection_name, driver_config)
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
new file mode 100644
index 0000000..0cd1877
--- /dev/null
+++ b/zuul/manager/__init__.py
@@ -0,0 +1,723 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from zuul import exceptions
+from zuul.model import NullChange
+
+
+class DynamicChangeQueueContextManager(object):
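+    # Wraps a dynamically created change queue; on exit the queue is
+    # removed from its pipeline if it no longer contains any items.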
+    def __init__(self, change_queue):
+        self.change_queue = change_queue
+
+    def __enter__(self):
+        return self.change_queue
+
+    def __exit__(self, etype, value, tb):
+        if self.change_queue and not self.change_queue.queue:
+            self.change_queue.pipeline.removeQueue(self.change_queue)
+
+
+class StaticChangeQueueContextManager(object):
+    def __init__(self, change_queue):
+        self.change_queue = change_queue
+
+    def __enter__(self):
+        return self.change_queue
+
+    def __exit__(self, etype, value, tb):
+        pass
+
+
+class PipelineManager(object):
+    """Abstract Base Class for enqueing and processing Changes in a Pipeline"""
+
+    log = logging.getLogger("zuul.PipelineManager")
+
+    def __init__(self, sched, pipeline):
+        self.sched = sched
+        self.pipeline = pipeline
+        self.event_filters = []
+        self.changeish_filters = []
+
+    def __str__(self):
+        return "<%s %s>" % (self.__class__.__name__, self.pipeline.name)
+
+    def _postConfig(self, layout):
+        self.log.info("Configured Pipeline Manager %s" % self.pipeline.name)
+        self.log.info("  Source: %s" % self.pipeline.source)
+        self.log.info("  Requirements:")
+        for f in self.changeish_filters:
+            self.log.info("    %s" % f)
+        self.log.info("  Events:")
+        for e in self.event_filters:
+            self.log.info("    %s" % e)
+        self.log.info("  Projects:")
+
+        def log_jobs(tree, indent=0):
+            istr = '    ' + ' ' * indent
+            if tree.job:
+                efilters = ''
+                for b in tree.job._branches:
+                    efilters += str(b)
+                for f in tree.job._files:
+                    efilters += str(f)
+                if tree.job.skip_if_matcher:
+                    efilters += str(tree.job.skip_if_matcher)
+                if efilters:
+                    efilters = ' ' + efilters
+                tags = []
+                if tree.job.hold_following_changes:
+                    tags.append('[hold]')
+                if not tree.job.voting:
+                    tags.append('[nonvoting]')
+                if tree.job.mutex:
+                    tags.append('[mutex: %s]' % tree.job.mutex)
+                tags = ' '.join(tags)
+                self.log.info("%s%s%s %s" % (istr, repr(tree.job),
+                                             efilters, tags))
+            for x in tree.job_trees:
+                log_jobs(x, indent + 2)
+
+        for p in layout.projects.values():
+            tree = self.pipeline.getJobTree(p)
+            if tree:
+                self.log.info("    %s" % p)
+                log_jobs(tree)
+        self.log.info("  On start:")
+        self.log.info("    %s" % self.pipeline.start_actions)
+        self.log.info("  On success:")
+        self.log.info("    %s" % self.pipeline.success_actions)
+        self.log.info("  On failure:")
+        self.log.info("    %s" % self.pipeline.failure_actions)
+        self.log.info("  On merge-failure:")
+        self.log.info("    %s" % self.pipeline.merge_failure_actions)
+        self.log.info("  When disabled:")
+        self.log.info("    %s" % self.pipeline.disabled_actions)
+
+    def getSubmitAllowNeeds(self):
+        # Get a list of code review labels that are allowed to be
+        # "needed" in the submit records for a change, with respect
+        # to this queue.  In other words, the list of review labels
+        # this queue itself is likely to set before submitting.
+        allow_needs = set()
+        for action_reporter in self.pipeline.success_actions:
+            allow_needs.update(action_reporter.getSubmitAllowNeeds())
+        return allow_needs
+
+    def eventMatches(self, event, change):
+        if event.forced_pipeline:
+            if event.forced_pipeline == self.pipeline.name:
+                self.log.debug("Event %s for change %s was directly assigned "
+                               "to pipeline %s" % (event, change, self))
+                return True
+            else:
+                return False
+        for ef in self.event_filters:
+            if ef.matches(event, change):
+                self.log.debug("Event %s for change %s matched %s "
+                               "in pipeline %s" % (event, change, ef, self))
+                return True
+        return False
+
+    def isChangeAlreadyInPipeline(self, change):
+        # Checks live items in the pipeline
+        for item in self.pipeline.getAllItems():
+            if item.live and change.equals(item.change):
+                return True
+        return False
+
+    def isChangeAlreadyInQueue(self, change, change_queue):
+        # Checks any item in the specified change queue
+        for item in change_queue.queue:
+            if change.equals(item.change):
+                return True
+        return False
+
+    def reportStart(self, item):
+        if not self.pipeline._disabled:
+            try:
+                self.log.info("Reporting start, action %s item %s" %
+                              (self.pipeline.start_actions, item))
+                ret = self.sendReport(self.pipeline.start_actions,
+                                      self.pipeline.source, item)
+                if ret:
+                    self.log.error("Reporting item start %s received "
+                                   "errors: %s" % (item, ret))
+            except:
+                self.log.exception("Exception while reporting start:")
+
+    def sendReport(self, action_reporters, source, item,
+                   message=None):
+        """Sends the built message off to configured reporters.
+
+        Takes the action_reporters, item, message and extra options and
+        sends them to the pluggable reporters.
+        """
+        report_errors = []
+        if len(action_reporters) > 0:
+            for reporter in action_reporters:
+                ret = reporter.report(source, self.pipeline, item)
+                if ret:
+                    report_errors.append(ret)
+            if len(report_errors) == 0:
+                return
+        return report_errors
+
+    def isChangeReadyToBeEnqueued(self, change):
+        return True
+
+    def enqueueChangesAhead(self, change, quiet, ignore_requirements,
+                            change_queue):
+        return True
+
+    def enqueueChangesBehind(self, change, quiet, ignore_requirements,
+                             change_queue):
+        return True
+
+    def checkForChangesNeededBy(self, change, change_queue):
+        return True
+
+    def getFailingDependentItems(self, item):
+        return None
+
+    def getDependentItems(self, item):
+        orig_item = item
+        items = []
+        while item.item_ahead:
+            items.append(item.item_ahead)
+            item = item.item_ahead
+        self.log.info("Change %s depends on changes %s" %
+                      (orig_item.change,
+                       [x.change for x in items]))
+        return items
+
+    def getItemForChange(self, change):
+        for item in self.pipeline.getAllItems():
+            if item.change.equals(change):
+                return item
+        return None
+
+    def findOldVersionOfChangeAlreadyInQueue(self, change):
+        for item in self.pipeline.getAllItems():
+            if not item.live:
+                continue
+            if change.isUpdateOf(item.change):
+                return item
+        return None
+
+    def removeOldVersionsOfChange(self, change):
+        if not self.pipeline.dequeue_on_new_patchset:
+            return
+        old_item = self.findOldVersionOfChangeAlreadyInQueue(change)
+        if old_item:
+            self.log.debug("Change %s is a new version of %s, removing %s" %
+                           (change, old_item.change, old_item))
+            self.removeItem(old_item)
+
+    def removeAbandonedChange(self, change):
+        self.log.debug("Change %s abandoned, removing." % change)
+        for item in self.pipeline.getAllItems():
+            if not item.live:
+                continue
+            if item.change.equals(change):
+                self.removeItem(item)
+
+    def reEnqueueItem(self, item, last_head):
+        with self.getChangeQueue(item.change, last_head.queue) as change_queue:
+            if change_queue:
+                self.log.debug("Re-enqueing change %s in queue %s" %
+                               (item.change, change_queue))
+                change_queue.enqueueItem(item)
+
+                # Get an updated copy of the layout if necessary.
+                # This will return one of the following:
+                # 1) An existing layout from the item ahead or pipeline.
+                # 2) A newly created layout from the cached pipeline
+                #    layout config plus the previously returned
+                #    in-repo files stored in the buildset.
+                # 3) None in the case that a fetch of the files from
+                #    the merger is still pending.
+                item.current_build_set.layout = self.getLayout(item)
+
+                # Rebuild the frozen job tree from the new layout, if
+                # we have one.  If not, it will be built later.
+                if item.current_build_set.layout:
+                    item.freezeJobTree()
+
+                # Re-set build results in case any new jobs have been
+                # added to the tree.
+                for build in item.current_build_set.getBuilds():
+                    if build.result:
+                        item.setResult(build)
+                # Similarly, reset the item state.
+                if item.current_build_set.unable_to_merge:
+                    item.setUnableToMerge()
+                if item.dequeued_needing_change:
+                    item.setDequeuedNeedingChange()
+
+                self.reportStats(item)
+                return True
+            else:
+                self.log.error("Unable to find change queue for project %s" %
+                               item.change.project)
+                return False
+
+    def addChange(self, change, quiet=False, enqueue_time=None,
+                  ignore_requirements=False, live=True,
+                  change_queue=None):
+        self.log.debug("Considering adding change %s" % change)
+
+        # If we are adding a live change, check if it's a live item
+        # anywhere in the pipeline.  Otherwise, we will perform the
+        # duplicate check below on the specific change_queue.
+        if live and self.isChangeAlreadyInPipeline(change):
+            self.log.debug("Change %s is already in pipeline, "
+                           "ignoring" % change)
+            return True
+
+        if not self.isChangeReadyToBeEnqueued(change):
+            self.log.debug("Change %s is not ready to be enqueued, ignoring" %
+                           change)
+            return False
+
+        if not ignore_requirements:
+            for f in self.changeish_filters:
+                if not f.matches(change):
+                    self.log.debug("Change %s does not match pipeline "
+                                   "requirement %s" % (change, f))
+                    return False
+
+        with self.getChangeQueue(change, change_queue) as change_queue:
+            if not change_queue:
+                self.log.debug("Unable to find change queue for "
+                               "change %s in project %s" %
+                               (change, change.project))
+                return False
+
+            if not self.enqueueChangesAhead(change, quiet, ignore_requirements,
+                                            change_queue):
+                self.log.debug("Failed to enqueue changes "
+                               "ahead of %s" % change)
+                return False
+
+            if self.isChangeAlreadyInQueue(change, change_queue):
+                self.log.debug("Change %s is already in queue, "
+                               "ignoring" % change)
+                return True
+
+            self.log.debug("Adding change %s to queue %s" %
+                           (change, change_queue))
+            item = change_queue.enqueueChange(change)
+            if enqueue_time:
+                item.enqueue_time = enqueue_time
+            item.live = live
+            self.reportStats(item)
+            if not quiet:
+                if len(self.pipeline.start_actions) > 0:
+                    self.reportStart(item)
+            self.enqueueChangesBehind(change, quiet, ignore_requirements,
+                                      change_queue)
+            for trigger in self.sched.triggers.values():
+                trigger.onChangeEnqueued(item.change, self.pipeline)
+            return True
+
+    def dequeueItem(self, item):
+        self.log.debug("Removing change %s from queue" % item.change)
+        item.queue.dequeueItem(item)
+
+    def removeItem(self, item):
+        # Remove an item from the queue, probably because it has been
+        # superseded by another change.
+        self.log.debug("Canceling builds behind change: %s "
+                       "because it is being removed." % item.change)
+        self.cancelJobs(item)
+        self.dequeueItem(item)
+        self.reportStats(item)
+
+    def provisionNodes(self, item):
+        jobs = item.findJobsToRequest()
+        if not jobs:
+            return False
+        build_set = item.current_build_set
+        self.log.debug("Requesting nodes for change %s" % item.change)
+        for job in jobs:
+            req = self.sched.nodepool.requestNodes(build_set, job)
+            self.log.debug("Adding node request %s for job %s to item %s" %
+                           (req, job, item))
+            build_set.setJobNodeRequest(job.name, req)
+        return True
+
+    def _launchJobs(self, item, jobs):
+        self.log.debug("Launching jobs for change %s" % item.change)
+        dependent_items = self.getDependentItems(item)
+        for job in jobs:
+            self.log.debug("Found job %s for change %s" % (job, item.change))
+            try:
+                build = self.sched.launcher.launch(job, item,
+                                                   self.pipeline,
+                                                   dependent_items)
+                self.log.debug("Adding build %s of job %s to item %s" %
+                               (build, job, item))
+                item.addBuild(build)
+            except:
+                self.log.exception("Exception while launching job %s "
+                                   "for change %s:" % (job, item.change))
+
+    def launchJobs(self, item):
+        # TODO(jeblair): This should return a value indicating a job
+        # was launched.  Appears to be a longstanding bug.
+        if not item.current_build_set.layout:
+            return False
+
+        jobs = item.findJobsToRun(self.sched.mutex)
+        if jobs:
+            self._launchJobs(item, jobs)
+
+    def cancelJobs(self, item, prime=True):
+        self.log.debug("Cancel jobs for change %s" % item.change)
+        canceled = False
+        old_build_set = item.current_build_set
+        if prime and item.current_build_set.ref:
+            item.resetAllBuilds()
+        for req in old_build_set.node_requests.values():
+            self.sched.nodepool.cancelRequest(req)
+        old_build_set.node_requests = {}
+        for build in old_build_set.getBuilds():
+            try:
+                self.sched.launcher.cancel(build)
+            except:
+                self.log.exception("Exception while canceling build %s "
+                                   "for change %s" % (build, item.change))
+            build.result = 'CANCELED'
+            canceled = True
+        for item_behind in item.items_behind:
+            self.log.debug("Canceling jobs for change %s, behind change %s" %
+                           (item_behind.change, item.change))
+            if self.cancelJobs(item_behind, prime=prime):
+                canceled = True
+        return canceled
+
+    def _makeMergerItem(self, item):
+        # Create a dictionary with all info about the item needed by
+        # the merger.
+        number = None
+        patchset = None
+        oldrev = None
+        newrev = None
+        if hasattr(item.change, 'number'):
+            number = item.change.number
+            patchset = item.change.patchset
+        elif hasattr(item.change, 'newrev'):
+            oldrev = item.change.oldrev
+            newrev = item.change.newrev
+        connection_name = self.pipeline.source.connection.connection_name
+        return dict(project=item.change.project.name,
+                    url=self.pipeline.source.getGitUrl(
+                        item.change.project),
+                    connection_name=connection_name,
+                    merge_mode=item.change.project.merge_mode,
+                    refspec=item.change.refspec,
+                    branch=item.change.branch,
+                    ref=item.current_build_set.ref,
+                    number=number,
+                    patchset=patchset,
+                    oldrev=oldrev,
+                    newrev=newrev,
+                    )
+
+    def getLayout(self, item):
+        if not item.change.updatesConfig():
+            if item.item_ahead:
+                return item.item_ahead.current_build_set.layout
+            else:
+                return item.queue.pipeline.layout
+        # This item updates the config, ask the merger for the result.
+        build_set = item.current_build_set
+        if build_set.merge_state == build_set.PENDING:
+            return None
+        if build_set.merge_state == build_set.COMPLETE:
+            if build_set.unable_to_merge:
+                return None
+            # Load layout
+            # Late import to break an import loop
+            import zuul.configloader
+            loader = zuul.configloader.ConfigLoader()
+            self.log.debug("Load dynamic layout with %s" % build_set.files)
+            layout = loader.createDynamicLayout(item.pipeline.layout.tenant,
+                                                build_set.files)
+            return layout
+        build_set.merge_state = build_set.PENDING
+        self.log.debug("Preparing dynamic layout for: %s" % item.change)
+        dependent_items = self.getDependentItems(item)
+        dependent_items.reverse()
+        all_items = dependent_items + [item]
+        merger_items = map(self._makeMergerItem, all_items)
+        self.sched.merger.mergeChanges(merger_items,
+                                       item.current_build_set,
+                                       ['.zuul.yaml'],
+                                       self.pipeline.precedence)
+
+    def prepareLayout(self, item):
+        # Get a copy of the layout in the context of the current
+        # queue.
+        # Returns True if the ref is ready, False otherwise.
+        if not item.current_build_set.ref:
+            item.current_build_set.setConfiguration()
+        if not item.current_build_set.layout:
+            item.current_build_set.layout = self.getLayout(item)
+        if not item.current_build_set.layout:
+            return False
+        if not item.job_tree:
+            item.freezeJobTree()
+        return True
+
+    def _processOneItem(self, item, nnfi):
+        changed = False
+        item_ahead = item.item_ahead
+        if item_ahead and (not item_ahead.live):
+            item_ahead = None
+        change_queue = item.queue
+        failing_reasons = []  # Reasons this item is failing
+
+        if self.checkForChangesNeededBy(item.change, change_queue) is not True:
+            # It's not okay to enqueue this change, we should remove it.
+            self.log.info("Dequeuing change %s because "
+                          "it can no longer merge" % item.change)
+            self.cancelJobs(item)
+            self.dequeueItem(item)
+            item.setDequeuedNeedingChange()
+            if item.live:
+                try:
+                    self.reportItem(item)
+                except exceptions.MergeFailure:
+                    pass
+            return (True, nnfi)
+        dep_items = self.getFailingDependentItems(item)
+        actionable = change_queue.isActionable(item)
+        item.active = actionable
+        ready = False
+        if dep_items:
+            failing_reasons.append('a needed change is failing')
+            self.cancelJobs(item, prime=False)
+        else:
+            item_ahead_merged = False
+            if (item_ahead and item_ahead.change.is_merged):
+                item_ahead_merged = True
+            if (item_ahead != nnfi and not item_ahead_merged):
+                # Our current base is different than what we expected,
+                # and it's not because our current base merged.  Something
+                # ahead must have failed.
+                self.log.info("Resetting builds for change %s because the "
+                              "item ahead, %s, is not the nearest non-failing "
+                              "item, %s" % (item.change, item_ahead, nnfi))
+                change_queue.moveItem(item, nnfi)
+                changed = True
+                self.cancelJobs(item)
+            if actionable:
+                ready = self.prepareLayout(item)
+                if item.current_build_set.unable_to_merge:
+                    failing_reasons.append("it has a merge conflict")
+                if ready and self.provisionNodes(item):
+                    changed = True
+        if actionable and ready and self.launchJobs(item):
+            changed = True
+        if item.didAnyJobFail():
+            failing_reasons.append("at least one job failed")
+        if (not item.live) and (not item.items_behind):
+            failing_reasons.append("is a non-live item with no items behind")
+            self.dequeueItem(item)
+            changed = True
+        if ((not item_ahead) and item.areAllJobsComplete() and item.live):
+            try:
+                self.reportItem(item)
+            except exceptions.MergeFailure:
+                failing_reasons.append("it did not merge")
+                for item_behind in item.items_behind:
+                    self.log.info("Resetting builds for change %s because the "
+                                  "item ahead, %s, failed to merge" %
+                                  (item_behind.change, item))
+                    self.cancelJobs(item_behind)
+            self.dequeueItem(item)
+            changed = True
+        elif not failing_reasons and item.live:
+            nnfi = item
+        item.current_build_set.failing_reasons = failing_reasons
+        if failing_reasons:
+            self.log.debug("%s is a failing item because %s" %
+                           (item, failing_reasons))
+        return (changed, nnfi)
+
+    def processQueue(self):
+        # Do whatever needs to be done for each change in the queue
+        self.log.debug("Starting queue processor: %s" % self.pipeline.name)
+        changed = False
+        for queue in self.pipeline.queues:
+            queue_changed = False
+            nnfi = None  # Nearest non-failing item
+            for item in queue.queue[:]:
+                item_changed, nnfi = self._processOneItem(
+                    item, nnfi)
+                if item_changed:
+                    queue_changed = True
+                self.reportStats(item)
+            if queue_changed:
+                changed = True
+                status = ''
+                for item in queue.queue:
+                    status += item.formatStatus()
+                if status:
+                    self.log.debug("Queue %s status is now:\n %s" %
+                                   (queue.name, status))
+        self.log.debug("Finished queue processor: %s (changed: %s)" %
+                       (self.pipeline.name, changed))
+        return changed
+
+    def onBuildStarted(self, build):
+        self.log.debug("Build %s started" % build)
+        return True
+
+    def onBuildCompleted(self, build):
+        self.log.debug("Build %s completed" % build)
+        item = build.build_set.item
+
+        item.setResult(build)
+        self.sched.mutex.release(item, build.job)
+        self.log.debug("Item %s status is now:\n %s" %
+                       (item, item.formatStatus()))
+        return True
+
+    def onMergeCompleted(self, event):
+        build_set = event.build_set
+        item = build_set.item
+        build_set.merge_state = build_set.COMPLETE
+        build_set.zuul_url = event.zuul_url
+        if event.merged:
+            build_set.commit = event.commit
+            build_set.files.setFiles(event.files)
+        elif event.updated:
+            if not isinstance(item.change, NullChange):
+                build_set.commit = item.change.newrev
+        if not build_set.commit and not isinstance(item.change, NullChange):
+            self.log.info("Unable to merge change %s" % item.change)
+            item.setUnableToMerge()
+
+    def onNodesProvisioned(self, event):
+        request = event.request
+        build_set = request.build_set
+        build_set.jobNodeRequestComplete(request.job.name, request,
+                                         request.nodes)
+        self.log.info("Completed node request %s for job %s of item %s" %
+                      (request, request.job.name, build_set.item))
+
+    def reportItem(self, item):
+        if not item.reported:
+            # _reportItem() returns True if it failed to report.
+            item.reported = not self._reportItem(item)
+        if self.changes_merge:
+            succeeded = item.didAllJobsSucceed()
+            merged = item.reported
+            if merged:
+                merged = self.pipeline.source.isMerged(item.change,
+                                                       item.change.branch)
+            self.log.info("Reported change %s status: all-succeeded: %s, "
+                          "merged: %s" % (item.change, succeeded, merged))
+            change_queue = item.queue
+            if not (succeeded and merged):
+                self.log.debug("Reported change %s failed tests or failed "
+                               "to merge" % (item.change))
+                change_queue.decreaseWindowSize()
+                self.log.debug("%s window size decreased to %s" %
+                               (change_queue, change_queue.window))
+                raise exceptions.MergeFailure(
+                    "Change %s failed to merge" % item.change)
+            else:
+                change_queue.increaseWindowSize()
+                self.log.debug("%s window size increased to %s" %
+                               (change_queue, change_queue.window))
+
+                for trigger in self.sched.triggers.values():
+                    trigger.onChangeMerged(item.change, self.pipeline.source)
+
+    def _reportItem(self, item):
+        self.log.debug("Reporting change %s" % item.change)
+        ret = True  # Means error as returned by trigger.report
+        if not item.getJobs():
+            # We don't send empty reports with +1, and the same goes
+            # for -1s (merge failures or transient errors), as they
+            # cannot be followed by +1s.
+            self.log.debug("No jobs for change %s" % item.change)
+            actions = []
+        elif item.didAllJobsSucceed():
+            self.log.debug("success %s" % (self.pipeline.success_actions))
+            actions = self.pipeline.success_actions
+            item.setReportedResult('SUCCESS')
+            self.pipeline._consecutive_failures = 0
+        elif item.didMergerFail():
+            actions = self.pipeline.merge_failure_actions
+            item.setReportedResult('MERGER_FAILURE')
+        else:
+            actions = self.pipeline.failure_actions
+            item.setReportedResult('FAILURE')
+            self.pipeline._consecutive_failures += 1
+        if self.pipeline._disabled:
+            actions = self.pipeline.disabled_actions
+        # Check whether to disable here, after the actions were chosen,
+        # so that the failure that reaches disable_at is still reported
+        # normally and the disabled reporters are only used /after/ it.
+        if (self.pipeline.disable_at and not self.pipeline._disabled and
+            self.pipeline._consecutive_failures >= self.pipeline.disable_at):
+            self.pipeline._disabled = True
+        if actions:
+            try:
+                self.log.info("Reporting item %s, actions: %s" %
+                              (item, actions))
+                ret = self.sendReport(actions, self.pipeline.source, item)
+                if ret:
+                    self.log.error("Reporting item %s received: %s" %
+                                   (item, ret))
+            except Exception:
+                self.log.exception("Exception while reporting:")
+                item.setReportedResult('ERROR')
+        return ret
+
+    def reportStats(self, item):
+        if not self.sched.statsd:
+            return
+        try:
+            # Update the gauge on enqueue and dequeue, but timers only
+            # when dequeuing.
+            if item.dequeue_time:
+                dt = int((item.dequeue_time - item.enqueue_time) * 1000)
+            else:
+                dt = None
+            items = len(self.pipeline.getAllItems())
+
+            # stats.timers.zuul.pipeline.NAME.resident_time
+            # stats_counts.zuul.pipeline.NAME.total_changes
+            # stats.gauges.zuul.pipeline.NAME.current_changes
+            key = 'zuul.pipeline.%s' % self.pipeline.name
+            self.sched.statsd.gauge(key + '.current_changes', items)
+            if dt:
+                self.sched.statsd.timing(key + '.resident_time', dt)
+                self.sched.statsd.incr(key + '.total_changes')
+
+            # stats.timers.zuul.pipeline.NAME.ORG.PROJECT.resident_time
+            # stats_counts.zuul.pipeline.NAME.ORG.PROJECT.total_changes
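+            # e.g. (hypothetical names) a pipeline "gate" and project
+            # "openstack/nova" produce keys such as
+            # zuul.pipeline.gate.openstack.nova.resident_time.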
+            project_name = item.change.project.name.replace('/', '.')
+            key += '.%s' % project_name
+            if dt:
+                self.sched.statsd.timing(key + '.resident_time', dt)
+                self.sched.statsd.incr(key + '.total_changes')
+        except Exception:
+            self.log.exception("Exception reporting pipeline stats")
diff --git a/zuul/manager/dependent.py b/zuul/manager/dependent.py
new file mode 100644
index 0000000..686a593
--- /dev/null
+++ b/zuul/manager/dependent.py
@@ -0,0 +1,205 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from zuul import model
+from zuul.manager import PipelineManager, StaticChangeQueueContextManager
+
+
+class DependentPipelineManager(PipelineManager):
+    """PipelineManager for handling interrelated Changes.
+
+    The DependentPipelineManager puts Changes that share a Pipeline
+    into a shared :py:class:`~zuul.model.ChangeQueue`. It then processes
+    them using Optimistic Branch Prediction logic with a Nearest
+    Non-Failing Item reparenting algorithm for handling errors.
+    """
+    log = logging.getLogger("zuul.DependentPipelineManager")
+    changes_merge = True
+
+    def __init__(self, *args, **kwargs):
+        super(DependentPipelineManager, self).__init__(*args, **kwargs)
+
+    def _postConfig(self, layout):
+        super(DependentPipelineManager, self)._postConfig(layout)
+        self.buildChangeQueues()
+
+    def buildChangeQueues(self):
+        self.log.debug("Building shared change queues")
+        change_queues = []
+
+        for project in self.pipeline.getProjects():
+            change_queue = model.ChangeQueue(
+                self.pipeline,
+                window=self.pipeline.window,
+                window_floor=self.pipeline.window_floor,
+                window_increase_type=self.pipeline.window_increase_type,
+                window_increase_factor=self.pipeline.window_increase_factor,
+                window_decrease_type=self.pipeline.window_decrease_type,
+                window_decrease_factor=self.pipeline.window_decrease_factor)
+            change_queue.addProject(project)
+            change_queues.append(change_queue)
+            self.log.debug("Created queue: %s" % change_queue)
+
+        # Iterate over all queues trying to combine them, and keep doing
+        # so until they can not be combined further.
+        last_change_queues = change_queues
+        while True:
+            new_change_queues = self.combineChangeQueues(last_change_queues)
+            if len(last_change_queues) == len(new_change_queues):
+                break
+            last_change_queues = new_change_queues
+
+        self.log.info("  Shared change queues:")
+        for queue in new_change_queues:
+            self.pipeline.addQueue(queue)
+            self.log.info("    %s containing %s" % (
+                queue, queue.generated_name))
+
+    def combineChangeQueues(self, change_queues):
+        self.log.debug("Combining shared queues")
+        new_change_queues = []
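+        # Example (hypothetical job names): if queue A runs jobs
+        # {py27, docs} and queue B runs {docs, pep8}, their job sets
+        # intersect on "docs", so A is merged into B and the two
+        # queues' projects share a single change queue.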
+        for a in change_queues:
+            merged_a = False
+            for b in new_change_queues:
+                if not a.getJobs().isdisjoint(b.getJobs()):
+                    self.log.debug("Merging queue %s into %s" % (a, b))
+                    b.mergeChangeQueue(a)
+                    merged_a = True
+                    break  # this breaks out of 'for b' and continues 'for a'
+            if not merged_a:
+                self.log.debug("Keeping queue %s" % (a))
+                new_change_queues.append(a)
+        return new_change_queues
+
+    def getChangeQueue(self, change, existing=None):
+        if existing:
+            return StaticChangeQueueContextManager(existing)
+        return StaticChangeQueueContextManager(
+            self.pipeline.getQueue(change.project))
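+
+    # The result is used as a context manager, as in
+    # checkForChangesNeededBy() below:
+    #
+    #     with self.getChangeQueue(change) as change_queue:
+    #         ...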
+
+    def isChangeReadyToBeEnqueued(self, change):
+        if not self.pipeline.source.canMerge(change,
+                                             self.getSubmitAllowNeeds()):
+            self.log.debug("Change %s can not merge, ignoring" % change)
+            return False
+        return True
+
+    def enqueueChangesBehind(self, change, quiet, ignore_requirements,
+                             change_queue):
+        to_enqueue = []
+        self.log.debug("Checking for changes needing %s:" % change)
+        if not hasattr(change, 'needed_by_changes'):
+            self.log.debug("  Changeish does not support dependencies")
+            return
+        for other_change in change.needed_by_changes:
+            with self.getChangeQueue(other_change) as other_change_queue:
+                if other_change_queue != change_queue:
+                    self.log.debug("  Change %s in project %s can not be "
+                                   "enqueued in the target queue %s" %
+                                   (other_change, other_change.project,
+                                    change_queue))
+                    continue
+            if self.pipeline.source.canMerge(other_change,
+                                             self.getSubmitAllowNeeds()):
+                self.log.debug("  Change %s needs %s and is ready to merge" %
+                               (other_change, change))
+                to_enqueue.append(other_change)
+
+        if not to_enqueue:
+            self.log.debug("  No changes need %s" % change)
+
+        for other_change in to_enqueue:
+            self.addChange(other_change, quiet=quiet,
+                           ignore_requirements=ignore_requirements,
+                           change_queue=change_queue)
+
+    def enqueueChangesAhead(self, change, quiet, ignore_requirements,
+                            change_queue):
+        ret = self.checkForChangesNeededBy(change, change_queue)
+        if ret in [True, False]:
+            return ret
+        self.log.debug("  Changes %s must be merged ahead of %s" %
+                       (ret, change))
+        for needed_change in ret:
+            r = self.addChange(needed_change, quiet=quiet,
+                               ignore_requirements=ignore_requirements,
+                               change_queue=change_queue)
+            if not r:
+                return False
+        return True
+
+    def checkForChangesNeededBy(self, change, change_queue):
+        self.log.debug("Checking for changes needed by %s:" % change)
+        # Return True if it is okay to proceed enqueuing this change,
+        # False if the change should not be enqueued.
+        if not hasattr(change, 'needs_changes'):
+            self.log.debug("  Changeish does not support dependencies")
+            return True
+        if not change.needs_changes:
+            self.log.debug("  No changes needed")
+            return True
+        changes_needed = []
+        # Ignore supplied change_queue
+        with self.getChangeQueue(change) as change_queue:
+            for needed_change in change.needs_changes:
+                self.log.debug("  Change %s needs change %s:" % (
+                    change, needed_change))
+                if needed_change.is_merged:
+                    self.log.debug("  Needed change is merged")
+                    continue
+                with self.getChangeQueue(needed_change) as needed_change_queue:
+                    if needed_change_queue != change_queue:
+                        self.log.debug("  Change %s in project %s does not "
+                                       "share a change queue with %s "
+                                       "in project %s" %
+                                       (needed_change, needed_change.project,
+                                        change, change.project))
+                        return False
+                if not needed_change.is_current_patchset:
+                    self.log.debug("  Needed change is not the "
+                                   "current patchset")
+                    return False
+                if self.isChangeAlreadyInQueue(needed_change, change_queue):
+                    self.log.debug("  Needed change is already ahead "
+                                   "in the queue")
+                    continue
+                if self.pipeline.source.canMerge(needed_change,
+                                                 self.getSubmitAllowNeeds()):
+                    self.log.debug("  Change %s is needed" % needed_change)
+                    if needed_change not in changes_needed:
+                        changes_needed.append(needed_change)
+                        continue
+                # The needed change can't be merged.
+                self.log.debug("  Change %s is needed but can not be merged" %
+                               needed_change)
+                return False
+        if changes_needed:
+            return changes_needed
+        return True
+
+    def getFailingDependentItems(self, item):
+        if not hasattr(item.change, 'needs_changes'):
+            return None
+        if not item.change.needs_changes:
+            return None
+        failing_items = set()
+        for needed_change in item.change.needs_changes:
+            needed_item = self.getItemForChange(needed_change)
+            if not needed_item:
+                continue
+            if needed_item.current_build_set.failing_reasons:
+                failing_items.add(needed_item)
+        if failing_items:
+            return failing_items
+        return None
diff --git a/zuul/manager/independent.py b/zuul/manager/independent.py
new file mode 100644
index 0000000..d3b6b0d
--- /dev/null
+++ b/zuul/manager/independent.py
@@ -0,0 +1,97 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from zuul import model
+from zuul.manager import PipelineManager, DynamicChangeQueueContextManager
+
+
+class IndependentPipelineManager(PipelineManager):
+    """PipelineManager that puts every Change into its own ChangeQueue."""
+
+    log = logging.getLogger("zuul.IndependentPipelineManager")
+    changes_merge = False
+
+    def _postConfig(self, layout):
+        super(IndependentPipelineManager, self)._postConfig(layout)
+
+    def getChangeQueue(self, change, existing=None):
+        # creates a new change queue for every change
+        if existing:
+            return DynamicChangeQueueContextManager(existing)
+        if change.project not in self.pipeline.getProjects():
+            self.pipeline.addProject(change.project)
+        change_queue = model.ChangeQueue(self.pipeline)
+        change_queue.addProject(change.project)
+        self.pipeline.addQueue(change_queue)
+        self.log.debug("Dynamically created queue %s", change_queue)
+        return DynamicChangeQueueContextManager(change_queue)
+
+    def enqueueChangesAhead(self, change, quiet, ignore_requirements,
+                            change_queue):
+        ret = self.checkForChangesNeededBy(change, change_queue)
+        if ret in [True, False]:
+            return ret
+        self.log.debug("  Changes %s must be merged ahead of %s" %
+                       (ret, change))
+        for needed_change in ret:
+            # This differs from the dependent pipeline by enqueuing
+            # changes ahead as "not live", that is, not intended to
+            # have jobs run.  Also, pipeline requirements are always
+            # ignored (which is safe because the changes are not
+            # live).
+            r = self.addChange(needed_change, quiet=True,
+                               ignore_requirements=True,
+                               live=False, change_queue=change_queue)
+            if not r:
+                return False
+        return True
+
+    def checkForChangesNeededBy(self, change, change_queue):
+        if self.pipeline.ignore_dependencies:
+            return True
+        self.log.debug("Checking for changes needed by %s:" % change)
+        # Return True if it is okay to proceed enqueuing this change,
+        # False if the change should not be enqueued.
+        if not hasattr(change, 'needs_changes'):
+            self.log.debug("  Changeish does not support dependencies")
+            return True
+        if not change.needs_changes:
+            self.log.debug("  No changes needed")
+            return True
+        changes_needed = []
+        for needed_change in change.needs_changes:
+            self.log.debug("  Change %s needs change %s:" % (
+                change, needed_change))
+            if needed_change.is_merged:
+                self.log.debug("  Needed change is merged")
+                continue
+            if self.isChangeAlreadyInQueue(needed_change, change_queue):
+                self.log.debug("  Needed change is already ahead in the queue")
+                continue
+            self.log.debug("  Change %s is needed" % needed_change)
+            if needed_change not in changes_needed:
+                changes_needed.append(needed_change)
+                continue
+            # This differs from the dependent pipeline check in not
+            # verifying that the dependent change is mergable.
+        if changes_needed:
+            return changes_needed
+        return True
+
+    def dequeueItem(self, item):
+        super(IndependentPipelineManager, self).dequeueItem(item)
+        # An independent pipeline manager dynamically removes empty
+        # queues
+        if not item.queue.queue:
+            self.pipeline.removeQueue(item.queue)
diff --git a/zuul/merger/client.py b/zuul/merger/client.py
index 950c385..1e98532 100644
--- a/zuul/merger/client.py
+++ b/zuul/merger/client.py
@@ -14,6 +14,7 @@
 
 import json
 import logging
+import threading
 from uuid import uuid4
 
 import gear
@@ -55,6 +56,18 @@
         self.__merge_client.onBuildCompleted(job)
 
 
+class MergeJob(gear.Job):
+    def __init__(self, *args, **kw):
+        super(MergeJob, self).__init__(*args, **kw)
+        self.__event = threading.Event()
+
+    def setComplete(self):
+        self.__event.set()
+
+    def wait(self, timeout=300):
+        return self.__event.wait(timeout)
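+
+    # A MergeJob is created by MergeClient.submitJob(); callers may
+    # block on wait() until onBuildCompleted() marks the job complete
+    # via setComplete().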
+
+
 class MergeClient(object):
     log = logging.getLogger("zuul.MergeClient")
 
@@ -71,30 +84,33 @@
         self.gearman.addServer(server, port)
         self.log.debug("Waiting for gearman")
         self.gearman.waitForServer()
-        self.build_sets = {}
+        self.jobs = set()
 
     def stop(self):
         self.gearman.shutdown()
 
     def areMergesOutstanding(self):
-        if self.build_sets:
+        if self.jobs:
             return True
         return False
 
     def submitJob(self, name, data, build_set,
                   precedence=zuul.model.PRECEDENCE_NORMAL):
         uuid = str(uuid4().hex)
-        job = gear.Job(name,
+        job = MergeJob(name,
                        json.dumps(data),
                        unique=uuid)
+        job.build_set = build_set
         self.log.debug("Submitting job %s with data %s" % (job, data))
-        self.build_sets[uuid] = build_set
+        self.jobs.add(job)
         self.gearman.submitJob(job, precedence=precedence,
                                timeout=300)
+        return job
 
-    def mergeChanges(self, items, build_set,
+    def mergeChanges(self, items, build_set, files=None,
                      precedence=zuul.model.PRECEDENCE_NORMAL):
-        data = dict(items=items)
+        data = dict(items=items,
+                    files=files)
         self.submitJob('merger:merge', data, build_set, precedence)
 
     def updateRepo(self, project, url, build_set,
@@ -103,21 +119,30 @@
                     url=url)
         self.submitJob('merger:update', data, build_set, precedence)
 
+    def getFiles(self, project, url, branch, files,
+                 precedence=zuul.model.PRECEDENCE_HIGH):
+        data = dict(project=project,
+                    url=url,
+                    branch=branch,
+                    files=files)
+        job = self.submitJob('merger:cat', data, None, precedence)
+        return job
+
     def onBuildCompleted(self, job):
-        build_set = self.build_sets.get(job.unique)
-        if build_set:
-            data = getJobData(job)
-            zuul_url = data.get('zuul_url')
-            merged = data.get('merged', False)
-            updated = data.get('updated', False)
-            commit = data.get('commit')
-            self.log.info("Merge %s complete, merged: %s, updated: %s, "
-                          "commit: %s" %
-                          (job, merged, updated, build_set.commit))
-            self.sched.onMergeCompleted(build_set, zuul_url,
-                                        merged, updated, commit)
-            # The test suite expects the build_set to be removed from
-            # the internal dict after the wake flag is set.
-            del self.build_sets[job.unique]
-        else:
-            self.log.error("Unable to find build set for uuid %s" % job.unique)
+        data = getJobData(job)
+        zuul_url = data.get('zuul_url')
+        merged = data.get('merged', False)
+        updated = data.get('updated', False)
+        commit = data.get('commit')
+        files = data.get('files', {})
+        job.files = files
+        self.log.info("Merge %s complete, merged: %s, updated: %s, "
+                      "commit: %s" %
+                      (job, merged, updated, commit))
+        job.setComplete()
+        if job.build_set:
+            self.sched.onMergeCompleted(job.build_set, zuul_url,
+                                        merged, updated, commit, files)
+        # The test suite expects the job to be removed from the
+        # internal accounting after the wake flag is set.
+        self.jobs.remove(job)
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index b3cfaca..692dd83 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -77,12 +77,8 @@
         return self._initialized
 
     def createRepoObject(self):
-        try:
-            self._ensure_cloned()
-            repo = git.Repo(self.local_path)
-        except:
-            self.log.exception("Unable to initialize repo for %s" %
-                               self.local_path)
+        self._ensure_cloned()
+        repo = git.Repo(self.local_path)
         return repo
 
     def reset(self):
@@ -195,6 +191,20 @@
             origin.fetch()
         origin.fetch(tags=True)
 
+    def getFiles(self, files, branch=None, commit=None):
+        ret = {}
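+        # e.g. getFiles(['.zuul.yaml'], branch='master') (hypothetical
+        # arguments) returns {'.zuul.yaml': <contents>}, with None for
+        # any file missing from the tree.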
+        repo = self.createRepoObject()
+        if branch:
+            tree = repo.heads[branch].commit.tree
+        else:
+            tree = repo.commit(commit).tree
+        for fn in files:
+            if fn in tree:
+                ret[fn] = tree[fn].data_stream.read()
+            else:
+                ret[fn] = None
+        return ret
+
 
 class Merger(object):
     log = logging.getLogger("zuul.Merger")
@@ -209,7 +219,7 @@
         self.username = username
 
     def _makeSSHWrappers(self, working_root, connections):
-        for connection_name, connection in connections.items():
+        for connection_name, connection in connections.connections.items():
             sshkey = connection.connection_config.get('sshkey')
             if sshkey:
                 self._makeSSHWrapper(sshkey, working_root, connection_name)
@@ -339,9 +349,10 @@
                 return None
         return commit
 
-    def mergeChanges(self, items):
+    def mergeChanges(self, items, files=None):
         recent = {}
         commit = None
+        read_files = []
         for item in items:
             if item.get("number") and item.get("patchset"):
                 self.log.debug("Merging for change %s,%s." %
@@ -352,4 +363,16 @@
             commit = self._mergeItem(item, recent)
             if not commit:
                 return None
+            if files:
+                repo = self.getRepo(item['project'], item['url'])
+                repo_files = repo.getFiles(files, commit=commit)
+                read_files.append(dict(project=item['project'],
+                                       branch=item['branch'],
+                                       files=repo_files))
+        if files:
+            return commit.hexsha, read_files
         return commit.hexsha
+
+    def getFiles(self, project, url, branch, files):
+        repo = self.getRepo(project, url)
+        return repo.getFiles(files, branch=branch)
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index d56993c..750d560 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -68,6 +68,7 @@
     def register(self):
         self.worker.registerFunction("merger:merge")
         self.worker.registerFunction("merger:update")
+        self.worker.registerFunction("merger:cat")
 
     def stop(self):
         self.log.debug("Stopping")
@@ -90,6 +91,9 @@
                     elif job.name == 'merger:update':
                         self.log.debug("Got update job: %s" % job.unique)
                         self.update(job)
+                    elif job.name == 'merger:cat':
+                        self.log.debug("Got cat job: %s" % job.unique)
+                        self.cat(job)
                     else:
                         self.log.error("Unable to handle job %s" % job.name)
                         job.sendWorkFail()
@@ -101,10 +105,13 @@
 
     def merge(self, job):
         args = json.loads(job.arguments)
-        commit = self.merger.mergeChanges(args['items'])
-        result = dict(merged=(commit is not None),
-                      commit=commit,
+        ret = self.merger.mergeChanges(args['items'], args.get('files'))
+        result = dict(merged=(ret is not None),
                       zuul_url=self.zuul_url)
+        if args.get('files'):
+            result['commit'], result['files'] = ret
+        else:
+            result['commit'] = ret
         job.sendWorkComplete(json.dumps(result))
 
     def update(self, job):
@@ -113,3 +120,13 @@
         result = dict(updated=True,
                       zuul_url=self.zuul_url)
         job.sendWorkComplete(json.dumps(result))
+
+    def cat(self, job):
+        args = json.loads(job.arguments)
+        self.merger.updateRepo(args['project'], args['url'])
+        files = self.merger.getFiles(args['project'], args['url'],
+                                     args['branch'], args['files'])
+        result = dict(updated=True,
+                      files=files,
+                      zuul_url=self.zuul_url)
+        job.sendWorkComplete(json.dumps(result))
diff --git a/zuul/model.py b/zuul/model.py
index 46b0b98..ce3d1a2 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -68,14 +68,30 @@
 
 
 class Pipeline(object):
-    """A top-level pipeline such as check, gate, post, etc."""
-    def __init__(self, name):
+    """A configuration that ties triggers, reporters, managers and sources.
+
+    Source
+        Where changes should come from. It is a named connection to
+        an external service defined in zuul.conf
+
+    Trigger
+        A description of which events should be processed
+
+    Manager
+        Responsible for enqueuing and dequeuing Changes
+
+    Reporter
+        Communicates success and failure results somewhere
+    """
+    def __init__(self, name, layout):
         self.name = name
+        self.layout = layout
         self.description = None
         self.failure_message = None
         self.merge_failure_message = None
         self.success_message = None
         self.footer_message = None
+        self.start_message = None
         self.dequeue_on_new_patchset = True
         self.ignore_dependencies = False
         self.job_trees = {}  # project -> JobTree
@@ -83,6 +99,7 @@
         self.queues = []
         self.precedence = PRECEDENCE_NORMAL
         self.source = None
+        self.triggers = []
         self.start_actions = []
         self.success_actions = []
         self.failure_actions = []
@@ -98,6 +115,16 @@
         self.window_decrease_type = None
         self.window_decrease_factor = None
 
+    @property
+    def actions(self):
+        return (
+            self.start_actions +
+            self.success_actions +
+            self.failure_actions +
+            self.merge_failure_actions +
+            self.disabled_actions
+        )
+
     def __repr__(self):
         return '<Pipeline %s>' % self.name
 
@@ -132,134 +159,6 @@
         tree = self.job_trees.get(project)
         return tree
 
-    def getJobs(self, item):
-        if not item.live:
-            return []
-        tree = self.getJobTree(item.change.project)
-        if not tree:
-            return []
-        return item.change.filterJobs(tree.getJobs())
-
-    def _findJobsToRun(self, job_trees, item, mutex):
-        torun = []
-        if item.item_ahead:
-            # Only run jobs if any 'hold' jobs on the change ahead
-            # have completed successfully.
-            if self.isHoldingFollowingChanges(item.item_ahead):
-                return []
-        for tree in job_trees:
-            job = tree.job
-            result = None
-            if job:
-                if not job.changeMatches(item.change):
-                    continue
-                build = item.current_build_set.getBuild(job.name)
-                if build:
-                    result = build.result
-                else:
-                    # There is no build for the root of this job tree,
-                    # so we should run it.
-                    if mutex.acquire(item, job):
-                        # If this job needs a mutex, either acquire it or make
-                        # sure that we have it before running the job.
-                        torun.append(job)
-            # If there is no job, this is a null job tree, and we should
-            # run all of its jobs.
-            if result == 'SUCCESS' or not job:
-                torun.extend(self._findJobsToRun(tree.job_trees, item, mutex))
-        return torun
-
-    def findJobsToRun(self, item, mutex):
-        if not item.live:
-            return []
-        tree = self.getJobTree(item.change.project)
-        if not tree:
-            return []
-        return self._findJobsToRun(tree.job_trees, item, mutex)
-
-    def haveAllJobsStarted(self, item):
-        for job in self.getJobs(item):
-            build = item.current_build_set.getBuild(job.name)
-            if not build or not build.start_time:
-                return False
-        return True
-
-    def areAllJobsComplete(self, item):
-        for job in self.getJobs(item):
-            build = item.current_build_set.getBuild(job.name)
-            if not build or not build.result:
-                return False
-        return True
-
-    def didAllJobsSucceed(self, item):
-        for job in self.getJobs(item):
-            if not job.voting:
-                continue
-            build = item.current_build_set.getBuild(job.name)
-            if not build:
-                return False
-            if build.result != 'SUCCESS':
-                return False
-        return True
-
-    def didMergerSucceed(self, item):
-        if item.current_build_set.unable_to_merge:
-            return False
-        return True
-
-    def didAnyJobFail(self, item):
-        for job in self.getJobs(item):
-            if not job.voting:
-                continue
-            build = item.current_build_set.getBuild(job.name)
-            if build and build.result and (build.result != 'SUCCESS'):
-                return True
-        return False
-
-    def isHoldingFollowingChanges(self, item):
-        if not item.live:
-            return False
-        for job in self.getJobs(item):
-            if not job.hold_following_changes:
-                continue
-            build = item.current_build_set.getBuild(job.name)
-            if not build:
-                return True
-            if build.result != 'SUCCESS':
-                return True
-
-        if not item.item_ahead:
-            return False
-        return self.isHoldingFollowingChanges(item.item_ahead)
-
-    def setResult(self, item, build):
-        if build.retry:
-            item.removeBuild(build)
-        elif build.result != 'SUCCESS':
-            # Get a JobTree from a Job so we can find only its dependent jobs
-            root = self.getJobTree(item.change.project)
-            tree = root.getJobTreeForJob(build.job)
-            for job in tree.getJobs():
-                fakebuild = Build(job, None)
-                fakebuild.result = 'SKIPPED'
-                item.addBuild(fakebuild)
-
-    def setUnableToMerge(self, item):
-        item.current_build_set.unable_to_merge = True
-        root = self.getJobTree(item.change.project)
-        for job in root.getJobs():
-            fakebuild = Build(job, None)
-            fakebuild.result = 'SKIPPED'
-            item.addBuild(fakebuild)
-
-    def setDequeuedNeedingChange(self, item):
-        item.dequeued_needing_change = True
-        root = self.getJobTree(item.change.project)
-        for job in root.getJobs():
-            fakebuild = Build(job, None)
-            fakebuild.result = 'SKIPPED'
-            item.addBuild(fakebuild)
-
     def getChangesInQueue(self):
         changes = []
         for shared_queue in self.queues:
@@ -302,11 +201,22 @@
 
 
 class ChangeQueue(object):
-    """DependentPipelines have multiple parallel queues shared by
-    different projects; this is one of them.  For instance, there may
-    a queue shared by interrelated projects foo and bar, and a second
-    queue for independent project baz.  Pipelines have one or more
-    ChangeQueues."""
+    """A ChangeQueue contains Changes to be processed related projects.
+
+    A Pipeline with a DependentPipelineManager has multiple parallel
+    ChangeQueues shared by different projects. For instance, there may a
+    ChangeQueue shared by interrelated projects foo and bar, and a second queue
+    for independent project baz.
+
+    A Pipeline with an IndependentPipelineManager puts every Change into its
+    own ChangeQueue
+
+    The ChangeQueue Window is inspired by TCP windows and controlls how many
+    Changes in a given ChangeQueue will be considered active and ready to
+    be processed. If a Change succeeds, the Window is increased by
+    `window_increase_factor`. If a Change fails, the Window is decreased by
+    `window_decrease_factor`.
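+
+    For example, with the defaults below (linear increase by 1,
+    exponential decrease by a factor of 2), a window of 20 grows to 21
+    after a successful Change and shrinks to 10 after a failing Change,
+    never dropping below `window_floor`.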
+    """
     def __init__(self, pipeline, window=0, window_floor=1,
                  window_increase_type='linear', window_increase_factor=1,
                  window_decrease_type='exponential', window_decrease_factor=2):
@@ -333,20 +243,10 @@
     def addProject(self, project):
         if project not in self.projects:
             self.projects.append(project)
-            self._jobs |= set(self.pipeline.getJobTree(project).getJobs())
 
             names = [x.name for x in self.projects]
             names.sort()
             self.generated_name = ', '.join(names)
-
-            for job in self._jobs:
-                if job.queue_name:
-                    if (self.assigned_name and
-                            job.queue_name != self.assigned_name):
-                        raise Exception("More than one name assigned to "
-                                        "change queue: %s != %s" %
-                                        (self.assigned_name, job.queue_name))
-                    self.assigned_name = job.queue_name
             self.name = self.assigned_name or self.generated_name
 
     def enqueueChange(self, change):
@@ -425,13 +325,22 @@
 
 
 class Project(object):
+    """A Project represents a git repository such as openstack/nova."""
+
+    # NOTE: Projects should only be instantiated via a Source object
+    # so that they are associated with and cached by their Connection.
+    # This makes a Project instance a unique identifier for a given
+    # project from a given source.
+
     def __init__(self, name, foreign=False):
         self.name = name
         self.merge_mode = MERGER_MERGE_RESOLVE
         # foreign projects are those referenced in dependencies
         # of layout projects, this should matter
         # when deciding whether to enqueue their changes
+        # TODOv3 (jeblair): re-add support for foreign projects if needed
         self.foreign = foreign
+        self.unparsed_config = None
 
     def __str__(self):
         return self.name
@@ -441,105 +350,94 @@
 
 
 class Job(object):
+    """A Job represents the defintion of actions to perform."""
+
+    attributes = dict(
+        timeout=None,
+        # variables={},
+        nodes=[],
+        auth={},
+        workspace=None,
+        pre_run=None,
+        post_run=None,
+        voting=None,
+        hold_following_changes=None,
+        failure_message=None,
+        success_message=None,
+        failure_url=None,
+        success_url=None,
+        # Matchers.  These are separate so they can be individually
+        # overridden.
+        branch_matcher=None,
+        file_matcher=None,
+        irrelevant_file_matcher=None,  # skip-if
+        parameter_function=None,  # TODOv3(jeblair): remove
+        tags=set(),
+        mutex=None,
+    )
+
     def __init__(self, name):
-        # If you add attributes here, be sure to add them to the copy method.
         self.name = name
-        self.queue_name = None
-        self.failure_message = None
-        self.success_message = None
-        self.failure_pattern = None
-        self.success_pattern = None
-        self.parameter_function = None
-        self.tags = set()
-        self.mutex = None
-        # A metajob should only supply values for attributes that have
-        # been explicitly provided, so avoid setting boolean defaults.
-        if self.is_metajob:
-            self.hold_following_changes = None
-            self.voting = None
-        else:
-            self.hold_following_changes = False
-            self.voting = True
-        self.branches = []
-        self._branches = []
-        self.files = []
-        self._files = []
-        self.skip_if_matcher = None
-        self.swift = {}
+        self.project_source = None
+        for k, v in self.attributes.items():
+            setattr(self, k, v)
+
+    def __eq__(self, other):
+        # Compare the name and all inheritable attributes to determine
+        # whether two jobs with the same name are identically
+        # configured.  Useful upon reconfiguration.
+        if not isinstance(other, Job):
+            return False
+        if self.name != other.name:
+            return False
+        for k, v in self.attributes.items():
+            if getattr(self, k) != getattr(other, k):
+                return False
+        return True
 
     def __str__(self):
         return self.name
 
     def __repr__(self):
-        return '<Job %s>' % (self.name)
+        return '<Job %s>' % (self.name,)
 
-    @property
-    def is_metajob(self):
-        return self.name.startswith('^')
+    def inheritFrom(self, other):
+        """Copy the inheritable attributes which have been set on the other
+        job to this job."""
 
-    def copy(self, other):
-        if other.failure_message:
-            self.failure_message = other.failure_message
-        if other.success_message:
-            self.success_message = other.success_message
-        if other.failure_pattern:
-            self.failure_pattern = other.failure_pattern
-        if other.success_pattern:
-            self.success_pattern = other.success_pattern
-        if other.parameter_function:
-            self.parameter_function = other.parameter_function
-        if other.branches:
-            self.branches = other.branches[:]
-            self._branches = other._branches[:]
-        if other.files:
-            self.files = other.files[:]
-            self._files = other._files[:]
-        if other.skip_if_matcher:
-            self.skip_if_matcher = other.skip_if_matcher.copy()
-        if other.swift:
-            self.swift.update(other.swift)
-        if other.mutex:
-            self.mutex = other.mutex
-        # Tags are merged via a union rather than a destructive copy
-        # because they are intended to accumulate as metajobs are
-        # applied.
-        if other.tags:
-            self.tags = self.tags.union(other.tags)
-        # Only non-None values should be copied for boolean attributes.
-        if other.hold_following_changes is not None:
-            self.hold_following_changes = other.hold_following_changes
-        if other.voting is not None:
-            self.voting = other.voting
+        if not isinstance(other, Job):
+            raise Exception("Job unable to inherit from %s" % (other,))
+        for k, v in self.attributes.items():
+            if getattr(other, k) != v and k != 'auth':
+                setattr(self, k, getattr(other, k))
+        # Inherit auth only if explicitly allowed
+        if other.auth and 'inherit' in other.auth and other.auth['inherit']:
+            setattr(self, 'auth', getattr(other, 'auth'))
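+        # For example (hypothetical jobs): if job "base" explicitly set
+        # timeout=30, then Job('py27').inheritFrom(base) copies
+        # timeout=30 onto py27, while attributes still at their
+        # defaults on "base" are left untouched.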
 
     def changeMatches(self, change):
-        matches_branch = False
-        for branch in self.branches:
-            if hasattr(change, 'branch') and branch.match(change.branch):
-                matches_branch = True
-            if hasattr(change, 'ref') and branch.match(change.ref):
-                matches_branch = True
-        if self.branches and not matches_branch:
+        if self.branch_matcher and not self.branch_matcher.matches(change):
             return False
 
-        matches_file = False
-        for f in self.files:
-            if hasattr(change, 'files'):
-                for cf in change.files:
-                    if f.match(cf):
-                        matches_file = True
-        if self.files and not matches_file:
+        if self.file_matcher and not self.file_matcher.matches(change):
             return False
 
-        if self.skip_if_matcher and self.skip_if_matcher.matches(change):
+        # NB: This is a negative match.
+        if (self.irrelevant_file_matcher and
+            self.irrelevant_file_matcher.matches(change)):
             return False
 
         return True
 
 
 class JobTree(object):
-    """ A JobTree represents an instance of one Job, and holds JobTrees
-    whose jobs should be run if that Job succeeds.  A root node of a
-    JobTree will have no associated Job. """
+    """A JobTree holds one or more Jobs to represent Job dependencies.
+
+    If Job bar should only execute when Job foo succeeds, then there will
+    be a JobTree for foo, which will contain a JobTree for bar. A JobTree
+    can hold more than one dependent JobTree, such that jobs bar and bang
+    both depend on job foo being successful.
+
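+    A minimal sketch (hypothetical job names)::
+
+        root = JobTree(None)                # root has no associated Job
+        foo_tree = JobTree(Job('foo'))
+        root.job_trees.append(foo_tree)
+        bar_tree = JobTree(Job('bar'))      # bar runs after foo succeeds
+        foo_tree.job_trees.append(bar_tree)
+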
+    A root node of a JobTree will have no associated Job."""
 
     def __init__(self, job):
         self.job = job
@@ -570,13 +468,25 @@
                 return ret
         return None
 
+    def inheritFrom(self, other):
+        if other.job:
+            self.job = Job(other.job.name)
+            self.job.inheritFrom(other.job)
+        for other_tree in other.job_trees:
+            this_tree = self.getJobTreeForJob(other_tree.job)
+            if not this_tree:
+                this_tree = JobTree(None)
+                self.job_trees.append(this_tree)
+            this_tree.inheritFrom(other_tree)
+
 
 class Build(object):
+    """A Build is an instance of a single running Job."""
+
     def __init__(self, job, uuid):
         self.job = job
         self.uuid = uuid
         self.url = None
-        self.number = None
         self.result = None
         self.build_set = None
         self.launch_time = time.time()
@@ -597,7 +507,7 @@
 
 
 class Worker(object):
-    """A model of the worker running a job"""
+    """Information about the specific worker executing a Build."""
     def __init__(self):
         self.name = "Unknown"
         self.hostname = None
@@ -621,7 +531,34 @@
         return '<Worker %s>' % self.name
 
 
+class RepoFiles(object):
+    """RepoFiles holds config-file content for per-project job config."""
+    # When we ask a merger to prepare a future multiple-repo state and
+    # collect files so that we can dynamically load our configuration,
+    # this class provides easy access to that data.
+    def __init__(self):
+        self.projects = {}
+
+    def __repr__(self):
+        return '<RepoFiles %s>' % self.projects
+
+    def setFiles(self, items):
+        self.projects = {}
+        for item in items:
+            project = self.projects.setdefault(item['project'], {})
+            branch = project.setdefault(item['branch'], {})
+            branch.update(item['files'])
+
+    def getFile(self, project, branch, fn):
+        return self.projects.get(project, {}).get(branch, {}).get(fn)
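+
+    # A minimal usage sketch (hypothetical values):
+    #
+    #     files = RepoFiles()
+    #     files.setFiles([dict(project='org/project', branch='master',
+    #                          files={'.zuul.yaml': '...'})])
+    #     files.getFile('org/project', 'master', '.zuul.yaml')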
+
+
 class BuildSet(object):
+    """Contains the Builds for a Change representing potential future state.
+
+    A BuildSet also holds the UUID used to produce the Zuul Ref that builders
+    check out.
+    """
     # Merge states:
     NEW = 1
     PENDING = 2
@@ -646,6 +583,10 @@
         self.unable_to_merge = False
         self.failing_reasons = []
         self.merge_state = self.NEW
+        self.nodes = {}  # job -> nodes
+        self.node_requests = {}  # job -> reqs
+        self.files = RepoFiles()
+        self.layout = None
 
     def __repr__(self):
         return '<BuildSet item: %s #builds: %s merge state: %s>' % (
@@ -684,9 +625,32 @@
         keys.sort()
         return [self.builds.get(x) for x in keys]
 
+    def getJobNodes(self, job_name):
+        # Return None if not provisioned; [] if no nodes required
+        return self.nodes.get(job_name)
+
+    def setJobNodeRequest(self, job_name, req):
+        if job_name in self.node_requests:
+            raise Exception("Prior node request for %s" % (job_name))
+        self.node_requests[job_name] = req
+
+    def getJobNodeRequest(self, job_name):
+        return self.node_requests.get(job_name)
+
+    def jobNodeRequestComplete(self, job_name, req, nodes):
+        if job_name in self.nodes:
+            raise Exception("Prior node request for %s" % (job_name))
+        self.nodes[job_name] = nodes
+        del self.node_requests[job_name]
+
 
 class QueueItem(object):
-    """A changish inside of a Pipeline queue"""
+    """Represents the position of a Change in a ChangeQueue.
+
+    All Changes are enqueued into a ChangeQueue in a QueueItem. The QueueItem
+    holds the current `BuildSet` as well as all previous `BuildSets` that were
+    produced for this `QueueItem`.
+    """
 
     def __init__(self, queue, change):
         self.pipeline = queue.pipeline
@@ -703,6 +667,8 @@
         self.reported = False
         self.active = False  # Whether an item is within an active window
         self.live = True  # Whether an item is intended to be processed at all
+        self.layout = None  # This item's shadow layout
+        self.job_tree = None
 
     def __repr__(self):
         if self.pipeline:
@@ -730,6 +696,174 @@
     def setReportedResult(self, result):
         self.current_build_set.result = result
 
+    def freezeJobTree(self):
+        """Find or create actual matching jobs for this item's change and
+        store the resulting job tree."""
+        layout = self.current_build_set.layout
+        self.job_tree = layout.createJobTree(self)
+
+    def hasJobTree(self):
+        """Returns True if the item has a job tree."""
+        return self.job_tree is not None
+
+    def getJobs(self):
+        if not self.live or not self.job_tree:
+            return []
+        return self.job_tree.getJobs()
+
+    def haveAllJobsStarted(self):
+        if not self.hasJobTree():
+            return False
+        for job in self.getJobs():
+            build = self.current_build_set.getBuild(job.name)
+            if not build or not build.start_time:
+                return False
+        return True
+
+    def areAllJobsComplete(self):
+        if not self.hasJobTree():
+            return False
+        for job in self.getJobs():
+            build = self.current_build_set.getBuild(job.name)
+            if not build or not build.result:
+                return False
+        return True
+
+    def didAllJobsSucceed(self):
+        if not self.hasJobTree():
+            return False
+        for job in self.getJobs():
+            if not job.voting:
+                continue
+            build = self.current_build_set.getBuild(job.name)
+            if not build:
+                return False
+            if build.result != 'SUCCESS':
+                return False
+        return True
+
+    def didAnyJobFail(self):
+        if not self.hasJobTree():
+            return False
+        for job in self.getJobs():
+            if not job.voting:
+                continue
+            build = self.current_build_set.getBuild(job.name)
+            if build and build.result and (build.result != 'SUCCESS'):
+                return True
+        return False
+
+    def didMergerFail(self):
+        if self.current_build_set.unable_to_merge:
+            return True
+        return False
+
+    def isHoldingFollowingChanges(self):
+        if not self.live:
+            return False
+        if not self.hasJobTree():
+            return False
+        for job in self.getJobs():
+            if not job.hold_following_changes:
+                continue
+            build = self.current_build_set.getBuild(job.name)
+            if not build:
+                return True
+            if build.result != 'SUCCESS':
+                return True
+
+        if not self.item_ahead:
+            return False
+        return self.item_ahead.isHoldingFollowingChanges()
+
+    def _findJobsToRun(self, job_trees, mutex):
+        torun = []
+        if self.item_ahead:
+            # Only run jobs if any 'hold' jobs on the change ahead
+            # have completed successfully.
+            if self.item_ahead.isHoldingFollowingChanges():
+                return []
+        for tree in job_trees:
+            job = tree.job
+            result = None
+            if job:
+                if not job.changeMatches(self.change):
+                    continue
+                build = self.current_build_set.getBuild(job.name)
+                if build:
+                    result = build.result
+                else:
+                    # There is no build for the root of this job tree,
+                    # so we should run it.
+                    if mutex.acquire(self, job):
+                        # If this job needs a mutex, either acquire it or make
+                        # sure that we have it before running the job.
+                        torun.append(job)
+            # If there is no job, this is a null job tree, and we should
+            # run all of its jobs.
+            if result == 'SUCCESS' or not job:
+                torun.extend(self._findJobsToRun(tree.job_trees, mutex))
+        return torun
+
+    def findJobsToRun(self, mutex):
+        if not self.live:
+            return []
+        tree = self.job_tree
+        if not tree:
+            return []
+        return self._findJobsToRun(tree.job_trees, mutex)
+
+    def _findJobsToRequest(self, job_trees):
+        toreq = []
+        for tree in job_trees:
+            job = tree.job
+            if job:
+                if not job.changeMatches(self.change):
+                    continue
+                nodes = self.current_build_set.getJobNodes(job.name)
+                if nodes is None:
+                    req = self.current_build_set.getJobNodeRequest(job.name)
+                    if req is None:
+                        toreq.append(job)
+            # If there is no job, this is a null job tree, and we should
+            # request nodes for all of its jobs.
+            if not job:
+                toreq.extend(self._findJobsToRequest(tree.job_trees))
+        return toreq
+
+    def findJobsToRequest(self):
+        if not self.live:
+            return []
+        tree = self.job_tree
+        if not tree:
+            return []
+        return self._findJobsToRequest(tree.job_trees)
+
+    def setResult(self, build):
+        if build.retry:
+            self.removeBuild(build)
+        elif build.result != 'SUCCESS':
+            # Get a JobTree from a Job so we can find only its dependent jobs
+            tree = self.job_tree.getJobTreeForJob(build.job)
+            for job in tree.getJobs():
+                fakebuild = Build(job, None)
+                fakebuild.result = 'SKIPPED'
+                self.addBuild(fakebuild)
+
+    def setDequeuedNeedingChange(self):
+        self.dequeued_needing_change = True
+        self._setAllJobsSkipped()
+
+    def setUnableToMerge(self):
+        self.current_build_set.unable_to_merge = True
+        self._setAllJobsSkipped()
+
+    def _setAllJobsSkipped(self):
+        for job in self.getJobs():
+            fakebuild = Build(job, None)
+            fakebuild.result = 'SKIPPED'
+            self.addBuild(fakebuild)
+
     def formatJobResult(self, job, url_pattern=None):
         build = self.current_build_set.getBuild(job.name)
         result = build.result
@@ -737,13 +871,13 @@
         if result == 'SUCCESS':
             if job.success_message:
                 result = job.success_message
-            if job.success_pattern:
-                pattern = job.success_pattern
+            if job.success_url:
+                pattern = job.success_url
         elif result == 'FAILURE':
             if job.failure_message:
                 result = job.failure_message
-            if job.failure_pattern:
-                pattern = job.failure_pattern
+            if job.failure_url:
+                pattern = job.failure_url
         url = None
         if pattern:
             try:
@@ -788,7 +922,7 @@
         else:
             ret['owner'] = None
         max_remaining = 0
-        for job in self.pipeline.getJobs(self):
+        for job in self.getJobs():
             now = time.time()
             build = self.current_build_set.getBuild(job.name)
             elapsed = None
@@ -840,13 +974,12 @@
                 'pipeline': build.pipeline.name if build else None,
                 'canceled': build.canceled if build else None,
                 'retry': build.retry if build else None,
-                'number': build.number if build else None,
                 'node_labels': build.node_labels if build else [],
                 'node_name': build.node_name if build else None,
                 'worker': worker,
             })
 
-        if self.pipeline.haveAllJobsStarted(self):
+        if self.haveAllJobsStarted():
             ret['remaining_time'] = max_remaining
         else:
             ret['remaining_time'] = None
@@ -868,7 +1001,7 @@
                 changeish.project.name,
                 changeish._id(),
                 self.item_ahead)
-        for job in self.pipeline.getJobs(self):
+        for job in self.getJobs():
             build = self.current_build_set.getBuild(job.name)
             if build:
                 result = build.result
@@ -892,7 +1025,7 @@
 
 
 class Changeish(object):
-    """Something like a change; either a change or a ref"""
+    """Base class for Change and Ref."""
 
     def __init__(self, project):
         self.project = project
@@ -919,8 +1052,12 @@
     def getRelatedChanges(self):
         return set()
 
+    def updatesConfig(self):
+        return False
+
 
 class Change(Changeish):
+    """A proposed new state for a Project."""
     def __init__(self, project):
         super(Change, self).__init__(project)
         self.branch = None
@@ -970,8 +1107,14 @@
             related.update(c.getRelatedChanges())
         return related
 
+    def updatesConfig(self):
+        if 'zuul.yaml' in self.files or '.zuul.yaml' in self.files:
+            return True
+        return False
+
 
 class Ref(Changeish):
+    """An existing state of a Project."""
     def __init__(self, project):
         super(Ref, self).__init__(project)
         self.ref = None
@@ -1008,6 +1151,8 @@
 
 
 class NullChange(Changeish):
+    # TODOv3(jeblair): remove this in favor of enqueueing Refs (eg
+    # current master) instead.
     def __repr__(self):
         return '<NullChange for %s>' % (self.project)
 
@@ -1025,6 +1170,7 @@
 
 
 class TriggerEvent(object):
+    """Incoming event from an external system."""
     def __init__(self):
         self.data = None
         # common
@@ -1069,6 +1215,7 @@
 
 
 class BaseFilter(object):
+    """Base Class for filtering which Changes and Events to process."""
     def __init__(self, required_approvals=[], reject_approvals=[]):
         self._required_approvals = copy.deepcopy(required_approvals)
         self.required_approvals = self._tidy_approvals(required_approvals)
@@ -1160,6 +1307,7 @@
 
 
 class EventFilter(BaseFilter):
+    """Allows a Pipeline to only respond to certain events."""
     def __init__(self, trigger, types=[], branches=[], refs=[],
                  event_approvals={}, comments=[], emails=[], usernames=[],
                  timespecs=[], required_approvals=[], reject_approvals=[],
@@ -1316,6 +1464,7 @@
 
 
 class ChangeishFilter(BaseFilter):
+    """Allows a Manager to only enqueue Changes that meet certain criteria."""
     def __init__(self, open=None, current_patchset=None,
                  statuses=[], required_approvals=[],
                  reject_approvals=[]):
@@ -1365,27 +1514,220 @@
         return True
 
 
-class Layout(object):
+class ProjectPipelineConfig(object):
+    """Represents a project configuration in the context of a pipeline."""
     def __init__(self):
+        self.job_tree = None
+        self.queue_name = None
+        # TODOv3(jeblair): add merge mode
+
+
+class ProjectConfig(object):
+    """Represents a project configuration."""
+    def __init__(self, name):
+        self.name = name
+        self.pipelines = {}
+
+
+class UnparsedAbideConfig(object):
+    """A collection of yaml lists that has not yet been parsed into objects.
+
+    An Abide is a collection of tenants.
+    """
+
+    def __init__(self):
+        self.tenants = []
+
+    def extend(self, conf):
+        if isinstance(conf, UnparsedAbideConfig):
+            self.tenants.extend(conf.tenants)
+            return
+
+        if not isinstance(conf, list):
+            raise Exception("Configuration items must be in the form of "
+                            "a list of dictionaries (when parsing %s)" %
+                            (conf,))
+        for item in conf:
+            if not isinstance(item, dict):
+                raise Exception("Configuration items must be in the form of "
+                                "a list of dictionaries (when parsing %s)" %
+                                (conf,))
+            if len(item.keys()) > 1:
+                raise Exception("Configuration item dictionaries must have "
+                                "a single key (when parsing %s)" %
+                                (conf,))
+            key, value = list(item.items())[0]
+            if key == 'tenant':
+                self.tenants.append(value)
+            else:
+                raise Exception("Configuration item not recognized "
+                                "(when parsing %s)" %
+                                (conf,))
+
+
+class UnparsedTenantConfig(object):
+    """A collection of yaml lists that has not yet been parsed into objects."""
+
+    def __init__(self):
+        self.pipelines = []
+        self.jobs = []
+        self.project_templates = []
+        self.projects = []
+
+    def copy(self):
+        r = UnparsedTenantConfig()
+        r.pipelines = copy.deepcopy(self.pipelines)
+        r.jobs = copy.deepcopy(self.jobs)
+        r.project_templates = copy.deepcopy(self.project_templates)
+        r.projects = copy.deepcopy(self.projects)
+        return r
+
+    def extend(self, conf, source_project=None):
+        if isinstance(conf, UnparsedTenantConfig):
+            self.pipelines.extend(conf.pipelines)
+            self.jobs.extend(conf.jobs)
+            self.project_templates.extend(conf.project_templates)
+            self.projects.extend(conf.projects)
+            return
+
+        if not isinstance(conf, list):
+            raise Exception("Configuration items must be in the form of "
+                            "a list of dictionaries (when parsing %s)" %
+                            (conf,))
+        for item in conf:
+            if not isinstance(item, dict):
+                raise Exception("Configuration items must be in the form of "
+                                "a list of dictionaries (when parsing %s)" %
+                                (conf,))
+            if len(item.keys()) > 1:
+                raise Exception("Configuration item dictionaries must have "
+                                "a single key (when parsing %s)" %
+                                (conf,))
+            key, value = list(item.items())[0]
+            if key == 'project':
+                self.projects.append(value)
+            elif key == 'job':
+                if source_project is not None:
+                    value['_source_project'] = source_project
+                self.jobs.append(value)
+            elif key == 'project-template':
+                self.project_templates.append(value)
+            elif key == 'pipeline':
+                self.pipelines.append(value)
+            else:
+                raise Exception("Configuration item `%s` not recognized "
+                                "(when parsing %s)" %
+                                (item, conf,))
+
+
+class Layout(object):
+    """Holds all of the Pipelines."""
+
+    def __init__(self):
+        self.tenant = None
         self.projects = {}
+        self.project_configs = {}
+        self.project_templates = {}
         self.pipelines = OrderedDict()
+        # This is a dictionary of name -> [jobs].  The first element
+        # of the list is the first job added with that name.  It is
+        # the reference definition for a given job.  Subsequent
+        # elements are aspects of that job with different matchers
+        # that override some attribute of the job.  These aspects all
+        # inherit from the reference definition.
         self.jobs = {}
-        self.metajobs = []
 
     def getJob(self, name):
         if name in self.jobs:
-            return self.jobs[name]
-        job = Job(name)
-        if job.is_metajob:
-            regex = re.compile(name)
-            self.metajobs.append((regex, job))
+            return self.jobs[name][0]
+        raise Exception("Job %s not defined" % (name,))
+
+    def getJobs(self, name):
+        return self.jobs.get(name, [])
+
+    def addJob(self, job):
+        # We can have multiple variants of a job all with the same
+        # name, but these variants must all be defined in the same repo.
+        prior_jobs = [j for j in self.getJobs(job.name)
+                      if j.source_project != job.source_project]
+        if prior_jobs:
+            raise Exception("Job %s in %s is not permitted to shadow "
+                            "job %s in %s" % (job, job.source_project,
+                                              prior_jobs[0],
+                                              prior_jobs[0].source_project))
+
+        if job.name in self.jobs:
+            self.jobs[job.name].append(job)
         else:
-            # Apply attributes from matching meta-jobs
-            for regex, metajob in self.metajobs:
-                if regex.match(name):
-                    job.copy(metajob)
-            self.jobs[name] = job
-        return job
+            self.jobs[job.name] = [job]
+
+    def addPipeline(self, pipeline):
+        self.pipelines[pipeline.name] = pipeline
+
+    def addProjectTemplate(self, project_template):
+        self.project_templates[project_template.name] = project_template
+
+    def addProjectConfig(self, project_config, update_pipeline=True):
+        self.project_configs[project_config.name] = project_config
+        # TODOv3(jeblair): tidy up the relationship between pipelines
+        # and projects and projectconfigs.  Specifically, move
+        # job_trees out of the pipeline since they are more dynamic
+        # than pipelines.  Remove the update_pipeline argument
+        if not update_pipeline:
+            return
+        for pipeline_name, pipeline_config in project_config.pipelines.items():
+            pipeline = self.pipelines[pipeline_name]
+            project = pipeline.source.getProject(project_config.name)
+            pipeline.job_trees[project] = pipeline_config.job_tree
+
+    def _createJobTree(self, change, job_trees, parent):
+        for tree in job_trees:
+            job = tree.job
+            if not job.changeMatches(change):
+                continue
+            frozen_job = Job(job.name)
+            frozen_tree = JobTree(frozen_job)
+            inherited = set()
+            for variant in self.getJobs(job.name):
+                if variant.changeMatches(change):
+                    if variant not in inherited:
+                        frozen_job.inheritFrom(variant)
+                        inherited.add(variant)
+            if job not in inherited:
+                # Only update from the job in the tree if it is
+                # unique, otherwise we might unset an attribute we
+                # have overloaded.
+                frozen_job.inheritFrom(job)
+            parent.job_trees.append(frozen_tree)
+            self._createJobTree(change, tree.job_trees, frozen_tree)
+
+    def createJobTree(self, item):
+        project_config = self.project_configs[item.change.project.name]
+        project_tree = project_config.pipelines[item.pipeline.name].job_tree
+        ret = JobTree(None)
+        self._createJobTree(item.change, project_tree.job_trees, ret)
+        return ret
+
+
+class Tenant(object):
+    """A tenant's Layout plus the repos its configuration is read from."""
+
+    def __init__(self, name):
+        self.name = name
+        self.layout = None
+        # The list of repos from which we will read main
+        # configuration.  (source, project)
+        self.config_repos = []
+        # The unparsed config from those repos.
+        self.config_repos_config = None
+        # The list of projects from which we will read in-repo
+        # configuration.  (source, project)
+        self.project_repos = []
+        # The unparsed config from those repos.
+        self.project_repos_config = None
+
+
+class Abide(object):
+    """A collection of Tenants."""
+
+    def __init__(self):
+        self.tenants = OrderedDict()
 
 
 class JobTimeData(object):
diff --git a/zuul/nodepool.py b/zuul/nodepool.py
new file mode 100644
index 0000000..85a18f1
--- /dev/null
+++ b/zuul/nodepool.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from uuid import uuid4
+
+
+class Node(object):
+    def __init__(self, name, image):
+        self.name = name
+        self.image = image
+
+
+class Request(object):
+    def __init__(self, build_set, job, nodes):
+        self.build_set = build_set
+        self.job = job
+        self.nodes = nodes
+        self.id = uuid4().hex
+
+
+class Nodepool(object):
+    def __init__(self, scheduler):
+        self.requests = {}
+        self.sched = scheduler
+
+    def requestNodes(self, build_set, job):
+        nodes = job.nodes
+        nodes = [Node(node['name'], node['image']) for node in nodes]
+        req = Request(build_set, job, nodes)
+        self.requests[req.id] = req
+        self._requestComplete(req.id)
+        return req
+
+    def cancelRequest(self, request):
+        if request.id in self.requests:
+            del self.requests[request.id]
+
+    def returnNodes(self, nodes, used=True):
+        pass
+
+    def _requestComplete(self, id):
+        req = self.requests[id]
+        del self.requests[id]
+        self.sched.onNodesProvisioned(req)
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index 0c9a8d8..d38eef2 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -27,18 +27,14 @@
 
     log = logging.getLogger("zuul.reporter.BaseReporter")
 
-    def __init__(self, reporter_config={}, sched=None, connection=None):
+    def __init__(self, reporter_config={}, connection=None):
         self.reporter_config = reporter_config
-        self.sched = sched
         self.connection = connection
         self._action = None
 
     def setAction(self, action):
         self._action = action
 
-    def stop(self):
-        """Stop the reporter."""
-
     @abc.abstractmethod
     def report(self, source, pipeline, item):
         """Send the compiled report message."""
@@ -64,6 +60,8 @@
         }
         return format_methods[self._action]
 
+    # TODOv3(jeblair): Consider removing pipeline argument in favor of
+    # item.pipeline
     def _formatItemReport(self, pipeline, item):
         """Format a report from the given items. Usually to provide results to
         a reporter taking free-form text."""
@@ -75,10 +73,7 @@
         return ret
 
     def _formatItemReportStart(self, pipeline, item):
-        msg = "Starting %s jobs." % pipeline.name
-        if self.sched.config.has_option('zuul', 'status_url'):
-            msg += "\n" + self.sched.config.get('zuul', 'status_url')
-        return msg
+        return pipeline.start_message.format(pipeline=pipeline)
 
     def _formatItemReportSuccess(self, pipeline, item):
         return (pipeline.success_message + '\n\n' +
@@ -87,7 +82,7 @@
     def _formatItemReportFailure(self, pipeline, item):
         if item.dequeued_needing_change:
             msg = 'This change depends on a change that failed to merge.\n'
-        elif not pipeline.didMergerSucceed(item):
+        elif item.didMergerFail():
             msg = pipeline.merge_failure_message
         else:
             msg = (pipeline.failure_message + '\n\n' +
@@ -109,12 +104,13 @@
         # Return the list of jobs portion of the report
         ret = ''
 
-        if self.sched.config.has_option('zuul', 'url_pattern'):
-            url_pattern = self.sched.config.get('zuul', 'url_pattern')
+        config = self.connection.sched.config
+        if config.has_option('zuul', 'url_pattern'):
+            url_pattern = config.get('zuul', 'url_pattern')
         else:
             url_pattern = None
 
-        for job in pipeline.getJobs(item):
+        for job in item.getJobs():
             build = item.current_build_set.getBuild(job.name)
             (result, url) = item.formatJobResult(job, url_pattern)
             if not job.voting:
@@ -122,9 +118,9 @@
             else:
                 voting = ''
 
-            if self.sched.config and self.sched.config.has_option(
+            if config and config.has_option(
                 'zuul', 'report_times'):
-                report_times = self.sched.config.getboolean(
+                report_times = config.getboolean(
                     'zuul', 'report_times')
             else:
                 report_times = True
@@ -142,9 +138,9 @@
             else:
                 elapsed = ''
             name = ''
-            if self.sched.config.has_option('zuul', 'job_name_in_report'):
-                if self.sched.config.getboolean('zuul',
-                                                'job_name_in_report'):
+            if config.has_option('zuul', 'job_name_in_report'):
+                if config.getboolean('zuul',
+                                     'job_name_in_report'):
                     name = job.name + ' '
             ret += '- %s%s : %s%s%s\n' % (name, url, result, elapsed,
                                           voting)
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index b52931e..8b7f50f 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -22,43 +22,15 @@
 import pickle
 import six
 from six.moves import queue as Queue
-import re
 import sys
 import threading
 import time
-import yaml
 
-from zuul import layoutvalidator
+from zuul import configloader
 from zuul import model
-from zuul.model import Pipeline, Project, ChangeQueue
-from zuul.model import ChangeishFilter, NullChange
-from zuul import change_matcher, exceptions
+from zuul import exceptions
 from zuul import version as zuul_version
 
-statsd = extras.try_import('statsd.statsd')
-
-
-def deep_format(obj, paramdict):
-    """Apply the paramdict via str.format() to all string objects found within
-       the supplied obj. Lists and dicts are traversed recursively.
-
-       Borrowed from Jenkins Job Builder project"""
-    if isinstance(obj, str):
-        ret = obj.format(**paramdict)
-    elif isinstance(obj, list):
-        ret = []
-        for item in obj:
-            ret.append(deep_format(item, paramdict))
-    elif isinstance(obj, dict):
-        ret = {}
-        for item in obj:
-            exp_item = item.format(**paramdict)
-
-            ret[exp_item] = deep_format(obj[item], paramdict)
-    else:
-        ret = obj
-    return ret
-
 
 class MutexHandler(object):
     log = logging.getLogger("zuul.MutexHandler")
@@ -216,12 +188,25 @@
     :arg str commit: The SHA of the merged commit (changes with refs).
     """
 
-    def __init__(self, build_set, zuul_url, merged, updated, commit):
+    def __init__(self, build_set, zuul_url, merged, updated, commit,
+                 files):
         self.build_set = build_set
         self.zuul_url = zuul_url
         self.merged = merged
         self.updated = updated
         self.commit = commit
+        self.files = files
+
+
+class NodesProvisionedEvent(ResultEvent):
+    """Nodes have been provisioned for a build_set
+
+    :arg BuildSet build_set: The build_set which has nodes.
+    :arg list of Node objects nodes: The provisioned nodes
+    """
+
+    def __init__(self, request):
+        self.request = request
 
 
 def toList(item):
@@ -246,8 +231,10 @@
         self._stopped = False
         self.launcher = None
         self.merger = None
+        self.connections = None
+        self.statsd = extras.try_import('statsd.statsd')
+        # TODO(jeblair): fix this
         self.mutex = MutexHandler()
-        self.connections = dict()
         # Despite triggers being part of the pipeline, there is one trigger set
         # per scheduler. The pipeline handles the trigger filters but since
         # the events are handled by the scheduler itself it needs to handle
@@ -259,7 +246,7 @@
         self.trigger_event_queue = Queue.Queue()
         self.result_event_queue = Queue.Queue()
         self.management_event_queue = Queue.Queue()
-        self.layout = model.Layout()
+        self.abide = model.Abide()
 
         if not testonly:
             time_dir = self._get_time_database_dir()
@@ -268,18 +255,8 @@
         self.zuul_version = zuul_version.version_info.release_string()
         self.last_reconfigured = None
 
-        # A set of reporter configuration keys to action mapping
-        self._reporter_actions = {
-            'start': 'start_actions',
-            'success': 'success_actions',
-            'failure': 'failure_actions',
-            'merge-failure': 'merge_failure_actions',
-            'disabled': 'disabled_actions',
-        }
-
     def stop(self):
         self._stopped = True
-        self._unloadDrivers()
         self.stopConnections()
         self.wake_event.set()
 
@@ -288,336 +265,14 @@
         # registerConnections as we don't want to do the onLoad event yet.
         return self._parseConfig(config_path, connections)
 
-    def _parseSkipIf(self, config_job):
-        cm = change_matcher
-        skip_matchers = []
-
-        for config_skip in config_job.get('skip-if', []):
-            nested_matchers = []
-
-            project_regex = config_skip.get('project')
-            if project_regex:
-                nested_matchers.append(cm.ProjectMatcher(project_regex))
-
-            branch_regex = config_skip.get('branch')
-            if branch_regex:
-                nested_matchers.append(cm.BranchMatcher(branch_regex))
-
-            file_regexes = toList(config_skip.get('all-files-match-any'))
-            if file_regexes:
-                file_matchers = [cm.FileMatcher(x) for x in file_regexes]
-                all_files_matcher = cm.MatchAllFiles(file_matchers)
-                nested_matchers.append(all_files_matcher)
-
-            # All patterns need to match a given skip-if predicate
-            skip_matchers.append(cm.MatchAll(nested_matchers))
-
-        if skip_matchers:
-            # Any skip-if predicate can be matched to trigger a skip
-            return cm.MatchAny(skip_matchers)
-
     def registerConnections(self, connections, load=True):
         # load: whether or not to trigger the onLoad for the connection. This
         # is useful for not doing a full load during layout validation.
         self.connections = connections
-        for connection_name, connection in self.connections.items():
-            connection.registerScheduler(self)
-            if load:
-                connection.onLoad()
+        self.connections.registerScheduler(self, load)
 
     def stopConnections(self):
-        for connection_name, connection in self.connections.items():
-            connection.onStop()
-
-    def _unloadDrivers(self):
-        for trigger in self.triggers.values():
-            trigger.stop()
-        self.triggers = {}
-        for pipeline in self.layout.pipelines.values():
-            pipeline.source.stop()
-            for action in self._reporter_actions.values():
-                for reporter in pipeline.__getattribute__(action):
-                    reporter.stop()
-
-    def _getDriver(self, dtype, connection_name, driver_config={}):
-        # Instantiate a driver such as a trigger, source or reporter
-        # TODO(jhesketh): Make this list dynamic or use entrypoints etc.
-        # Stevedore was not a good fit here due to the nature of triggers.
-        # Specifically we don't want to load a trigger per a pipeline as one
-        # trigger can listen to a stream (from gerrit, for example) and the
-        # scheduler decides which eventfilter to use. As such we want to load
-        # trigger+connection pairs uniquely.
-        drivers = {
-            'source': {
-                'gerrit': 'zuul.source.gerrit:GerritSource',
-            },
-            'trigger': {
-                'gerrit': 'zuul.trigger.gerrit:GerritTrigger',
-                'timer': 'zuul.trigger.timer:TimerTrigger',
-                'zuul': 'zuul.trigger.zuultrigger:ZuulTrigger',
-            },
-            'reporter': {
-                'gerrit': 'zuul.reporter.gerrit:GerritReporter',
-                'smtp': 'zuul.reporter.smtp:SMTPReporter',
-            },
-        }
-
-        # TODO(jhesketh): Check the connection_name exists
-        if connection_name in self.connections.keys():
-            driver_name = self.connections[connection_name].driver_name
-            connection = self.connections[connection_name]
-        else:
-            # In some cases a driver may not be related to a connection. For
-            # example, the 'timer' or 'zuul' triggers.
-            driver_name = connection_name
-            connection = None
-        driver = drivers[dtype][driver_name].split(':')
-        driver_instance = getattr(
-            __import__(driver[0], fromlist=['']), driver[1])(
-                driver_config, self, connection
-        )
-
-        if connection:
-            connection.registerUse(dtype, driver_instance)
-
-        return driver_instance
-
-    def _getSourceDriver(self, connection_name):
-        return self._getDriver('source', connection_name)
-
-    def _getReporterDriver(self, connection_name, driver_config={}):
-        return self._getDriver('reporter', connection_name, driver_config)
-
-    def _getTriggerDriver(self, connection_name, driver_config={}):
-        return self._getDriver('trigger', connection_name, driver_config)
-
-    def _parseConfig(self, config_path, connections):
-        layout = model.Layout()
-        project_templates = {}
-
-        if config_path:
-            config_path = os.path.expanduser(config_path)
-            if not os.path.exists(config_path):
-                raise Exception("Unable to read layout config file at %s" %
-                                config_path)
-        with open(config_path) as config_file:
-            data = yaml.load(config_file)
-
-        validator = layoutvalidator.LayoutValidator()
-        validator.validate(data, connections)
-
-        config_env = {}
-        for include in data.get('includes', []):
-            if 'python-file' in include:
-                fn = include['python-file']
-                if not os.path.isabs(fn):
-                    base = os.path.dirname(os.path.realpath(config_path))
-                    fn = os.path.join(base, fn)
-                fn = os.path.expanduser(fn)
-                with open(fn) as _f:
-                    code = compile(_f.read(), fn, 'exec')
-                    six.exec_(code, config_env)
-
-        for conf_pipeline in data.get('pipelines', []):
-            pipeline = Pipeline(conf_pipeline['name'])
-            pipeline.description = conf_pipeline.get('description')
-            # TODO(jeblair): remove backwards compatibility:
-            pipeline.source = self._getSourceDriver(
-                conf_pipeline.get('source', 'gerrit'))
-            precedence = model.PRECEDENCE_MAP[conf_pipeline.get('precedence')]
-            pipeline.precedence = precedence
-            pipeline.failure_message = conf_pipeline.get('failure-message',
-                                                         "Build failed.")
-            pipeline.merge_failure_message = conf_pipeline.get(
-                'merge-failure-message', "Merge Failed.\n\nThis change or one "
-                "of its cross-repo dependencies was unable to be "
-                "automatically merged with the current state of its "
-                "repository. Please rebase the change and upload a new "
-                "patchset.")
-            pipeline.success_message = conf_pipeline.get('success-message',
-                                                         "Build succeeded.")
-            pipeline.footer_message = conf_pipeline.get('footer-message', "")
-            pipeline.dequeue_on_new_patchset = conf_pipeline.get(
-                'dequeue-on-new-patchset', True)
-            pipeline.ignore_dependencies = conf_pipeline.get(
-                'ignore-dependencies', False)
-
-            for conf_key, action in self._reporter_actions.items():
-                reporter_set = []
-                if conf_pipeline.get(conf_key):
-                    for reporter_name, params \
-                        in conf_pipeline.get(conf_key).items():
-                        reporter = self._getReporterDriver(reporter_name,
-                                                           params)
-                        reporter.setAction(conf_key)
-                        reporter_set.append(reporter)
-                setattr(pipeline, action, reporter_set)
-
-            # If merge-failure actions aren't explicit, use the failure actions
-            if not pipeline.merge_failure_actions:
-                pipeline.merge_failure_actions = pipeline.failure_actions
-
-            pipeline.disable_at = conf_pipeline.get(
-                'disable-after-consecutive-failures', None)
-
-            pipeline.window = conf_pipeline.get('window', 20)
-            pipeline.window_floor = conf_pipeline.get('window-floor', 3)
-            pipeline.window_increase_type = conf_pipeline.get(
-                'window-increase-type', 'linear')
-            pipeline.window_increase_factor = conf_pipeline.get(
-                'window-increase-factor', 1)
-            pipeline.window_decrease_type = conf_pipeline.get(
-                'window-decrease-type', 'exponential')
-            pipeline.window_decrease_factor = conf_pipeline.get(
-                'window-decrease-factor', 2)
-
-            manager = globals()[conf_pipeline['manager']](self, pipeline)
-            pipeline.setManager(manager)
-            layout.pipelines[conf_pipeline['name']] = pipeline
-
-            if 'require' in conf_pipeline or 'reject' in conf_pipeline:
-                require = conf_pipeline.get('require', {})
-                reject = conf_pipeline.get('reject', {})
-                f = ChangeishFilter(
-                    open=require.get('open'),
-                    current_patchset=require.get('current-patchset'),
-                    statuses=toList(require.get('status')),
-                    required_approvals=toList(require.get('approval')),
-                    reject_approvals=toList(reject.get('approval'))
-                )
-                manager.changeish_filters.append(f)
-
-            for trigger_name, trigger_config\
-                in conf_pipeline.get('trigger').items():
-                if trigger_name not in self.triggers.keys():
-                    self.triggers[trigger_name] = \
-                        self._getTriggerDriver(trigger_name, trigger_config)
-
-            for trigger_name, trigger in self.triggers.items():
-                if trigger_name in conf_pipeline['trigger']:
-                    manager.event_filters += trigger.getEventFilters(
-                        conf_pipeline['trigger'][trigger_name])
-
-        for project_template in data.get('project-templates', []):
-            # Make sure the template only contains valid pipelines
-            tpl = dict(
-                (pipe_name, project_template.get(pipe_name))
-                for pipe_name in layout.pipelines.keys()
-                if pipe_name in project_template
-            )
-            project_templates[project_template.get('name')] = tpl
-
-        for config_job in data.get('jobs', []):
-            job = layout.getJob(config_job['name'])
-            # Be careful to only set attributes explicitly present on
-            # this job, to avoid squashing attributes set by a meta-job.
-            m = config_job.get('queue-name', None)
-            if m:
-                job.queue_name = m
-            m = config_job.get('failure-message', None)
-            if m:
-                job.failure_message = m
-            m = config_job.get('success-message', None)
-            if m:
-                job.success_message = m
-            m = config_job.get('failure-pattern', None)
-            if m:
-                job.failure_pattern = m
-            m = config_job.get('success-pattern', None)
-            if m:
-                job.success_pattern = m
-            m = config_job.get('hold-following-changes', False)
-            if m:
-                job.hold_following_changes = True
-            m = config_job.get('voting', None)
-            if m is not None:
-                job.voting = m
-            m = config_job.get('mutex', None)
-            if m is not None:
-                job.mutex = m
-            tags = toList(config_job.get('tags'))
-            if tags:
-                # Tags are merged via a union rather than a
-                # destructive copy because they are intended to
-                # accumulate onto any previously applied tags from
-                # metajobs.
-                job.tags = job.tags.union(set(tags))
-            fname = config_job.get('parameter-function', None)
-            if fname:
-                func = config_env.get(fname, None)
-                if not func:
-                    raise Exception("Unable to find function %s" % fname)
-                job.parameter_function = func
-            branches = toList(config_job.get('branch'))
-            if branches:
-                job._branches = branches
-                job.branches = [re.compile(x) for x in branches]
-            files = toList(config_job.get('files'))
-            if files:
-                job._files = files
-                job.files = [re.compile(x) for x in files]
-            skip_if_matcher = self._parseSkipIf(config_job)
-            if skip_if_matcher:
-                job.skip_if_matcher = skip_if_matcher
-            swift = toList(config_job.get('swift'))
-            if swift:
-                for s in swift:
-                    job.swift[s['name']] = s
-
-        def add_jobs(job_tree, config_jobs):
-            for job in config_jobs:
-                if isinstance(job, list):
-                    for x in job:
-                        add_jobs(job_tree, x)
-                if isinstance(job, dict):
-                    for parent, children in job.items():
-                        parent_tree = job_tree.addJob(layout.getJob(parent))
-                        add_jobs(parent_tree, children)
-                if isinstance(job, str):
-                    job_tree.addJob(layout.getJob(job))
-
-        for config_project in data.get('projects', []):
-            project = Project(config_project['name'])
-            shortname = config_project['name'].split('/')[-1]
-
-            # This is reversed due to the prepend operation below, so
-            # the ultimate order is templates (in order) followed by
-            # statically defined jobs.
-            for requested_template in reversed(
-                config_project.get('template', [])):
-                # Fetch the template from 'project-templates'
-                tpl = project_templates.get(
-                    requested_template.get('name'))
-                # Expand it with the project context
-                requested_template['name'] = shortname
-                expanded = deep_format(tpl, requested_template)
-                # Finally merge the expansion with whatever has been
-                # already defined for this project.  Prepend our new
-                # jobs to existing ones (which may have been
-                # statically defined or defined by other templates).
-                for pipeline in layout.pipelines.values():
-                    if pipeline.name in expanded:
-                        config_project.update(
-                            {pipeline.name: expanded[pipeline.name] +
-                             config_project.get(pipeline.name, [])})
-
-            layout.projects[config_project['name']] = project
-            mode = config_project.get('merge-mode', 'merge-resolve')
-            project.merge_mode = model.MERGER_MAP[mode]
-            for pipeline in layout.pipelines.values():
-                if pipeline.name in config_project:
-                    job_tree = pipeline.addProject(project)
-                    config_jobs = config_project[pipeline.name]
-                    add_jobs(job_tree, config_jobs)
-
-        # All jobs should be defined at this point, get rid of
-        # metajobs so that getJob isn't doing anything weird.
-        layout.metajobs = []
-
-        for pipeline in layout.pipelines.values():
-            pipeline.manager._postConfig(layout)
-
-        return layout
+        self.connections.stop()
 
     def setLauncher(self, launcher):
         self.launcher = launcher
@@ -625,24 +280,14 @@
     def setMerger(self, merger):
         self.merger = merger
 
-    def getProject(self, name):
-        self.layout_lock.acquire()
-        p = None
-        try:
-            p = self.layout.projects.get(name)
-            if p is None:
-                self.log.info("Registering foreign project: %s" % name)
-                p = Project(name, foreign=True)
-                self.layout.projects[name] = p
-        finally:
-            self.layout_lock.release()
-        return p
+    def setNodepool(self, nodepool):
+        self.nodepool = nodepool
 
     def addEvent(self, event):
         self.log.debug("Adding trigger event: %s" % event)
         try:
-            if statsd:
-                statsd.incr('gerrit.event.%s' % event.type)
+            if self.statsd:
+                self.statsd.incr('gerrit.event.%s' % event.type)
         except:
             self.log.exception("Exception reporting event stats")
         self.trigger_event_queue.put(event)
@@ -667,10 +312,10 @@
         # timing) is recorded before setting the result.
         build.result = result
         try:
-            if statsd and build.pipeline:
+            if self.statsd and build.pipeline:
                 jobname = build.job.name.replace('.', '_')
                 key = 'zuul.pipeline.%s.all_jobs' % build.pipeline.name
-                statsd.incr(key)
+                self.statsd.incr(key)
                 for label in build.node_labels:
                     # Jenkins includes the node name in its list of labels, so
                     # we filter it out here, since that is not statistically
@@ -680,18 +325,18 @@
                     dt = int((build.start_time - build.launch_time) * 1000)
                     key = 'zuul.pipeline.%s.label.%s.wait_time' % (
                         build.pipeline.name, label)
-                    statsd.timing(key, dt)
+                    self.statsd.timing(key, dt)
                 key = 'zuul.pipeline.%s.job.%s.%s' % (build.pipeline.name,
                                                       jobname, build.result)
                 if build.result in ['SUCCESS', 'FAILURE'] and build.start_time:
                     dt = int((build.end_time - build.start_time) * 1000)
-                    statsd.timing(key, dt)
-                statsd.incr(key)
+                    self.statsd.timing(key, dt)
+                self.statsd.incr(key)
 
                 key = 'zuul.pipeline.%s.job.%s.wait_time' % (
                     build.pipeline.name, jobname)
                 dt = int((build.start_time - build.launch_time) * 1000)
-                statsd.timing(key, dt)
+                self.statsd.timing(key, dt)
         except:
             self.log.exception("Exception reporting runtime stats")
         event = BuildCompletedEvent(build)
@@ -699,11 +344,19 @@
         self.wake_event.set()
         self.log.debug("Done adding complete event for build: %s" % build)
 
-    def onMergeCompleted(self, build_set, zuul_url, merged, updated, commit):
+    def onMergeCompleted(self, build_set, zuul_url, merged, updated,
+                         commit, files):
         self.log.debug("Adding merge complete event for build set: %s" %
                        build_set)
-        event = MergeCompletedEvent(build_set, zuul_url,
-                                    merged, updated, commit)
+        event = MergeCompletedEvent(build_set, zuul_url, merged,
+                                    updated, commit, files)
+        self.result_event_queue.put(event)
+        self.wake_event.set()
+
+    def onNodesProvisioned(self, req):
+        self.log.debug("Adding nodes provisioned event for build set: %s" %
+                       req.build_set)
+        event = NodesProvisionedEvent(req)
         self.result_event_queue.put(event)
         self.wake_event.set()
 
@@ -811,82 +464,87 @@
         self.config = event.config
         try:
             self.log.debug("Performing reconfiguration")
-            self._unloadDrivers()
-            layout = self._parseConfig(
-                self.config.get('zuul', 'layout_config'), self.connections)
-            for name, new_pipeline in layout.pipelines.items():
-                old_pipeline = self.layout.pipelines.get(name)
-                if not old_pipeline:
-                    if self.layout.pipelines:
-                        # Don't emit this warning on startup
-                        self.log.warning("No old pipeline matching %s found "
-                                         "when reconfiguring" % name)
-                    continue
-                self.log.debug("Re-enqueueing changes for pipeline %s" % name)
-                items_to_remove = []
-                builds_to_cancel = []
-                last_head = None
-                for shared_queue in old_pipeline.queues:
-                    for item in shared_queue.queue:
-                        if not item.item_ahead:
-                            last_head = item
-                        item.item_ahead = None
-                        item.items_behind = []
-                        item.pipeline = None
-                        item.queue = None
-                        project_name = item.change.project.name
-                        item.change.project = layout.projects.get(project_name)
-                        if not item.change.project:
-                            self.log.debug("Project %s not defined, "
-                                           "re-instantiating as foreign" %
-                                           project_name)
-                            project = Project(project_name, foreign=True)
-                            layout.projects[project_name] = project
-                            item.change.project = project
-                        item_jobs = new_pipeline.getJobs(item)
+            loader = configloader.ConfigLoader()
+            abide = loader.loadConfig(
+                self.config.get('zuul', 'tenant_config'),
+                self, self.merger, self.connections)
+            for tenant in abide.tenants.values():
+                self._reconfigureTenant(tenant)
+            self.abide = abide
+        finally:
+            self.layout_lock.release()
+
+    def _reenqueueTenant(self, old_tenant, tenant):
+        for name, new_pipeline in tenant.layout.pipelines.items():
+            old_pipeline = old_tenant.layout.pipelines.get(name)
+            if not old_pipeline:
+                self.log.warning("No old pipeline matching %s found "
+                                 "when reconfiguring" % name)
+                continue
+            self.log.debug("Re-enqueueing changes for pipeline %s" % name)
+            items_to_remove = []
+            builds_to_cancel = []
+            last_head = None
+            for shared_queue in old_pipeline.queues:
+                for item in shared_queue.queue:
+                    if not item.item_ahead:
+                        last_head = item
+                    item.item_ahead = None
+                    item.items_behind = []
+                    item.pipeline = None
+                    item.queue = None
+                    project_name = item.change.project.name
+                    item.change.project = new_pipeline.source.getProject(
+                        project_name)
+                    if new_pipeline.manager.reEnqueueItem(item,
+                                                          last_head):
+                        new_jobs = item.getJobs()
                         for build in item.current_build_set.getBuilds():
-                            job = layout.jobs.get(build.job.name)
-                            if job and job in item_jobs:
+                            job = item.layout.getJob(build.job.name)
+                            if job and job in new_jobs:
                                 build.job = job
                             else:
                                 item.removeBuild(build)
                                 builds_to_cancel.append(build)
-                        if not new_pipeline.manager.reEnqueueItem(item,
-                                                                  last_head):
-                            items_to_remove.append(item)
-                for item in items_to_remove:
-                    for build in item.current_build_set.getBuilds():
-                        builds_to_cancel.append(build)
-                for build in builds_to_cancel:
-                    self.log.warning(
-                        "Canceling build %s during reconfiguration" % (build,))
-                    try:
-                        self.launcher.cancel(build)
-                    except Exception:
-                        self.log.exception(
-                            "Exception while canceling build %s "
-                            "for change %s" % (build, item.change))
-            self.layout = layout
-            self.maintainConnectionCache()
-            for trigger in self.triggers.values():
-                trigger.postConfig()
-            for pipeline in self.layout.pipelines.values():
-                pipeline.source.postConfig()
-                for action in self._reporter_actions.values():
-                    for reporter in pipeline.__getattribute__(action):
-                        reporter.postConfig()
-            if statsd:
+                    else:
+                        items_to_remove.append(item)
+            for item in items_to_remove:
+                for build in item.current_build_set.getBuilds():
+                    builds_to_cancel.append(build)
+            for build in builds_to_cancel:
+                self.log.warning(
+                    "Canceling build %s during reconfiguration" % (build,))
                 try:
-                    for pipeline in self.layout.pipelines.values():
-                        items = len(pipeline.getAllItems())
-                        # stats.gauges.zuul.pipeline.NAME.current_changes
-                        key = 'zuul.pipeline.%s' % pipeline.name
-                        statsd.gauge(key + '.current_changes', items)
+                    self.launcher.cancel(build)
                 except Exception:
-                    self.log.exception("Exception reporting initial "
-                                       "pipeline stats:")
-        finally:
-            self.layout_lock.release()
+                    self.log.exception(
+                        "Exception while canceling build %s "
+                        "for change %s" % (build, item.change))
+
+    def _reconfigureTenant(self, tenant):
+        # This is called from _doReconfigureEvent while holding the
+        # layout lock
+        old_tenant = self.abide.tenants.get(tenant.name)
+        if old_tenant:
+            self._reenqueueTenant(old_tenant, tenant)
+        # TODOv3(jeblair): update for tenants
+        # self.maintainConnectionCache()
+        for pipeline in tenant.layout.pipelines.values():
+            pipeline.source.postConfig()
+            for trigger in pipeline.triggers:
+                trigger.postConfig(pipeline)
+            for reporter in pipeline.actions:
+                reporter.postConfig()
+        if self.statsd:
+            try:
+                for pipeline in tenant.layout.pipelines.values():
+                    items = len(pipeline.getAllItems())
+                    # stats.gauges.zuul.pipeline.NAME.current_changes
+                    key = 'zuul.pipeline.%s' % pipeline.name
+                    self.statsd.gauge(key + '.current_changes', items)
+            except Exception:
+                self.log.exception("Exception reporting initial "
+                                   "pipeline stats:")
 
     def _doPromoteEvent(self, event):
         pipeline = self.layout.pipelines[event.pipeline_name]
@@ -933,15 +591,16 @@
         change = pipeline.source.getChange(event, project)
         self.log.debug("Event %s for change %s was directly assigned "
                        "to pipeline %s" % (event, change, self))
-        self.log.info("Adding %s, %s to %s" %
+        self.log.info("Adding %s %s to %s" %
                       (project, change, pipeline))
         pipeline.manager.addChange(change, ignore_requirements=True)
 
     def _areAllBuildsComplete(self):
         self.log.debug("Checking if all builds are complete")
-        waiting = False
         if self.merger.areMergesOutstanding():
-            waiting = True
+            self.log.debug("Waiting on merger")
+            return False
+        waiting = False
         for pipeline in self.layout.pipelines.values():
             for item in pipeline.getAllItems():
                 for build in item.current_build_set.getBuilds():
@@ -952,11 +611,10 @@
         if not waiting:
             self.log.debug("All builds are complete")
             return True
-        self.log.debug("All builds are not complete")
         return False
 
     def run(self):
-        if statsd:
+        if self.statsd:
             self.log.debug("Statsd enabled")
         else:
             self.log.debug("Statsd disabled because python statsd "
@@ -975,7 +633,7 @@
                     self.process_management_queue()
 
                 # Give result events priority -- they let us stop builds,
-                # whereas trigger evensts cause us to launch builds.
+                # whereas trigger events cause us to launch builds.
                 while not self.result_event_queue.empty():
                     self.process_result_queue()
 
@@ -986,9 +644,10 @@
                 if self._pause and self._areAllBuildsComplete():
                     self._doPauseEvent()
 
-                for pipeline in self.layout.pipelines.values():
-                    while pipeline.manager.processQueue():
-                        pass
+                for tenant in self.abide.tenants.values():
+                    for pipeline in tenant.layout.pipelines.values():
+                        while pipeline.manager.processQueue():
+                            pass
 
             except Exception:
                 self.log.exception("Exception in run handler:")
@@ -998,12 +657,16 @@
                 self.run_handler_lock.release()
 
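The comment corrected above states the invariant behind the loop's ordering:
management events are handled first, result events are drained next because
they can only stop or finish builds, and trigger events come last because they
launch new ones. Condensed into a single pass (process_trigger_queue is an
assumed method name; the remaining calls appear in the hunks above):

    # Sketch of one iteration of run(); 'sched' stands in for the
    # Scheduler instance.
    def run_one_pass(sched):
        sched.process_management_queue()          # reconfigure / promote
        while not sched.result_event_queue.empty():
            sched.process_result_queue()          # may cancel builds
        while not sched.trigger_event_queue.empty():
            sched.process_trigger_queue()         # assumed name
        for tenant in sched.abide.tenants.values():
            for pipeline in tenant.layout.pipelines.values():
                while pipeline.manager.processQueue():
                    pass
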
     def maintainConnectionCache(self):
+        # TODOv3(jeblair): update for tenants
         relevant = set()
-        for pipeline in self.layout.pipelines.values():
-            self.log.debug("Gather relevant cache items for: %s" % pipeline)
-            for item in pipeline.getAllItems():
-                relevant.add(item.change)
-                relevant.update(item.change.getRelatedChanges())
+        for tenant in self.abide.tenants.values():
+            for pipeline in tenant.layout.pipelines.values():
+                self.log.debug("Gather relevant cache items for: %s" %
+                               pipeline)
+
+                for item in pipeline.getAllItems():
+                    relevant.add(item.change)
+                    relevant.update(item.change.getRelatedChanges())
         for connection in self.connections.values():
             connection.maintainCache(relevant)
             self.log.debug(
@@ -1015,31 +678,28 @@
         event = self.trigger_event_queue.get()
         self.log.debug("Processing trigger event %s" % event)
         try:
-            project = self.layout.projects.get(event.project_name)
-
-            for pipeline in self.layout.pipelines.values():
-                # Get the change even if the project is unknown to us for the
-                # use of updating the cache if there is another change
-                # depending on this foreign one.
-                try:
-                    change = pipeline.source.getChange(event, project)
-                except exceptions.ChangeNotFound as e:
-                    self.log.debug("Unable to get change %s from source %s. "
-                                   "(most likely looking for a change from "
-                                   "another connection trigger)",
-                                   e.change, pipeline.source)
-                    continue
-                if not project or project.foreign:
-                    self.log.debug("Project %s not found" % event.project_name)
-                    continue
-                if event.type == 'patchset-created':
-                    pipeline.manager.removeOldVersionsOfChange(change)
-                elif event.type == 'change-abandoned':
-                    pipeline.manager.removeAbandonedChange(change)
-                if pipeline.manager.eventMatches(event, change):
-                    self.log.info("Adding %s, %s to %s" %
-                                  (project, change, pipeline))
-                    pipeline.manager.addChange(change)
+            for tenant in self.abide.tenants.values():
+                for pipeline in tenant.layout.pipelines.values():
+                    # Get the change even if the project is unknown to
+                    # us, so the cache can be updated if another change
+                    # depends on this foreign one.
+                    try:
+                        change = pipeline.source.getChange(event)
+                    except exceptions.ChangeNotFound as e:
+                        self.log.debug("Unable to get change %s from "
+                                       "source %s (most likely looking "
+                                       "for a change from another "
+                                       "connection trigger)",
+                                       e.change, pipeline.source)
+                        continue
+                    if event.type == 'patchset-created':
+                        pipeline.manager.removeOldVersionsOfChange(change)
+                    elif event.type == 'change-abandoned':
+                        pipeline.manager.removeAbandonedChange(change)
+                    if pipeline.manager.eventMatches(event, change):
+                        self.log.info("Adding %s %s to %s" %
+                                      (change.project, change, pipeline))
+                        pipeline.manager.addChange(change)
         finally:
             self.trigger_event_queue.task_done()
 
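A source that cannot resolve an event's change signals this with
ChangeNotFound rather than returning None; the loop above logs it at debug
level and tries the next pipeline, since the event most likely belongs to
another connection's trigger. A hypothetical driver-side sketch (SketchSource
and _changeFromData are not part of this diff):

    from zuul import exceptions
    from zuul.source import BaseSource

    class SketchSource(BaseSource):
        def getChange(self, event):
            data = self.connection.query(event.change_number)
            if not data:
                raise exceptions.ChangeNotFound(event.change_number,
                                                event.patch_number)
            return self._changeFromData(data)    # hypothetical helper
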
@@ -1072,6 +732,8 @@
                 self._doBuildCompletedEvent(event)
             elif isinstance(event, MergeCompletedEvent):
                 self._doMergeCompletedEvent(event)
+            elif isinstance(event, NodesProvisionedEvent):
+                self._doNodesProvisionedEvent(event)
             else:
                 self.log.error("Unable to handle event %s" % event)
         finally:
@@ -1127,7 +789,22 @@
             return
         pipeline.manager.onMergeCompleted(event)
 
+    def _doNodesProvisionedEvent(self, event):
+        request = event.request
+        build_set = request.build_set
+        if build_set is not build_set.item.current_build_set:
+            self.log.warning("Build set %s is not current" % (build_set,))
+            self.nodepool.returnNodes(request.nodes, used=False)
+            return
+        pipeline = build_set.item.pipeline
+        if not pipeline:
+            self.log.warning("Build set %s is not associated with a pipeline" %
+                             (build_set,))
+            return
+        pipeline.manager.onNodesProvisioned(event)
+
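Node provisioning is asynchronous, so a completed request is validated before
use: if the item was reset while nodes were being allocated (for example,
because a change ahead failed), request.build_set no longer matches the item's
current build set and the nodes go back to nodepool unused. The two guards
above read as a single predicate:

    # Guard sketch; attribute names as in _doNodesProvisionedEvent.
    def request_is_current(request):
        build_set = request.build_set
        return (build_set is build_set.item.current_build_set and
                build_set.item.pipeline is not None)
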
     def formatStatusJSON(self):
+        # TODOv3(jeblair): use tenants
         if self.config.has_option('zuul', 'url_pattern'):
             url_pattern = self.config.get('zuul', 'url_pattern')
         else:
@@ -1160,1030 +837,3 @@
         for pipeline in self.layout.pipelines.values():
             pipelines.append(pipeline.formatStatusJSON(url_pattern))
         return json.dumps(data)
-
-
-class BasePipelineManager(object):
-    log = logging.getLogger("zuul.BasePipelineManager")
-
-    def __init__(self, sched, pipeline):
-        self.sched = sched
-        self.pipeline = pipeline
-        self.event_filters = []
-        self.changeish_filters = []
-
-    def __str__(self):
-        return "<%s %s>" % (self.__class__.__name__, self.pipeline.name)
-
-    def _postConfig(self, layout):
-        self.log.info("Configured Pipeline Manager %s" % self.pipeline.name)
-        self.log.info("  Source: %s" % self.pipeline.source)
-        self.log.info("  Requirements:")
-        for f in self.changeish_filters:
-            self.log.info("    %s" % f)
-        self.log.info("  Events:")
-        for e in self.event_filters:
-            self.log.info("    %s" % e)
-        self.log.info("  Projects:")
-
-        def log_jobs(tree, indent=0):
-            istr = '    ' + ' ' * indent
-            if tree.job:
-                efilters = ''
-                for b in tree.job._branches:
-                    efilters += str(b)
-                for f in tree.job._files:
-                    efilters += str(f)
-                if tree.job.skip_if_matcher:
-                    efilters += str(tree.job.skip_if_matcher)
-                if efilters:
-                    efilters = ' ' + efilters
-                tags = []
-                if tree.job.hold_following_changes:
-                    tags.append('[hold]')
-                if not tree.job.voting:
-                    tags.append('[nonvoting]')
-                if tree.job.mutex:
-                    tags.append('[mutex: %s]' % tree.job.mutex)
-                tags = ' '.join(tags)
-                self.log.info("%s%s%s %s" % (istr, repr(tree.job),
-                                             efilters, tags))
-            for x in tree.job_trees:
-                log_jobs(x, indent + 2)
-
-        for p in layout.projects.values():
-            tree = self.pipeline.getJobTree(p)
-            if tree:
-                self.log.info("    %s" % p)
-                log_jobs(tree)
-        self.log.info("  On start:")
-        self.log.info("    %s" % self.pipeline.start_actions)
-        self.log.info("  On success:")
-        self.log.info("    %s" % self.pipeline.success_actions)
-        self.log.info("  On failure:")
-        self.log.info("    %s" % self.pipeline.failure_actions)
-        self.log.info("  On merge-failure:")
-        self.log.info("    %s" % self.pipeline.merge_failure_actions)
-        self.log.info("  When disabled:")
-        self.log.info("    %s" % self.pipeline.disabled_actions)
-
-    def getSubmitAllowNeeds(self):
-        # Get a list of code review labels that are allowed to be
-        # "needed" in the submit records for a change, with respect
-        # to this queue.  In other words, the list of review labels
-        # this queue itself is likely to set before submitting.
-        allow_needs = set()
-        for action_reporter in self.pipeline.success_actions:
-            allow_needs.update(action_reporter.getSubmitAllowNeeds())
-        return allow_needs
-
-    def eventMatches(self, event, change):
-        if event.forced_pipeline:
-            if event.forced_pipeline == self.pipeline.name:
-                self.log.debug("Event %s for change %s was directly assigned "
-                               "to pipeline %s" % (event, change, self))
-                return True
-            else:
-                return False
-        for ef in self.event_filters:
-            if ef.matches(event, change):
-                self.log.debug("Event %s for change %s matched %s "
-                               "in pipeline %s" % (event, change, ef, self))
-                return True
-        return False
-
-    def isChangeAlreadyInPipeline(self, change):
-        # Checks live items in the pipeline
-        for item in self.pipeline.getAllItems():
-            if item.live and change.equals(item.change):
-                return True
-        return False
-
-    def isChangeAlreadyInQueue(self, change, change_queue):
-        # Checks any item in the specified change queue
-        for item in change_queue.queue:
-            if change.equals(item.change):
-                return True
-        return False
-
-    def reportStart(self, item):
-        if not self.pipeline._disabled:
-            try:
-                self.log.info("Reporting start, action %s item %s" %
-                              (self.pipeline.start_actions, item))
-                ret = self.sendReport(self.pipeline.start_actions,
-                                      self.pipeline.source, item)
-                if ret:
-                    self.log.error("Reporting item start %s received: %s" %
-                                   (item, ret))
-            except:
-                self.log.exception("Exception while reporting start:")
-
-    def sendReport(self, action_reporters, source, item,
-                   message=None):
-        """Sends the built message off to configured reporters.
-
-        Takes the action_reporters, item, message and extra options and
-        sends them to the pluggable reporters.
-        """
-        report_errors = []
-        if len(action_reporters) > 0:
-            for reporter in action_reporters:
-                ret = reporter.report(source, self.pipeline, item)
-                if ret:
-                    report_errors.append(ret)
-            if len(report_errors) == 0:
-                return
-        return report_errors
-
-    def isChangeReadyToBeEnqueued(self, change):
-        return True
-
-    def enqueueChangesAhead(self, change, quiet, ignore_requirements,
-                            change_queue):
-        return True
-
-    def enqueueChangesBehind(self, change, quiet, ignore_requirements,
-                             change_queue):
-        return True
-
-    def checkForChangesNeededBy(self, change, change_queue):
-        return True
-
-    def getFailingDependentItems(self, item):
-        return None
-
-    def getDependentItems(self, item):
-        orig_item = item
-        items = []
-        while item.item_ahead:
-            items.append(item.item_ahead)
-            item = item.item_ahead
-        self.log.info("Change %s depends on changes %s" %
-                      (orig_item.change,
-                       [x.change for x in items]))
-        return items
-
-    def getItemForChange(self, change):
-        for item in self.pipeline.getAllItems():
-            if item.change.equals(change):
-                return item
-        return None
-
-    def findOldVersionOfChangeAlreadyInQueue(self, change):
-        for item in self.pipeline.getAllItems():
-            if not item.live:
-                continue
-            if change.isUpdateOf(item.change):
-                return item
-        return None
-
-    def removeOldVersionsOfChange(self, change):
-        if not self.pipeline.dequeue_on_new_patchset:
-            return
-        old_item = self.findOldVersionOfChangeAlreadyInQueue(change)
-        if old_item:
-            self.log.debug("Change %s is a new version of %s, removing %s" %
-                           (change, old_item.change, old_item))
-            self.removeItem(old_item)
-
-    def removeAbandonedChange(self, change):
-        self.log.debug("Change %s abandoned, removing." % change)
-        for item in self.pipeline.getAllItems():
-            if not item.live:
-                continue
-            if item.change.equals(change):
-                self.removeItem(item)
-
-    def reEnqueueItem(self, item, last_head):
-        with self.getChangeQueue(item.change, last_head.queue) as change_queue:
-            if change_queue:
-                self.log.debug("Re-enqueing change %s in queue %s" %
-                               (item.change, change_queue))
-                change_queue.enqueueItem(item)
-
-                # Re-set build results in case any new jobs have been
-                # added to the tree.
-                for build in item.current_build_set.getBuilds():
-                    if build.result:
-                        self.pipeline.setResult(item, build)
-                # Similarly, reset the item state.
-                if item.current_build_set.unable_to_merge:
-                    self.pipeline.setUnableToMerge(item)
-                if item.dequeued_needing_change:
-                    self.pipeline.setDequeuedNeedingChange(item)
-
-                self.reportStats(item)
-                return True
-            else:
-                self.log.error("Unable to find change queue for project %s" %
-                               item.change.project)
-                return False
-
-    def addChange(self, change, quiet=False, enqueue_time=None,
-                  ignore_requirements=False, live=True,
-                  change_queue=None):
-        self.log.debug("Considering adding change %s" % change)
-
-        # If we are adding a live change, check if it's a live item
-        # anywhere in the pipeline.  Otherwise, we will perform the
-        # duplicate check below on the specific change_queue.
-        if live and self.isChangeAlreadyInPipeline(change):
-            self.log.debug("Change %s is already in pipeline, "
-                           "ignoring" % change)
-            return True
-
-        if not self.isChangeReadyToBeEnqueued(change):
-            self.log.debug("Change %s is not ready to be enqueued, ignoring" %
-                           change)
-            return False
-
-        if not ignore_requirements:
-            for f in self.changeish_filters:
-                if not f.matches(change):
-                    self.log.debug("Change %s does not match pipeline "
-                                   "requirement %s" % (change, f))
-                    return False
-
-        with self.getChangeQueue(change, change_queue) as change_queue:
-            if not change_queue:
-                self.log.debug("Unable to find change queue for "
-                               "change %s in project %s" %
-                               (change, change.project))
-                return False
-
-            if not self.enqueueChangesAhead(change, quiet, ignore_requirements,
-                                            change_queue):
-                self.log.debug("Failed to enqueue changes "
-                               "ahead of %s" % change)
-                return False
-
-            if self.isChangeAlreadyInQueue(change, change_queue):
-                self.log.debug("Change %s is already in queue, "
-                               "ignoring" % change)
-                return True
-
-            self.log.debug("Adding change %s to queue %s" %
-                           (change, change_queue))
-            item = change_queue.enqueueChange(change)
-            if enqueue_time:
-                item.enqueue_time = enqueue_time
-            item.live = live
-            self.reportStats(item)
-            if not quiet:
-                if len(self.pipeline.start_actions) > 0:
-                    self.reportStart(item)
-            self.enqueueChangesBehind(change, quiet, ignore_requirements,
-                                      change_queue)
-            for trigger in self.sched.triggers.values():
-                trigger.onChangeEnqueued(item.change, self.pipeline)
-            return True
-
-    def dequeueItem(self, item):
-        self.log.debug("Removing change %s from queue" % item.change)
-        item.queue.dequeueItem(item)
-
-    def removeItem(self, item):
-        # Remove an item from the queue, probably because it has been
-        # superseded by another change.
-        self.log.debug("Canceling builds behind change: %s "
-                       "because it is being removed." % item.change)
-        self.cancelJobs(item)
-        self.dequeueItem(item)
-        self.reportStats(item)
-
-    def _makeMergerItem(self, item):
-        # Create a dictionary with all info about the item needed by
-        # the merger.
-        number = None
-        patchset = None
-        oldrev = None
-        newrev = None
-        if hasattr(item.change, 'number'):
-            number = item.change.number
-            patchset = item.change.patchset
-        elif hasattr(item.change, 'newrev'):
-            oldrev = item.change.oldrev
-            newrev = item.change.newrev
-        connection_name = self.pipeline.source.connection.connection_name
-        return dict(project=item.change.project.name,
-                    url=self.pipeline.source.getGitUrl(
-                        item.change.project),
-                    connection_name=connection_name,
-                    merge_mode=item.change.project.merge_mode,
-                    refspec=item.change.refspec,
-                    branch=item.change.branch,
-                    ref=item.current_build_set.ref,
-                    number=number,
-                    patchset=patchset,
-                    oldrev=oldrev,
-                    newrev=newrev,
-                    )
-
-    def prepareRef(self, item):
-        # Returns True if the ref is ready, false otherwise
-        build_set = item.current_build_set
-        if build_set.merge_state == build_set.COMPLETE:
-            return True
-        if build_set.merge_state == build_set.PENDING:
-            return False
-        ref = build_set.ref
-        if hasattr(item.change, 'refspec') and not ref:
-            self.log.debug("Preparing ref for: %s" % item.change)
-            item.current_build_set.setConfiguration()
-            dependent_items = self.getDependentItems(item)
-            dependent_items.reverse()
-            all_items = dependent_items + [item]
-            merger_items = map(self._makeMergerItem, all_items)
-            self.sched.merger.mergeChanges(merger_items,
-                                           item.current_build_set,
-                                           self.pipeline.precedence)
-        else:
-            self.log.debug("Preparing update repo for: %s" % item.change)
-            url = self.pipeline.source.getGitUrl(item.change.project)
-            self.sched.merger.updateRepo(item.change.project.name,
-                                         url, build_set,
-                                         self.pipeline.precedence)
-        # merge:merge has been emitted properly:
-        build_set.merge_state = build_set.PENDING
-        return False
-
-    def _launchJobs(self, item, jobs):
-        self.log.debug("Launching jobs for change %s" % item.change)
-        dependent_items = self.getDependentItems(item)
-        for job in jobs:
-            self.log.debug("Found job %s for change %s" % (job, item.change))
-            try:
-                build = self.sched.launcher.launch(job, item,
-                                                   self.pipeline,
-                                                   dependent_items)
-                self.log.debug("Adding build %s of job %s to item %s" %
-                               (build, job, item))
-                item.addBuild(build)
-            except:
-                self.log.exception("Exception while launching job %s "
-                                   "for change %s:" % (job, item.change))
-
-    def launchJobs(self, item):
-        jobs = self.pipeline.findJobsToRun(item, self.sched.mutex)
-        if jobs:
-            self._launchJobs(item, jobs)
-
-    def cancelJobs(self, item, prime=True):
-        self.log.debug("Cancel jobs for change %s" % item.change)
-        canceled = False
-        old_build_set = item.current_build_set
-        if prime and item.current_build_set.ref:
-            item.resetAllBuilds()
-        for build in old_build_set.getBuilds():
-            try:
-                self.sched.launcher.cancel(build)
-            except:
-                self.log.exception("Exception while canceling build %s "
-                                   "for change %s" % (build, item.change))
-            build.result = 'CANCELED'
-            canceled = True
-        self.updateBuildDescriptions(old_build_set)
-        for item_behind in item.items_behind:
-            self.log.debug("Canceling jobs for change %s, behind change %s" %
-                           (item_behind.change, item.change))
-            if self.cancelJobs(item_behind, prime=prime):
-                canceled = True
-        return canceled
-
-    def _processOneItem(self, item, nnfi):
-        changed = False
-        item_ahead = item.item_ahead
-        if item_ahead and (not item_ahead.live):
-            item_ahead = None
-        change_queue = item.queue
-        failing_reasons = []  # Reasons this item is failing
-
-        if self.checkForChangesNeededBy(item.change, change_queue) is not True:
-            # It's not okay to enqueue this change, we should remove it.
-            self.log.info("Dequeuing change %s because "
-                          "it can no longer merge" % item.change)
-            self.cancelJobs(item)
-            self.dequeueItem(item)
-            self.pipeline.setDequeuedNeedingChange(item)
-            if item.live:
-                try:
-                    self.reportItem(item)
-                except exceptions.MergeFailure:
-                    pass
-            return (True, nnfi)
-        dep_items = self.getFailingDependentItems(item)
-        actionable = change_queue.isActionable(item)
-        item.active = actionable
-        ready = False
-        if dep_items:
-            failing_reasons.append('a needed change is failing')
-            self.cancelJobs(item, prime=False)
-        else:
-            item_ahead_merged = False
-            if (item_ahead and item_ahead.change.is_merged):
-                item_ahead_merged = True
-            if (item_ahead != nnfi and not item_ahead_merged):
-                # Our current base is different than what we expected,
-                # and it's not because our current base merged.  Something
-                # ahead must have failed.
-                self.log.info("Resetting builds for change %s because the "
-                              "item ahead, %s, is not the nearest non-failing "
-                              "item, %s" % (item.change, item_ahead, nnfi))
-                change_queue.moveItem(item, nnfi)
-                changed = True
-                self.cancelJobs(item)
-            if actionable:
-                ready = self.prepareRef(item)
-                if item.current_build_set.unable_to_merge:
-                    failing_reasons.append("it has a merge conflict")
-                    ready = False
-        if actionable and ready and self.launchJobs(item):
-            changed = True
-        if self.pipeline.didAnyJobFail(item):
-            failing_reasons.append("at least one job failed")
-        if (not item.live) and (not item.items_behind):
-            failing_reasons.append("is a non-live item with no items behind")
-            self.dequeueItem(item)
-            changed = True
-        if ((not item_ahead) and self.pipeline.areAllJobsComplete(item)
-            and item.live):
-            try:
-                self.reportItem(item)
-            except exceptions.MergeFailure:
-                failing_reasons.append("it did not merge")
-                for item_behind in item.items_behind:
-                    self.log.info("Resetting builds for change %s because the "
-                                  "item ahead, %s, failed to merge" %
-                                  (item_behind.change, item))
-                    self.cancelJobs(item_behind)
-            self.dequeueItem(item)
-            changed = True
-        elif not failing_reasons and item.live:
-            nnfi = item
-        item.current_build_set.failing_reasons = failing_reasons
-        if failing_reasons:
-            self.log.debug("%s is a failing item because %s" %
-                           (item, failing_reasons))
-        return (changed, nnfi)
-
-    def processQueue(self):
-        # Do whatever needs to be done for each change in the queue
-        self.log.debug("Starting queue processor: %s" % self.pipeline.name)
-        changed = False
-        for queue in self.pipeline.queues:
-            queue_changed = False
-            nnfi = None  # Nearest non-failing item
-            for item in queue.queue[:]:
-                item_changed, nnfi = self._processOneItem(
-                    item, nnfi)
-                if item_changed:
-                    queue_changed = True
-                self.reportStats(item)
-            if queue_changed:
-                changed = True
-                status = ''
-                for item in queue.queue:
-                    status += item.formatStatus()
-                if status:
-                    self.log.debug("Queue %s status is now:\n %s" %
-                                   (queue.name, status))
-        self.log.debug("Finished queue processor: %s (changed: %s)" %
-                       (self.pipeline.name, changed))
-        return changed
-
-    def updateBuildDescriptions(self, build_set):
-        for build in build_set.getBuilds():
-            try:
-                desc = self.formatDescription(build)
-                self.sched.launcher.setBuildDescription(build, desc)
-            except:
-                # Log the failure and let loop continue
-                self.log.error("Failed to update description for build %s" %
-                               (build))
-
-        if build_set.previous_build_set:
-            for build in build_set.previous_build_set.getBuilds():
-                try:
-                    desc = self.formatDescription(build)
-                    self.sched.launcher.setBuildDescription(build, desc)
-                except:
-                    # Log the failure and let loop continue
-                    self.log.error("Failed to update description for "
-                                   "build %s in previous build set" % (build))
-
-    def onBuildStarted(self, build):
-        self.log.debug("Build %s started" % build)
-        return True
-
-    def onBuildCompleted(self, build):
-        self.log.debug("Build %s completed" % build)
-        item = build.build_set.item
-
-        self.pipeline.setResult(item, build)
-        self.sched.mutex.release(item, build.job)
-        self.log.debug("Item %s status is now:\n %s" %
-                       (item, item.formatStatus()))
-        return True
-
-    def onMergeCompleted(self, event):
-        build_set = event.build_set
-        item = build_set.item
-        build_set.merge_state = build_set.COMPLETE
-        build_set.zuul_url = event.zuul_url
-        if event.merged:
-            build_set.commit = event.commit
-        elif event.updated:
-            if not isinstance(item.change, NullChange):
-                build_set.commit = item.change.newrev
-        if not build_set.commit and not isinstance(item.change, NullChange):
-            self.log.info("Unable to merge change %s" % item.change)
-            self.pipeline.setUnableToMerge(item)
-
-    def reportItem(self, item):
-        if not item.reported:
-            # _reportItem() returns True if it failed to report.
-            item.reported = not self._reportItem(item)
-        if self.changes_merge:
-            succeeded = self.pipeline.didAllJobsSucceed(item)
-            merged = item.reported
-            if merged:
-                merged = self.pipeline.source.isMerged(item.change,
-                                                       item.change.branch)
-            self.log.info("Reported change %s status: all-succeeded: %s, "
-                          "merged: %s" % (item.change, succeeded, merged))
-            change_queue = item.queue
-            if not (succeeded and merged):
-                self.log.debug("Reported change %s failed tests or failed "
-                               "to merge" % (item.change))
-                change_queue.decreaseWindowSize()
-                self.log.debug("%s window size decreased to %s" %
-                               (change_queue, change_queue.window))
-                raise exceptions.MergeFailure(
-                    "Change %s failed to merge" % item.change)
-            else:
-                change_queue.increaseWindowSize()
-                self.log.debug("%s window size increased to %s" %
-                               (change_queue, change_queue.window))
-
-                for trigger in self.sched.triggers.values():
-                    trigger.onChangeMerged(item.change, self.pipeline.source)
-
-    def _reportItem(self, item):
-        self.log.debug("Reporting change %s" % item.change)
-        ret = True  # Means error as returned by trigger.report
-        if not self.pipeline.getJobs(item):
-            # We don't send empty reports with +1,
-            # and the same for -1's (merge failures or transient errors)
-            # as they cannot be followed by +1's
-            self.log.debug("No jobs for change %s" % item.change)
-            actions = []
-        elif self.pipeline.didAllJobsSucceed(item):
-            self.log.debug("success %s" % (self.pipeline.success_actions))
-            actions = self.pipeline.success_actions
-            item.setReportedResult('SUCCESS')
-            self.pipeline._consecutive_failures = 0
-        elif not self.pipeline.didMergerSucceed(item):
-            actions = self.pipeline.merge_failure_actions
-            item.setReportedResult('MERGER_FAILURE')
-        else:
-            actions = self.pipeline.failure_actions
-            item.setReportedResult('FAILURE')
-            self.pipeline._consecutive_failures += 1
-        if self.pipeline._disabled:
-            actions = self.pipeline.disabled_actions
-        # Check here if we should disable so that we only use the disabled
-        # reporters /after/ the last disable_at failure is still reported as
-        # normal.
-        if (self.pipeline.disable_at and not self.pipeline._disabled and
-            self.pipeline._consecutive_failures >= self.pipeline.disable_at):
-            self.pipeline._disabled = True
-        if actions:
-            try:
-                self.log.info("Reporting item %s, actions: %s" %
-                              (item, actions))
-                ret = self.sendReport(actions, self.pipeline.source, item)
-                if ret:
-                    self.log.error("Reporting item %s received: %s" %
-                                   (item, ret))
-            except:
-                self.log.exception("Exception while reporting:")
-                item.setReportedResult('ERROR')
-        self.updateBuildDescriptions(item.current_build_set)
-        return ret
-
-    def formatDescription(self, build):
-        concurrent_changes = ''
-        concurrent_builds = ''
-        other_builds = ''
-
-        for change in build.build_set.other_changes:
-            concurrent_changes += '<li><a href="{change.url}">\
-              {change.number},{change.patchset}</a></li>'.format(
-                change=change)
-
-        change = build.build_set.item.change
-
-        for build in build.build_set.getBuilds():
-            if build.url:
-                concurrent_builds += """\
-<li>
-  <a href="{build.url}">
-  {build.job.name} #{build.number}</a>: {build.result}
-</li>
-""".format(build=build)
-            else:
-                concurrent_builds += """\
-<li>
-  {build.job.name}: {build.result}
-</li>""".format(build=build)
-
-        if build.build_set.previous_build_set:
-            other_build = build.build_set.previous_build_set.getBuild(
-                build.job.name)
-            if other_build:
-                other_builds += """\
-<li>
-  Preceded by: <a href="{build.url}">
-  {build.job.name} #{build.number}</a>
-</li>
-""".format(build=other_build)
-
-        if build.build_set.next_build_set:
-            other_build = build.build_set.next_build_set.getBuild(
-                build.job.name)
-            if other_build:
-                other_builds += """\
-<li>
-  Succeeded by: <a href="{build.url}">
-  {build.job.name} #{build.number}</a>
-</li>
-""".format(build=other_build)
-
-        result = build.build_set.result
-
-        if hasattr(change, 'number'):
-            ret = """\
-<p>
-  Triggered by change:
-    <a href="{change.url}">{change.number},{change.patchset}</a><br/>
-  Branch: <b>{change.branch}</b><br/>
-  Pipeline: <b>{self.pipeline.name}</b>
-</p>"""
-        elif hasattr(change, 'ref'):
-            ret = """\
-<p>
-  Triggered by reference:
-    {change.ref}</a><br/>
-  Old revision: <b>{change.oldrev}</b><br/>
-  New revision: <b>{change.newrev}</b><br/>
-  Pipeline: <b>{self.pipeline.name}</b>
-</p>"""
-        else:
-            ret = ""
-
-        if concurrent_changes:
-            ret += """\
-<p>
-  Other changes tested concurrently with this change:
-  <ul>{concurrent_changes}</ul>
-</p>
-"""
-        if concurrent_builds:
-            ret += """\
-<p>
-  All builds for this change set:
-  <ul>{concurrent_builds}</ul>
-</p>
-"""
-
-        if other_builds:
-            ret += """\
-<p>
-  Other build sets for this change:
-  <ul>{other_builds}</ul>
-</p>
-"""
-        if result:
-            ret += """\
-<p>
-  Reported result: <b>{result}</b>
-</p>
-"""
-
-        ret = ret.format(**locals())
-        return ret
-
-    def reportStats(self, item):
-        if not statsd:
-            return
-        try:
-            # Update the gauge on enqueue and dequeue, but timers only
-            # when dequeing.
-            if item.dequeue_time:
-                dt = int((item.dequeue_time - item.enqueue_time) * 1000)
-            else:
-                dt = None
-            items = len(self.pipeline.getAllItems())
-
-            # stats.timers.zuul.pipeline.NAME.resident_time
-            # stats_counts.zuul.pipeline.NAME.total_changes
-            # stats.gauges.zuul.pipeline.NAME.current_changes
-            key = 'zuul.pipeline.%s' % self.pipeline.name
-            statsd.gauge(key + '.current_changes', items)
-            if dt:
-                statsd.timing(key + '.resident_time', dt)
-                statsd.incr(key + '.total_changes')
-
-            # stats.timers.zuul.pipeline.NAME.ORG.PROJECT.resident_time
-            # stats_counts.zuul.pipeline.NAME.ORG.PROJECT.total_changes
-            project_name = item.change.project.name.replace('/', '.')
-            key += '.%s' % project_name
-            if dt:
-                statsd.timing(key + '.resident_time', dt)
-                statsd.incr(key + '.total_changes')
-        except:
-            self.log.exception("Exception reporting pipeline stats")
-
-
-class DynamicChangeQueueContextManager(object):
-    def __init__(self, change_queue):
-        self.change_queue = change_queue
-
-    def __enter__(self):
-        return self.change_queue
-
-    def __exit__(self, etype, value, tb):
-        if self.change_queue and not self.change_queue.queue:
-            self.change_queue.pipeline.removeQueue(self.change_queue.queue)
-
-
-class IndependentPipelineManager(BasePipelineManager):
-    log = logging.getLogger("zuul.IndependentPipelineManager")
-    changes_merge = False
-
-    def _postConfig(self, layout):
-        super(IndependentPipelineManager, self)._postConfig(layout)
-
-    def getChangeQueue(self, change, existing=None):
-        # creates a new change queue for every change
-        if existing:
-            return DynamicChangeQueueContextManager(existing)
-        if change.project not in self.pipeline.getProjects():
-            self.pipeline.addProject(change.project)
-        change_queue = ChangeQueue(self.pipeline)
-        change_queue.addProject(change.project)
-        self.pipeline.addQueue(change_queue)
-        self.log.debug("Dynamically created queue %s", change_queue)
-        return DynamicChangeQueueContextManager(change_queue)
-
-    def enqueueChangesAhead(self, change, quiet, ignore_requirements,
-                            change_queue):
-        ret = self.checkForChangesNeededBy(change, change_queue)
-        if ret in [True, False]:
-            return ret
-        self.log.debug("  Changes %s must be merged ahead of %s" %
-                       (ret, change))
-        for needed_change in ret:
-            # This differs from the dependent pipeline by enqueuing
-            # changes ahead as "not live", that is, not intended to
-            # have jobs run.  Also, pipeline requirements are always
-            # ignored (which is safe because the changes are not
-            # live).
-            r = self.addChange(needed_change, quiet=True,
-                               ignore_requirements=True,
-                               live=False, change_queue=change_queue)
-            if not r:
-                return False
-        return True
-
-    def checkForChangesNeededBy(self, change, change_queue):
-        if self.pipeline.ignore_dependencies:
-            return True
-        self.log.debug("Checking for changes needed by %s:" % change)
-        # Return true if okay to proceed enqueing this change,
-        # false if the change should not be enqueued.
-        if not hasattr(change, 'needs_changes'):
-            self.log.debug("  Changeish does not support dependencies")
-            return True
-        if not change.needs_changes:
-            self.log.debug("  No changes needed")
-            return True
-        changes_needed = []
-        for needed_change in change.needs_changes:
-            self.log.debug("  Change %s needs change %s:" % (
-                change, needed_change))
-            if needed_change.is_merged:
-                self.log.debug("  Needed change is merged")
-                continue
-            if self.isChangeAlreadyInQueue(needed_change, change_queue):
-                self.log.debug("  Needed change is already ahead in the queue")
-                continue
-            self.log.debug("  Change %s is needed" % needed_change)
-            if needed_change not in changes_needed:
-                changes_needed.append(needed_change)
-                continue
-            # This differs from the dependent pipeline check in not
-            # verifying that the dependent change is mergable.
-        if changes_needed:
-            return changes_needed
-        return True
-
-    def dequeueItem(self, item):
-        super(IndependentPipelineManager, self).dequeueItem(item)
-        # An independent pipeline manager dynamically removes empty
-        # queues
-        if not item.queue.queue:
-            self.pipeline.removeQueue(item.queue)
-
-
-class StaticChangeQueueContextManager(object):
-    def __init__(self, change_queue):
-        self.change_queue = change_queue
-
-    def __enter__(self):
-        return self.change_queue
-
-    def __exit__(self, etype, value, tb):
-        pass
-
-
-class DependentPipelineManager(BasePipelineManager):
-    log = logging.getLogger("zuul.DependentPipelineManager")
-    changes_merge = True
-
-    def __init__(self, *args, **kwargs):
-        super(DependentPipelineManager, self).__init__(*args, **kwargs)
-
-    def _postConfig(self, layout):
-        super(DependentPipelineManager, self)._postConfig(layout)
-        self.buildChangeQueues()
-
-    def buildChangeQueues(self):
-        self.log.debug("Building shared change queues")
-        change_queues = []
-
-        for project in self.pipeline.getProjects():
-            change_queue = ChangeQueue(
-                self.pipeline,
-                window=self.pipeline.window,
-                window_floor=self.pipeline.window_floor,
-                window_increase_type=self.pipeline.window_increase_type,
-                window_increase_factor=self.pipeline.window_increase_factor,
-                window_decrease_type=self.pipeline.window_decrease_type,
-                window_decrease_factor=self.pipeline.window_decrease_factor)
-            change_queue.addProject(project)
-            change_queues.append(change_queue)
-            self.log.debug("Created queue: %s" % change_queue)
-
-        # Iterate over all queues trying to combine them, and keep doing
-        # so until they can not be combined further.
-        last_change_queues = change_queues
-        while True:
-            new_change_queues = self.combineChangeQueues(last_change_queues)
-            if len(last_change_queues) == len(new_change_queues):
-                break
-            last_change_queues = new_change_queues
-
-        self.log.info("  Shared change queues:")
-        for queue in new_change_queues:
-            self.pipeline.addQueue(queue)
-            self.log.info("    %s containing %s" % (
-                queue, queue.generated_name))
-
-    def combineChangeQueues(self, change_queues):
-        self.log.debug("Combining shared queues")
-        new_change_queues = []
-        for a in change_queues:
-            merged_a = False
-            for b in new_change_queues:
-                if not a.getJobs().isdisjoint(b.getJobs()):
-                    self.log.debug("Merging queue %s into %s" % (a, b))
-                    b.mergeChangeQueue(a)
-                    merged_a = True
-                    break  # this breaks out of 'for b' and continues 'for a'
-            if not merged_a:
-                self.log.debug("Keeping queue %s" % (a))
-                new_change_queues.append(a)
-        return new_change_queues
-
-    def getChangeQueue(self, change, existing=None):
-        if existing:
-            return StaticChangeQueueContextManager(existing)
-        return StaticChangeQueueContextManager(
-            self.pipeline.getQueue(change.project))
-
-    def isChangeReadyToBeEnqueued(self, change):
-        if not self.pipeline.source.canMerge(change,
-                                             self.getSubmitAllowNeeds()):
-            self.log.debug("Change %s can not merge, ignoring" % change)
-            return False
-        return True
-
-    def enqueueChangesBehind(self, change, quiet, ignore_requirements,
-                             change_queue):
-        to_enqueue = []
-        self.log.debug("Checking for changes needing %s:" % change)
-        if not hasattr(change, 'needed_by_changes'):
-            self.log.debug("  Changeish does not support dependencies")
-            return
-        for other_change in change.needed_by_changes:
-            with self.getChangeQueue(other_change) as other_change_queue:
-                if other_change_queue != change_queue:
-                    self.log.debug("  Change %s in project %s can not be "
-                                   "enqueued in the target queue %s" %
-                                   (other_change, other_change.project,
-                                    change_queue))
-                    continue
-            if self.pipeline.source.canMerge(other_change,
-                                             self.getSubmitAllowNeeds()):
-                self.log.debug("  Change %s needs %s and is ready to merge" %
-                               (other_change, change))
-                to_enqueue.append(other_change)
-
-        if not to_enqueue:
-            self.log.debug("  No changes need %s" % change)
-
-        for other_change in to_enqueue:
-            self.addChange(other_change, quiet=quiet,
-                           ignore_requirements=ignore_requirements,
-                           change_queue=change_queue)
-
-    def enqueueChangesAhead(self, change, quiet, ignore_requirements,
-                            change_queue):
-        ret = self.checkForChangesNeededBy(change, change_queue)
-        if ret in [True, False]:
-            return ret
-        self.log.debug("  Changes %s must be merged ahead of %s" %
-                       (ret, change))
-        for needed_change in ret:
-            r = self.addChange(needed_change, quiet=quiet,
-                               ignore_requirements=ignore_requirements,
-                               change_queue=change_queue)
-            if not r:
-                return False
-        return True
-
-    def checkForChangesNeededBy(self, change, change_queue):
-        self.log.debug("Checking for changes needed by %s:" % change)
-        # Return true if okay to proceed enqueing this change,
-        # false if the change should not be enqueued.
-        if not hasattr(change, 'needs_changes'):
-            self.log.debug("  Changeish does not support dependencies")
-            return True
-        if not change.needs_changes:
-            self.log.debug("  No changes needed")
-            return True
-        changes_needed = []
-        # Ignore supplied change_queue
-        with self.getChangeQueue(change) as change_queue:
-            for needed_change in change.needs_changes:
-                self.log.debug("  Change %s needs change %s:" % (
-                    change, needed_change))
-                if needed_change.is_merged:
-                    self.log.debug("  Needed change is merged")
-                    continue
-                with self.getChangeQueue(needed_change) as needed_change_queue:
-                    if needed_change_queue != change_queue:
-                        self.log.debug("  Change %s in project %s does not "
-                                       "share a change queue with %s "
-                                       "in project %s" %
-                                       (needed_change, needed_change.project,
-                                        change, change.project))
-                        return False
-                if not needed_change.is_current_patchset:
-                    self.log.debug("  Needed change is not the "
-                                   "current patchset")
-                    return False
-                if self.isChangeAlreadyInQueue(needed_change, change_queue):
-                    self.log.debug("  Needed change is already ahead "
-                                   "in the queue")
-                    continue
-                if self.pipeline.source.canMerge(needed_change,
-                                                 self.getSubmitAllowNeeds()):
-                    self.log.debug("  Change %s is needed" % needed_change)
-                    if needed_change not in changes_needed:
-                        changes_needed.append(needed_change)
-                        continue
-                # The needed change can't be merged.
-                self.log.debug("  Change %s is needed but can not be merged" %
-                               needed_change)
-                return False
-        if changes_needed:
-            return changes_needed
-        return True
-
-    def getFailingDependentItems(self, item):
-        if not hasattr(item.change, 'needs_changes'):
-            return None
-        if not item.change.needs_changes:
-            return None
-        failing_items = set()
-        for needed_change in item.change.needs_changes:
-            needed_item = self.getItemForChange(needed_change)
-            if not needed_item:
-                continue
-            if needed_item.current_build_set.failing_reasons:
-                failing_items.add(needed_item)
-        if failing_items:
-            return failing_items
-        return None
diff --git a/zuul/source/__init__.py b/zuul/source/__init__.py
index cb4501a..d92d47a 100644
--- a/zuul/source/__init__.py
+++ b/zuul/source/__init__.py
@@ -27,14 +27,10 @@
 
     Defines the exact public methods that must be supplied."""
 
-    def __init__(self, source_config={}, sched=None, connection=None):
+    def __init__(self, source_config={}, connection=None):
         self.source_config = source_config
-        self.sched = sched
         self.connection = connection
 
-    def stop(self):
-        """Stop the source."""
-
     @abc.abstractmethod
     def getRefSha(self, project, ref):
         """Return a sha for a given project ref."""
@@ -53,7 +49,7 @@
         """Called after configuration has been processed."""
 
     @abc.abstractmethod
-    def getChange(self, event, project):
+    def getChange(self, event):
         """Get the change representing an event."""
 
     @abc.abstractmethod
@@ -63,3 +59,7 @@
     @abc.abstractmethod
     def getGitUrl(self, project):
         """Get the git url for a project."""
+
+    @abc.abstractmethod
+    def getProject(self, name):
+        """Get a project."""
diff --git a/zuul/source/gerrit.py b/zuul/source/gerrit.py
index 463f315..0d28898 100644
--- a/zuul/source/gerrit.py
+++ b/zuul/source/gerrit.py
@@ -13,338 +13,33 @@
 # under the License.
 
 import logging
-import re
-import time
-from zuul import exceptions
-from zuul.model import Change, Ref, NullChange
 from zuul.source import BaseSource
 
 
-# Walk the change dependency tree to find a cycle
-def detect_cycle(change, history=None):
-    if history is None:
-        history = []
-    else:
-        history = history[:]
-    history.append(change.number)
-    for dep in change.needs_changes:
-        if dep.number in history:
-            raise Exception("Dependency cycle detected: %s in %s" % (
-                dep.number, history))
-        detect_cycle(dep, history)
-
-
 class GerritSource(BaseSource):
     name = 'gerrit'
     log = logging.getLogger("zuul.source.Gerrit")
-    replication_timeout = 300
-    replication_retry_interval = 5
-
-    depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
-                               re.MULTILINE | re.IGNORECASE)
 
     def getRefSha(self, project, ref):
-        refs = {}
-        try:
-            refs = self.connection.getInfoRefs(project)
-        except:
-            self.log.exception("Exception looking for ref %s" %
-                               ref)
-        sha = refs.get(ref, '')
-        return sha
-
-    def _waitForRefSha(self, project, ref, old_sha=''):
-        # Wait for the ref to show up in the repo
-        start = time.time()
-        while time.time() - start < self.replication_timeout:
-            sha = self.getRefSha(project.name, ref)
-            if old_sha != sha:
-                return True
-            time.sleep(self.replication_retry_interval)
-        return False
+        return self.connection.getRefSha(project, ref)
 
     def isMerged(self, change, head=None):
-        self.log.debug("Checking if change %s is merged" % change)
-        if not change.number:
-            self.log.debug("Change has no number; considering it merged")
-            # Good question.  It's probably ref-updated, which, ah,
-            # means it's merged.
-            return True
-
-        data = self.connection.query(change.number)
-        change._data = data
-        change.is_merged = self._isMerged(change)
-        if change.is_merged:
-            self.log.debug("Change %s is merged" % (change,))
-        else:
-            self.log.debug("Change %s is not merged" % (change,))
-        if not head:
-            return change.is_merged
-        if not change.is_merged:
-            return False
-
-        ref = 'refs/heads/' + change.branch
-        self.log.debug("Waiting for %s to appear in git repo" % (change))
-        if self._waitForRefSha(change.project, ref, change._ref_sha):
-            self.log.debug("Change %s is in the git repo" %
-                           (change))
-            return True
-        self.log.debug("Change %s did not appear in the git repo" %
-                       (change))
-        return False
-
-    def _isMerged(self, change):
-        data = change._data
-        if not data:
-            return False
-        status = data.get('status')
-        if not status:
-            return False
-        if status == 'MERGED':
-            return True
-        return False
+        return self.connection.isMerged(change, head)
 
     def canMerge(self, change, allow_needs):
-        if not change.number:
-            self.log.debug("Change has no number; considering it merged")
-            # Good question.  It's probably ref-updated, which, ah,
-            # means it's merged.
-            return True
-        data = change._data
-        if not data:
-            return False
-        if 'submitRecords' not in data:
-            return False
-        try:
-            for sr in data['submitRecords']:
-                if sr['status'] == 'OK':
-                    return True
-                elif sr['status'] == 'NOT_READY':
-                    for label in sr['labels']:
-                        if label['status'] in ['OK', 'MAY']:
-                            continue
-                        elif label['status'] in ['NEED', 'REJECT']:
-                            # It may be our own rejection, so we ignore
-                            if label['label'].lower() not in allow_needs:
-                                return False
-                            continue
-                        else:
-                            # IMPOSSIBLE
-                            return False
-                else:
-                    # CLOSED, RULE_ERROR
-                    return False
-        except:
-            self.log.exception("Exception determining whether change"
-                               "%s can merge:" % change)
-            return False
-        return True
+        return self.connection.canMerge(change, allow_needs)
 
     def postConfig(self):
         pass
 
-    def getChange(self, event, project):
-        if event.change_number:
-            refresh = False
-            change = self._getChange(event.change_number, event.patch_number,
-                                     refresh=refresh)
-        elif event.ref:
-            change = Ref(project)
-            change.ref = event.ref
-            change.oldrev = event.oldrev
-            change.newrev = event.newrev
-            change.url = self._getGitwebUrl(project, sha=event.newrev)
-        else:
-            change = NullChange(project)
-        return change
+    def getChange(self, event):
+        return self.connection.getChange(event)
 
-    def _getChange(self, number, patchset, refresh=False, history=None):
-        key = '%s,%s' % (number, patchset)
-        change = self.connection.getCachedChange(key)
-        if change and not refresh:
-            return change
-        if not change:
-            change = Change(None)
-            change.number = number
-            change.patchset = patchset
-        key = '%s,%s' % (change.number, change.patchset)
-        self.connection.updateChangeCache(key, change)
-        try:
-            self._updateChange(change, history)
-        except Exception:
-            self.connection.deleteCachedChange(key)
-            raise
-        return change
+    def getProject(self, name):
+        return self.connection.getProject(name)
 
     def getProjectOpenChanges(self, project):
-        # This is a best-effort function in case Gerrit is unable to return
-        # a particular change.  It happens.
-        query = "project:%s status:open" % (project.name,)
-        self.log.debug("Running query %s to get project open changes" %
-                       (query,))
-        data = self.connection.simpleQuery(query)
-        changes = []
-        for record in data:
-            try:
-                changes.append(
-                    self._getChange(record['number'],
-                                    record['currentPatchSet']['number']))
-            except Exception:
-                self.log.exception("Unable to query change %s" %
-                                   (record.get('number'),))
-        return changes
-
-    def _getDependsOnFromCommit(self, message, change):
-        records = []
-        seen = set()
-        for match in self.depends_on_re.findall(message):
-            if match in seen:
-                self.log.debug("Ignoring duplicate Depends-On: %s" %
-                               (match,))
-                continue
-            seen.add(match)
-            query = "change:%s" % (match,)
-            self.log.debug("Updating %s: Running query %s "
-                           "to find needed changes" %
-                           (change, query,))
-            records.extend(self.connection.simpleQuery(query))
-        return records
-
-    def _getNeededByFromCommit(self, change_id, change):
-        records = []
-        seen = set()
-        query = 'message:%s' % change_id
-        self.log.debug("Updating %s: Running query %s "
-                       "to find changes needed-by" %
-                       (change, query,))
-        results = self.connection.simpleQuery(query)
-        for result in results:
-            for match in self.depends_on_re.findall(
-                result['commitMessage']):
-                if match != change_id:
-                    continue
-                key = (result['number'], result['currentPatchSet']['number'])
-                if key in seen:
-                    continue
-                self.log.debug("Updating %s: Found change %s,%s "
-                               "needs %s from commit" %
-                               (change, key[0], key[1], change_id))
-                seen.add(key)
-                records.append(result)
-        return records
-
-    def _updateChange(self, change, history=None):
-        self.log.info("Updating %s" % (change,))
-        data = self.connection.query(change.number)
-        change._data = data
-
-        if change.patchset is None:
-            change.patchset = data['currentPatchSet']['number']
-
-        if 'project' not in data:
-            raise exceptions.ChangeNotFound(change.number, change.patchset)
-        change.project = self.sched.getProject(data['project'])
-        change.branch = data['branch']
-        change.url = data['url']
-        max_ps = 0
-        files = []
-        for ps in data['patchSets']:
-            if ps['number'] == change.patchset:
-                change.refspec = ps['ref']
-                for f in ps.get('files', []):
-                    files.append(f['file'])
-            if int(ps['number']) > int(max_ps):
-                max_ps = ps['number']
-        if max_ps == change.patchset:
-            change.is_current_patchset = True
-        else:
-            change.is_current_patchset = False
-        change.files = files
-
-        change.is_merged = self._isMerged(change)
-        change.approvals = data['currentPatchSet'].get('approvals', [])
-        change.open = data['open']
-        change.status = data['status']
-        change.owner = data['owner']
-
-        if change.is_merged:
-            # This change is merged, so we don't need to look any further
-            # for dependencies.
-            self.log.debug("Updating %s: change is merged" % (change,))
-            return change
-
-        if history is None:
-            history = []
-        else:
-            history = history[:]
-        history.append(change.number)
-
-        needs_changes = []
-        if 'dependsOn' in data:
-            parts = data['dependsOn'][0]['ref'].split('/')
-            dep_num, dep_ps = parts[3], parts[4]
-            if dep_num in history:
-                raise Exception("Dependency cycle detected: %s in %s" % (
-                    dep_num, history))
-            self.log.debug("Updating %s: Getting git-dependent change %s,%s" %
-                           (change, dep_num, dep_ps))
-            dep = self._getChange(dep_num, dep_ps, history=history)
-            # Because we are not forcing a refresh in _getChange, it
-            # may return without executing this code, so if we are
-            # updating our change to add ourselves to a dependency
-            # cycle, we won't detect it.  By explicitly performing a
-            # walk of the dependency tree, we will.
-            detect_cycle(dep, history)
-            if (not dep.is_merged) and dep not in needs_changes:
-                needs_changes.append(dep)
-
-        for record in self._getDependsOnFromCommit(data['commitMessage'],
-                                                   change):
-            dep_num = record['number']
-            dep_ps = record['currentPatchSet']['number']
-            if dep_num in history:
-                raise Exception("Dependency cycle detected: %s in %s" % (
-                    dep_num, history))
-            self.log.debug("Updating %s: Getting commit-dependent "
-                           "change %s,%s" %
-                           (change, dep_num, dep_ps))
-            dep = self._getChange(dep_num, dep_ps, history=history)
-            # Because we are not forcing a refresh in _getChange, it
-            # may return without executing this code, so if we are
-            # updating our change to add ourselves to a dependency
-            # cycle, we won't detect it.  By explicitly performing a
-            # walk of the dependency tree, we will.
-            detect_cycle(dep, history)
-            if (not dep.is_merged) and dep not in needs_changes:
-                needs_changes.append(dep)
-        change.needs_changes = needs_changes
-
-        needed_by_changes = []
-        if 'neededBy' in data:
-            for needed in data['neededBy']:
-                parts = needed['ref'].split('/')
-                dep_num, dep_ps = parts[3], parts[4]
-                self.log.debug("Updating %s: Getting git-needed change %s,%s" %
-                               (change, dep_num, dep_ps))
-                dep = self._getChange(dep_num, dep_ps)
-                if (not dep.is_merged) and dep.is_current_patchset:
-                    needed_by_changes.append(dep)
-
-        for record in self._getNeededByFromCommit(data['id'], change):
-            dep_num = record['number']
-            dep_ps = record['currentPatchSet']['number']
-            self.log.debug("Updating %s: Getting commit-needed change %s,%s" %
-                           (change, dep_num, dep_ps))
-            # Because a commit needed-by may be a cross-repo
-            # dependency, cause that change to refresh so that it will
-            # reference the latest patchset of its Depends-On (this
-            # change).
-            dep = self._getChange(dep_num, dep_ps, refresh=True)
-            if (not dep.is_merged) and dep.is_current_patchset:
-                needed_by_changes.append(dep)
-        change.needed_by_changes = needed_by_changes
-
-        return change
+        return self.connection.getProjectOpenChanges(project)
 
     def getGitUrl(self, project):
         return self.connection.getGitUrl(project)
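
The net effect of this hunk is that the Gerrit source object becomes a thin shim: merge detection, mergeability checks, change lookup, dependency resolution, and open-change queries all move behind the connection. As a hedged illustration (not the committed connection-side code, and with the original try/except error handling elided), the submit-record walk deleted above would land on the connection in roughly this form:

    # Hypothetical connection-side home for the canMerge logic removed
    # above; the body restates the deleted submit-record walk.
    def canMerge(self, change, allow_needs):
        if not change.number:
            # Probably a ref-updated event; there is nothing to merge.
            return True
        data = change._data
        if not data or 'submitRecords' not in data:
            return False
        for sr in data['submitRecords']:
            if sr['status'] == 'OK':
                return True
            if sr['status'] != 'NOT_READY':
                # CLOSED, RULE_ERROR
                return False
            for label in sr['labels']:
                if label['status'] in ('OK', 'MAY'):
                    continue
                if label['status'] in ('NEED', 'REJECT'):
                    # It may be our own rejection, so honor allow_needs.
                    if label['label'].lower() not in allow_needs:
                        return False
                else:
                    return False
        return True
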
diff --git a/zuul/trigger/__init__.py b/zuul/trigger/__init__.py
index 16fb0b1..067e478 100644
--- a/zuul/trigger/__init__.py
+++ b/zuul/trigger/__init__.py
@@ -23,20 +23,16 @@
 
     Defines the exact public methods that must be supplied."""
 
-    def __init__(self, trigger_config={}, sched=None, connection=None):
+    def __init__(self, trigger_config={}, connection=None):
         self.trigger_config = trigger_config
-        self.sched = sched
         self.connection = connection
 
-    def stop(self):
-        """Stop the trigger."""
-
     @abc.abstractmethod
     def getEventFilters(self, trigger_conf):
         """Return a list of EventFilter's for the scheduler to match against.
         """
 
-    def postConfig(self):
+    def postConfig(self, pipeline):
         """Called after config is loaded."""
 
     def onChangeMerged(self, change, source):
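
Two interface changes land here: triggers no longer hold a scheduler reference (they reach it via connection.sched, as the timer and zuul triggers below now do), and postConfig is called with the pipeline being configured instead of each trigger walking the whole layout. A minimal sketch of a trigger written against the revised interface; the subclass name and its event plumbing are hypothetical, and the base class surface is restated from the diff so the sketch stands alone:

    import abc

    class BaseTrigger(object):
        # 'BaseTrigger' stands in for the abstract base in
        # zuul/trigger/__init__.py; Python 2 metaclass syntax and the
        # mutable default are kept to mirror the diff.
        __metaclass__ = abc.ABCMeta

        def __init__(self, trigger_config={}, connection=None):
            self.trigger_config = trigger_config
            self.connection = connection

        @abc.abstractmethod
        def getEventFilters(self, trigger_conf):
            """Return a list of EventFilters for the scheduler to match against."""

        def postConfig(self, pipeline):
            """Called after config is loaded."""

    class EchoTrigger(BaseTrigger):
        name = 'echo'

        def getEventFilters(self, trigger_conf):
            return []  # a real trigger builds EventFilter objects here

        def postConfig(self, pipeline):
            # Invoked once per pipeline now; no layout walk required.
            pass

        def _emit(self, event):
            # The scheduler is reached through the connection.
            self.connection.sched.addEvent(event)
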
diff --git a/zuul/trigger/timer.py b/zuul/trigger/timer.py
index f81312e..94a406b 100644
--- a/zuul/trigger/timer.py
+++ b/zuul/trigger/timer.py
@@ -25,8 +25,8 @@
     name = 'timer'
     log = logging.getLogger("zuul.Timer")
 
-    def __init__(self, trigger_config={}, sched=None, connection=None):
-        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
+    def __init__(self, trigger_config={}, connection=None):
+        super(TimerTrigger, self).__init__(trigger_config, connection)
         self.apsched = BackgroundScheduler()
         self.apsched.start()
 
@@ -38,7 +38,7 @@
             event.forced_pipeline = pipeline_name
             event.project_name = project.name
             self.log.debug("Adding event %s" % event)
-            self.sched.addEvent(event)
+            self.connection.sched.addEvent(event)
 
     def stop(self):
         self.apsched.shutdown()
@@ -61,32 +61,31 @@
 
         return efilters
 
-    def postConfig(self):
+    def postConfig(self, pipeline):
         for job in self.apsched.get_jobs():
             job.remove()
-        for pipeline in self.sched.layout.pipelines.values():
-            for ef in pipeline.manager.event_filters:
-                if ef.trigger != self:
+        for ef in pipeline.manager.event_filters:
+            if ef.trigger != self:
+                continue
+            for timespec in ef.timespecs:
+                parts = timespec.split()
+                if len(parts) < 5 or len(parts) > 6:
+                    self.log.error(
+                        "Unable to parse time value '%s' "
+                        "defined in pipeline %s" % (
+                            timespec,
+                            pipeline.name))
                     continue
-                for timespec in ef.timespecs:
-                    parts = timespec.split()
-                    if len(parts) < 5 or len(parts) > 6:
-                        self.log.error(
-                            "Unable to parse time value '%s' "
-                            "defined in pipeline %s" % (
-                                timespec,
-                                pipeline.name))
-                        continue
-                    minute, hour, dom, month, dow = parts[:5]
-                    if len(parts) > 5:
-                        second = parts[5]
-                    else:
-                        second = None
-                    trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour,
-                                          minute=minute, second=second)
+                minute, hour, dom, month, dow = parts[:5]
+                if len(parts) > 5:
+                    second = parts[5]
+                else:
+                    second = None
+                trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour,
+                                      minute=minute, second=second)
 
-                    self.apsched.add_job(self._onTrigger, trigger=trigger,
-                                         args=(pipeline.name, timespec,))
+                self.apsched.add_job(self._onTrigger, trigger=trigger,
+                                     args=(pipeline.name, timespec,))
 
 
 def getSchema():
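
The body of postConfig is unchanged apart from de-indenting out of the removed layout loop: each timespec is a 5- or 6-field cron-like string that becomes an apscheduler CronTrigger. Note that the month field is unpacked but never passed to CronTrigger, in both the old and the new code. A standalone sketch of that conversion (the helper name is invented for illustration):

    from apscheduler.triggers.cron import CronTrigger

    def timespec_to_cron(timespec):
        # 'minute hour day-of-month month day-of-week [second]'
        parts = timespec.split()
        if len(parts) < 5 or len(parts) > 6:
            raise ValueError("Unable to parse time value '%s'" % timespec)
        minute, hour, dom, month, dow = parts[:5]
        second = parts[5] if len(parts) > 5 else None
        return CronTrigger(day=dom, day_of_week=dow, hour=hour,
                           minute=minute, second=second)

    # e.g. timespec_to_cron('0 3 * * *') fires daily at 03:00.
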
diff --git a/zuul/trigger/zuultrigger.py b/zuul/trigger/zuultrigger.py
index 00b21f2..d768941 100644
--- a/zuul/trigger/zuultrigger.py
+++ b/zuul/trigger/zuultrigger.py
@@ -23,8 +23,8 @@
     name = 'zuul'
     log = logging.getLogger("zuul.ZuulTrigger")
 
-    def __init__(self, trigger_config={}, sched=None, connection=None):
-        super(ZuulTrigger, self).__init__(trigger_config, sched, connection)
+    def __init__(self, trigger_config={}, connection=None):
+        super(ZuulTrigger, self).__init__(trigger_config, connection)
         self._handle_parent_change_enqueued_events = False
         self._handle_project_change_merged_events = False
 
@@ -89,7 +89,7 @@
         event.change_url = change.url
         event.patch_number = change.patchset
         event.refspec = change.refspec
-        self.sched.addEvent(event)
+        self.connection.sched.addEvent(event)
 
     def _createParentChangeEnqueuedEvents(self, change, pipeline):
         self.log.debug("Checking for changes needing %s:" % change)
@@ -110,19 +110,18 @@
         event.change_url = change.url
         event.patch_number = change.patchset
         event.refspec = change.refspec
-        self.sched.addEvent(event)
+        self.connection.sched.addEvent(event)
 
-    def postConfig(self):
+    def postConfig(self, pipeline):
         self._handle_parent_change_enqueued_events = False
         self._handle_project_change_merged_events = False
-        for pipeline in self.sched.layout.pipelines.values():
-            for ef in pipeline.manager.event_filters:
-                if ef.trigger != self:
-                    continue
-                if 'parent-change-enqueued' in ef._types:
-                    self._handle_parent_change_enqueued_events = True
-                elif 'project-change-merged' in ef._types:
-                    self._handle_project_change_merged_events = True
+        for ef in pipeline.manager.event_filters:
+            if ef.trigger != self:
+                continue
+            if 'parent-change-enqueued' in ef._types:
+                self._handle_parent_change_enqueued_events = True
+            elif 'project-change-merged' in ef._types:
+                self._handle_project_change_merged_events = True
 
 
 def getSchema():
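
With postConfig now scoped to one pipeline, the calling convention this implies (hypothetical here; the scheduler side is not in this diff) is for the scheduler's reconfiguration path to loop over pipelines and triggers:

    # Hypothetical scheduler-side driver implied by the new signature;
    # 'layout' and 'triggers' mirror the structures the removed code
    # iterated over from inside each trigger.
    def post_config_all(layout, triggers):
        for pipeline in layout.pipelines.values():
            for trigger in triggers.values():
                trigger.postConfig(pipeline)

One subtlety worth noting: ZuulTrigger.postConfig resets both handler flags to False at the top of each call, so if a single trigger instance serves several pipelines, the flags end up reflecting only the last pipeline scanned; presumably the caller's invocation pattern accounts for this.
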