Merge "Remove install-distro step for nodepool integration"
diff --git a/doc/source/admin/components.rst b/doc/source/admin/components.rst
index ba14752..84ebc10 100644
--- a/doc/source/admin/components.rst
+++ b/doc/source/admin/components.rst
@@ -660,6 +660,16 @@
       Base URL on which the websocket service is exposed, if different
       than the base URL of the web app.
 
+   .. attr:: stats_url
+
+      Base URL from which statistics emitted via statsd can be queried.
+
+   .. attr:: stats_type
+      :default: graphite
+
+      Type of server hosting the statistics information. Currently only
+      'graphite' is supported by the dashboard.
+
    .. attr:: static_cache_expiry
       :default: 3600
 
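
The new attributes feed the ``stats`` object in zuul-web's ``/info``
endpoint. A minimal sketch of the resulting response fragment, assuming the
hypothetical Graphite URL and statsd prefix used by the web tests later in
this change:

.. code-block:: yaml

   # Hypothetical /info excerpt; keys mirror TestGraphiteUrl in
   # tests/unit/test_web.py below.
   stats:
     url: https://graphite.example.com
     prefix: example
     type: graphite
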
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 8492423..0932c56 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -710,6 +710,21 @@
       timeout is supplied, the job may run indefinitely.  Supplying a
       timeout is highly recommended.
 
+      This timeout only applies to the pre-run and run playbooks in a
+      job.
+
+   .. attr:: post-timeout
+
+      The time in seconds that each post playbook should be allowed to run
+      before it is automatically aborted and failure is reported.  If no
+      post-timeout is supplied, the job may run indefinitely.  Supplying a
+      post-timeout is highly recommended.
+
+      The post-timeout is handled separately from the above timeout because
+      the post playbooks are typically where job logs are copied.
+      If the pre-run or run playbooks time out, we still want to do our
+      best to copy the job logs in the post-run playbooks.
+
    .. attr:: attempts
       :default: 3
 
@@ -862,6 +877,48 @@
       same name will override a previously defined variable, but new
       variable names will be added to the set of defined variables.
 
+   .. attr:: host_vars
+
+      A dictionary of host variables to supply to Ansible.  The keys
+      of this dictionary are node names as defined in a
+      :ref:`nodeset`, and the values are dictionaries of variables,
+      just as in :attr:`job.vars`.
+
+   .. attr:: group_vars
+
+      A dictionary of group variables to supply to Ansible.  The keys
+      of this dictionary are node groups as defined in a
+      :ref:`nodeset`, and the values are dictionaries of variables,
+      just as in :attr:`job.vars`.
+
+   An example of three kinds of variables:
+
+   .. code-block:: yaml
+
+      - job:
+          name: variable-example
+          nodeset:
+            nodes:
+              - name: controller
+                label: fedora-27
+              - name: api1
+                label: centos-7
+              - name: api2
+                label: centos-7
+            groups:
+              - name: api
+                nodes:
+                  - api1
+                  - api2
+          vars:
+            foo: "this variable is visible to all nodes"
+          host_vars:
+            controller:
+              bar: "this variable is visible only on the controller node"
+          group_vars:
+            api:
+              baz: "this variable is visible on api1 and api2"
+
    .. attr:: dependencies
 
       A list of other jobs upon which this job depends.  Zuul will not
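
A sketch of the intended post-timeout usage, with hypothetical job and
playbook names: the log-copying post playbook gets its own deadline, so a
hung upload cannot hold the job indefinitely even after a run timeout.

.. code-block:: yaml

   - job:
       name: example-job                      # hypothetical
       run: playbooks/run.yaml
       post-run: playbooks/upload-logs.yaml
       timeout: 1800        # bounds the pre-run and run playbooks
       post-timeout: 600    # bounds each post playbook separately
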
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 820e316..4e1c33d 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -289,6 +289,10 @@
 
       The job timeout, in seconds.
 
+   .. var:: post_timeout
+
+      The post-run playbook timeout, in seconds.
+
    .. var:: jobtags
 
       A list of tags associated with the job.  Not to be confused with
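
A hypothetical task showing how a playbook could read the new variable,
assuming the standard ``zuul`` variable namespace documented in this file:

.. code-block:: yaml

   - name: Show the post-run deadline (hypothetical)
     debug:
       msg: "Post playbooks abort after {{ zuul.post_timeout }} seconds"
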
diff --git a/etc/status/public_html/jquery.zuul.js b/etc/status/public_html/jquery.zuul.js
index 50dbed5..ac8a302 100644
--- a/etc/status/public_html/jquery.zuul.js
+++ b/etc/status/public_html/jquery.zuul.js
@@ -49,7 +49,7 @@
         options = $.extend({
             'enabled': true,
             'graphite_url': '',
-            'source': 'status.json',
+            'source': 'status',
             'msg_id': '#zuul_msg',
             'pipelines_id': '#zuul_pipelines',
             'queue_events_num': '#zuul_queue_events_num',
diff --git a/etc/status/public_html/zuul.app.js b/etc/status/public_html/zuul.app.js
index bf90a4d..6e35eb3 100644
--- a/etc/status/public_html/zuul.app.js
+++ b/etc/status/public_html/zuul.app.js
@@ -55,7 +55,7 @@
     var demo = location.search.match(/[?&]demo=([^?&]*)/),
         source_url = location.search.match(/[?&]source_url=([^?&]*)/),
         source = demo ? './status-' + (demo[1] || 'basic') + '.json-sample' :
-            'status.json';
+            'status';
     source = source_url ? source_url[1] : source;
 
     var zuul = $.zuul({
diff --git a/requirements.txt b/requirements.txt
index 7057c5a..115b096 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,8 +4,6 @@
 # is in a release
 git+https://github.com/sigmavirus24/github3.py.git@develop#egg=Github3.py
 PyYAML>=3.1.0
-Paste
-WebOb>=1.2.3
 paramiko>=2.0.1
 GitPython>=2.1.8
 python-daemon>=2.0.4,<2.1.0
diff --git a/tests/base.py b/tests/base.py
index 2e0ea1c..013a6e1 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -1350,7 +1350,7 @@
                 host['host_vars']['ansible_connection'] = 'local'
 
         hosts.append(dict(
-            name=['localhost'],
+            name='localhost',
             host_vars=dict(ansible_connection='local'),
             host_keys=[]))
         return hosts
@@ -1857,7 +1857,7 @@
         # from libraries that zuul depends on such as gear.
         log_defaults_from_env = os.environ.get(
             'OS_LOG_DEFAULTS',
-            'git.cmd=INFO,kazoo.client=WARNING,gear=INFO,paste=INFO')
+            'git.cmd=INFO,kazoo.client=WARNING,gear=INFO')
 
         if log_defaults_from_env:
             for default in log_defaults_from_env.split(','):
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml
new file mode 100644
index 0000000..36e0eca
--- /dev/null
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml
@@ -0,0 +1,26 @@
+- hosts: host1
+  tasks:
+    - name: Assert hostvar is present.
+      assert:
+        that:
+          - allvar == 'all'
+          - hostvar == 'host'
+          - groupvar is not defined
+
+- hosts: host2
+  tasks:
+    - name: Assert groupvar is present.
+      assert:
+        that:
+          - allvar == 'all'
+          - hostvar is not defined
+          - groupvar == 'group'
+
+- hosts: host3
+  tasks:
+    - name: Assert groupvar is present.
+      assert:
+        that:
+          - allvar == 'all'
+          - hostvar is not defined
+          - groupvar == 'group'
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index d0a8f7b..13a19da 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -99,6 +99,12 @@
 
 - job:
     parent: python27
+    name: post-timeout
+    post-run: playbooks/timeout.yaml
+    post-timeout: 1
+
+- job:
+    parent: python27
     name: check-vars
     run: playbooks/check-vars.yaml
     nodeset:
@@ -115,6 +121,32 @@
 
 - job:
     parent: python27
+    name: check-hostvars
+    run: playbooks/check-hostvars.yaml
+    nodeset:
+      nodes:
+        - name: host1
+          label: ubuntu-xenial
+        - name: host2
+          label: ubuntu-xenial
+        - name: host3
+          label: ubuntu-xenial
+      groups:
+        - name: group1
+          nodes:
+            - host2
+            - host3
+    vars:
+      allvar: all
+    host_vars:
+      host1:
+        hostvar: host
+    group_vars:
+      group1:
+        groupvar: group
+
+- job:
+    parent: python27
     name: check-secret-names
     run: playbooks/check-secret-names.yaml
     nodeset:
diff --git a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
index 447f6cd..e332924 100644
--- a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
@@ -15,7 +15,9 @@
         - python27
         - faillocal
         - check-vars
+        - check-hostvars
         - check-secret-names
         - timeout
+        - post-timeout
         - hello-world
         - failpost
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
index 46e1d99..c67eb55 100755
--- a/tests/unit/test_executor.py
+++ b/tests/unit/test_executor.py
@@ -425,12 +425,14 @@
         node = {'name': 'fake-host',
                 'host_keys': ['fake-host-key'],
                 'interface_ip': 'localhost'}
-        keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+        keys = self.test_job.getHostList({'nodes': [node],
+                                          'host_vars': {}})[0]['host_keys']
         self.assertEqual(keys[0], 'localhost fake-host-key')
 
         # Test with custom connection_port set
         node['connection_port'] = 22022
-        keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+        keys = self.test_job.getHostList({'nodes': [node],
+                                          'host_vars': {}})[0]['host_keys']
         self.assertEqual(keys[0], '[localhost]:22022 fake-host-key')
 
 
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 5c586ca..6ec5232 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -23,6 +23,7 @@
 from zuul import configloader
 from zuul.lib import encryption
 from zuul.lib import yamlutil as yaml
+import zuul.lib.connections
 
 from tests.base import BaseTestCase, FIXTURE_DIR
 
@@ -36,6 +37,8 @@
 class TestJob(BaseTestCase):
     def setUp(self):
         super(TestJob, self).setUp()
+        self.connections = zuul.lib.connections.ConnectionRegistry()
+        self.addCleanup(self.connections.stop)
         self.connection = Dummy(connection_name='dummy_connection')
         self.source = Dummy(canonical_hostname='git.example.com',
                             connection=self.connection)
@@ -47,6 +50,9 @@
         self.pipeline = model.Pipeline('gate', self.layout)
         self.layout.addPipeline(self.pipeline)
         self.queue = model.ChangeQueue(self.pipeline)
+        self.pcontext = configloader.ParseContext(
+            self.connections, None, self.tenant, self.layout)
+        self.pcontext.setPipelines()
 
         private_key_file = os.path.join(FIXTURE_DIR, 'private.pem')
         with open(private_key_file, "rb") as f:
@@ -61,9 +67,7 @@
 
     @property
     def job(self):
-        tenant = model.Tenant('tenant')
-        layout = model.Layout(tenant)
-        job = configloader.JobParser.fromYaml(tenant, layout, {
+        job = self.pcontext.job_parser.fromYaml({
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'job',
@@ -147,33 +151,27 @@
             job.applyVariant(bad_final)
 
     def test_job_inheritance_job_tree(self):
-        tenant = model.Tenant('tenant')
-        layout = model.Layout(tenant)
-
-        tpc = model.TenantProjectConfig(self.project)
-        tenant.addUntrustedProject(tpc)
-
-        pipeline = model.Pipeline('gate', layout)
-        layout.addPipeline(pipeline)
+        pipeline = model.Pipeline('gate', self.layout)
+        self.layout.addPipeline(pipeline)
         queue = model.ChangeQueue(pipeline)
 
-        base = configloader.JobParser.fromYaml(tenant, layout, {
+        base = self.pcontext.job_parser.fromYaml({
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'base',
             'parent': None,
             'timeout': 30,
         })
-        layout.addJob(base)
-        python27 = configloader.JobParser.fromYaml(tenant, layout, {
+        self.layout.addJob(base)
+        python27 = self.pcontext.job_parser.fromYaml({
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'python27',
             'parent': 'base',
             'timeout': 40,
         })
-        layout.addJob(python27)
-        python27diablo = configloader.JobParser.fromYaml(tenant, layout, {
+        self.layout.addJob(python27)
+        python27diablo = self.pcontext.job_parser.fromYaml({
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'python27',
@@ -182,13 +180,9 @@
             ],
             'timeout': 50,
         })
-        layout.addJob(python27diablo)
+        self.layout.addJob(python27diablo)
 
-        project_template_parser = configloader.ProjectTemplateParser(
-            tenant, layout)
-        project_parser = configloader.ProjectParser(
-            tenant, layout, project_template_parser)
-        project_config = project_parser.fromYaml([{
+        project_config = self.pcontext.project_parser.fromYaml([{
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'project',
@@ -199,12 +193,12 @@
                 ]
             }
         }])
-        layout.addProjectConfig(project_config)
+        self.layout.addProjectConfig(project_config)
 
         change = model.Change(self.project)
         change.branch = 'master'
         item = queue.enqueueChange(change)
-        item.layout = layout
+        item.layout = self.layout
 
         self.assertTrue(base.changeMatches(change))
         self.assertTrue(python27.changeMatches(change))
@@ -218,7 +212,7 @@
 
         change.branch = 'stable/diablo'
         item = queue.enqueueChange(change)
-        item.layout = layout
+        item.layout = self.layout
 
         self.assertTrue(base.changeMatches(change))
         self.assertTrue(python27.changeMatches(change))
@@ -231,25 +225,19 @@
         self.assertEqual(job.timeout, 70)
 
     def test_inheritance_keeps_matchers(self):
-        tenant = model.Tenant('tenant')
-        layout = model.Layout(tenant)
-
-        pipeline = model.Pipeline('gate', layout)
-        layout.addPipeline(pipeline)
+        pipeline = model.Pipeline('gate', self.layout)
+        self.layout.addPipeline(pipeline)
         queue = model.ChangeQueue(pipeline)
-        project = model.Project('project', self.source)
-        tpc = model.TenantProjectConfig(project)
-        tenant.addUntrustedProject(tpc)
 
-        base = configloader.JobParser.fromYaml(tenant, layout, {
+        base = self.pcontext.job_parser.fromYaml({
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'base',
             'parent': None,
             'timeout': 30,
         })
-        layout.addJob(base)
-        python27 = configloader.JobParser.fromYaml(tenant, layout, {
+        self.layout.addJob(base)
+        python27 = self.pcontext.job_parser.fromYaml({
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'python27',
@@ -257,13 +245,9 @@
             'timeout': 40,
             'irrelevant-files': ['^ignored-file$'],
         })
-        layout.addJob(python27)
+        self.layout.addJob(python27)
 
-        project_template_parser = configloader.ProjectTemplateParser(
-            tenant, layout)
-        project_parser = configloader.ProjectParser(
-            tenant, layout, project_template_parser)
-        project_config = project_parser.fromYaml([{
+        project_config = self.pcontext.project_parser.fromYaml([{
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'project',
@@ -273,13 +257,13 @@
                 ]
             }
         }])
-        layout.addProjectConfig(project_config)
+        self.layout.addProjectConfig(project_config)
 
-        change = model.Change(project)
+        change = model.Change(self.project)
         change.branch = 'master'
         change.files = ['/COMMIT_MSG', 'ignored-file']
         item = queue.enqueueChange(change)
-        item.layout = layout
+        item.layout = self.layout
 
         self.assertTrue(base.changeMatches(change))
         self.assertFalse(python27.changeMatches(change))
@@ -288,28 +272,26 @@
         self.assertEqual([], item.getJobs())
 
     def test_job_source_project(self):
-        tenant = self.tenant
-        layout = self.layout
         base_project = model.Project('base_project', self.source)
         base_context = model.SourceContext(base_project, 'master',
                                            'test', True)
         tpc = model.TenantProjectConfig(base_project)
-        tenant.addUntrustedProject(tpc)
+        self.tenant.addUntrustedProject(tpc)
 
-        base = configloader.JobParser.fromYaml(tenant, layout, {
+        base = self.pcontext.job_parser.fromYaml({
             '_source_context': base_context,
             '_start_mark': self.start_mark,
             'parent': None,
             'name': 'base',
         })
-        layout.addJob(base)
+        self.layout.addJob(base)
 
         other_project = model.Project('other_project', self.source)
         other_context = model.SourceContext(other_project, 'master',
                                             'test', True)
         tpc = model.TenantProjectConfig(other_project)
-        tenant.addUntrustedProject(tpc)
-        base2 = configloader.JobParser.fromYaml(tenant, layout, {
+        self.tenant.addUntrustedProject(tpc)
+        base2 = self.pcontext.job_parser.fromYaml({
             '_source_context': other_context,
             '_start_mark': self.start_mark,
             'name': 'base',
@@ -318,11 +300,11 @@
                 Exception,
                 "Job base in other_project is not permitted "
                 "to shadow job base in base_project"):
-            layout.addJob(base2)
+            self.layout.addJob(base2)
 
     def test_job_pipeline_allow_untrusted_secrets(self):
         self.pipeline.post_review = False
-        job = configloader.JobParser.fromYaml(self.tenant, self.layout, {
+        job = self.pcontext.job_parser.fromYaml({
             '_source_context': self.context,
             '_start_mark': self.start_mark,
             'name': 'job',
@@ -332,11 +314,7 @@
 
         self.layout.addJob(job)
 
-        project_template_parser = configloader.ProjectTemplateParser(
-            self.tenant, self.layout)
-        project_parser = configloader.ProjectParser(
-            self.tenant, self.layout, project_template_parser)
-        project_config = project_parser.fromYaml(
+        project_config = self.pcontext.project_parser.fromYaml(
             [{
                 '_source_context': self.context,
                 '_start_mark': self.start_mark,
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 1338d20..f019ead 100755
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -2033,6 +2033,8 @@
     tenant_config_file = 'config/ansible/main.yaml'
 
     def test_playbook(self):
+        # This test runs a bit long and needs extra time.
+        self.wait_timeout = 120
         # Keep the jobdir around so we can inspect contents if an
         # assert fails.
         self.executor_server.keep_jobdir = True
@@ -2048,6 +2050,12 @@
         build_timeout = self.getJobFromHistory('timeout')
         with self.jobLog(build_timeout):
             self.assertEqual(build_timeout.result, 'TIMED_OUT')
+            post_flag_path = os.path.join(self.test_root, build_timeout.uuid +
+                                          '.post.flag')
+            self.assertTrue(os.path.exists(post_flag_path))
+        build_post_timeout = self.getJobFromHistory('post-timeout')
+        with self.jobLog(build_post_timeout):
+            self.assertEqual(build_post_timeout.result, 'POST_FAILURE')
         build_faillocal = self.getJobFromHistory('faillocal')
         with self.jobLog(build_faillocal):
             self.assertEqual(build_faillocal.result, 'FAILURE')
diff --git a/tests/unit/test_web.py b/tests/unit/test_web.py
index b5ebe9f..602209f 100644
--- a/tests/unit/test_web.py
+++ b/tests/unit/test_web.py
@@ -22,20 +22,30 @@
 import urllib
 import time
 import socket
-from unittest import skip
-
-import webob
 
 import zuul.web
 
 from tests.base import ZuulTestCase, FIXTURE_DIR
 
 
-class TestWeb(ZuulTestCase):
+class FakeConfig(object):
+
+    def __init__(self, config):
+        self.config = config or {}
+
+    def has_option(self, section, option):
+        return option in self.config.get(section, {})
+
+    def get(self, section, option):
+        return self.config.get(section, {}).get(option)
+
+
+class BaseTestWeb(ZuulTestCase):
     tenant_config_file = 'config/single-tenant/main.yaml'
+    config_ini_data = {}
 
     def setUp(self):
-        super(TestWeb, self).setUp()
+        super(BaseTestWeb, self).setUp()
         self.executor_server.hold_jobs_in_build = True
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
         A.addApproval('Code-Review', 2)
@@ -45,10 +55,13 @@
         self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
         self.waitUntilSettled()
 
+        self.zuul_ini_config = FakeConfig(self.config_ini_data)
         # Start the web server
         self.web = zuul.web.ZuulWeb(
             listen_address='127.0.0.1', listen_port=0,
-            gear_server='127.0.0.1', gear_port=self.gearman_server.port)
+            gear_server='127.0.0.1', gear_port=self.gearman_server.port,
+            info=zuul.model.WebInfo.fromConfig(self.zuul_ini_config)
+        )
         loop = asyncio.new_event_loop()
         loop.set_debug(True)
         ws_thread = threading.Thread(target=self.web.run, args=(loop,))
@@ -75,7 +88,10 @@
         self.executor_server.hold_jobs_in_build = False
         self.executor_server.release()
         self.waitUntilSettled()
-        super(TestWeb, self).tearDown()
+        super(BaseTestWeb, self).tearDown()
+
+
+class TestWeb(BaseTestWeb):
 
     def test_web_status(self):
         "Test that we can retrieve JSON status info"
@@ -89,7 +105,7 @@
         self.waitUntilSettled()
 
         req = urllib.request.Request(
-            "http://localhost:%s/tenant-one/status.json" % self.port)
+            "http://localhost:%s/tenant-one/status" % self.port)
         f = urllib.request.urlopen(req)
         headers = f.info()
         self.assertIn('Content-Length', headers)
@@ -184,7 +200,6 @@
             "http://localhost:%s/status/foo" % self.port)
         self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, req)
 
-    @skip("This is not supported by zuul-web")
     def test_web_find_change(self):
         # can we filter by change id
         req = urllib.request.Request(
@@ -213,24 +228,84 @@
         f = urllib.request.urlopen(req)
         self.assertEqual(f.read(), public_pem)
 
-    @skip("This may not apply to zuul-web")
-    def test_web_custom_handler(self):
-        def custom_handler(path, tenant_name, request):
-            return webob.Response(body='ok')
-
-        self.webapp.register_path('/custom', custom_handler)
-        req = urllib.request.Request(
-            "http://localhost:%s/custom" % self.port)
-        f = urllib.request.urlopen(req)
-        self.assertEqual(b'ok', f.read())
-
-        self.webapp.unregister_path('/custom')
-        self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, req)
-
-    @skip("This returns a 500")
     def test_web_404_on_unknown_tenant(self):
         req = urllib.request.Request(
-            "http://localhost:{}/non-tenant/status.json".format(self.port))
+            "http://localhost:{}/non-tenant/status".format(self.port))
         e = self.assertRaises(
             urllib.error.HTTPError, urllib.request.urlopen, req)
         self.assertEqual(404, e.code)
+
+
+class TestInfo(BaseTestWeb):
+
+    def setUp(self):
+        super(TestInfo, self).setUp()
+        web_config = self.config_ini_data.get('web', {})
+        self.websocket_url = web_config.get('websocket_url')
+        self.stats_url = web_config.get('stats_url')
+        statsd_config = self.config_ini_data.get('statsd', {})
+        self.stats_prefix = statsd_config.get('prefix')
+
+    def test_info(self):
+        req = urllib.request.Request(
+            "http://localhost:%s/info" % self.port)
+        f = urllib.request.urlopen(req)
+        info = json.loads(f.read().decode('utf8'))
+        self.assertEqual(
+            info, {
+                "info": {
+                    "endpoint": "http://localhost:%s" % self.port,
+                    "capabilities": {
+                        "job_history": False
+                    },
+                    "stats": {
+                        "url": self.stats_url,
+                        "prefix": self.stats_prefix,
+                        "type": "graphite",
+                    },
+                    "websocket_url": self.websocket_url,
+                }
+            })
+
+    def test_tenant_info(self):
+        req = urllib.request.Request(
+            "http://localhost:%s/tenant-one/info" % self.port)
+        f = urllib.request.urlopen(req)
+        info = json.loads(f.read().decode('utf8'))
+        self.assertEqual(
+            info, {
+                "info": {
+                    "endpoint": "http://localhost:%s" % self.port,
+                    "tenant": "tenant-one",
+                    "capabilities": {
+                        "job_history": False
+                    },
+                    "stats": {
+                        "url": self.stats_url,
+                        "prefix": self.stats_prefix,
+                        "type": "graphite",
+                    },
+                    "websocket_url": self.websocket_url,
+                }
+            })
+
+
+class TestWebSocketInfo(TestInfo):
+
+    config_ini_data = {
+        'web': {
+            'websocket_url': 'wss://ws.example.com'
+        }
+    }
+
+
+class TestGraphiteUrl(TestInfo):
+
+    config_ini_data = {
+        'statsd': {
+            'prefix': 'example'
+        },
+        'web': {
+            'stats_url': 'https://graphite.example.com',
+        }
+    }
diff --git a/tools/zuul-changes.py b/tools/zuul-changes.py
index d258354..cdedf51 100755
--- a/tools/zuul-changes.py
+++ b/tools/zuul-changes.py
@@ -24,7 +24,7 @@
 parser.add_argument('pipeline', help='The name of the Zuul pipeline')
 options = parser.parse_args()
 
-data = urllib2.urlopen('%s/status.json' % options.url).read()
+data = urllib2.urlopen('%s/status' % options.url).read()
 data = json.loads(data)
 
 for pipeline in data['pipelines']:
diff --git a/zuul/cmd/web.py b/zuul/cmd/web.py
index abdb1cb..8b0e3ee 100755
--- a/zuul/cmd/web.py
+++ b/zuul/cmd/web.py
@@ -20,6 +20,7 @@
 import threading
 
 import zuul.cmd
+import zuul.model
 import zuul.web
 
 from zuul.lib.config import get_default
@@ -33,8 +34,11 @@
         self.web.stop()
 
     def _run(self):
+        info = zuul.model.WebInfo.fromConfig(self.config)
+
         params = dict()
 
+        params['info'] = info
         params['listen_address'] = get_default(self.config,
                                                'web', 'listen_address',
                                                '127.0.0.1')
diff --git a/zuul/configloader.py b/zuul/configloader.py
index f0f78b7..3511f96 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -365,11 +365,12 @@
 
     schema = vs.Schema(pragma)
 
-    def __init__(self):
+    def __init__(self, pcontext):
         self.log = logging.getLogger("zuul.PragmaParser")
+        self.pcontext = pcontext
 
     def fromYaml(self, conf):
-        with configuration_exceptions('project-template', conf):
+        with configuration_exceptions('pragma', conf):
             self.schema(conf)
 
         bm = conf.get('implied-branch-matchers')
@@ -384,8 +385,13 @@
 
 
 class NodeSetParser(object):
-    @staticmethod
-    def getSchema(anonymous=False):
+    def __init__(self, pcontext):
+        self.log = logging.getLogger("zuul.NodeSetParser")
+        self.pcontext = pcontext
+        self.schema = self.getSchema(False)
+        self.anon_schema = self.getSchema(True)
+
+    def getSchema(self, anonymous=False):
         node = {vs.Required('name'): to_list(str),
                 vs.Required('label'): str,
                 }
@@ -404,9 +410,11 @@
             nodeset[vs.Required('name')] = str
         return vs.Schema(nodeset)
 
-    @staticmethod
-    def fromYaml(conf, anonymous=False):
-        NodeSetParser.getSchema(anonymous)(conf)
+    def fromYaml(self, conf, anonymous=False):
+        if anonymous:
+            self.anon_schema(conf)
+        else:
+            self.schema(conf)
         ns = model.NodeSet(conf.get('name'), conf.get('_source_context'))
         node_names = set()
         group_names = set()
@@ -432,8 +440,12 @@
 
 
 class SecretParser(object):
-    @staticmethod
-    def getSchema():
+    def __init__(self, pcontext):
+        self.log = logging.getLogger("zuul.SecretParser")
+        self.pcontext = pcontext
+        self.schema = self.getSchema()
+
+    def getSchema(self):
         data = {str: vs.Any(str, EncryptedPKCS1_OAEP)}
 
         secret = {vs.Required('name'): str,
@@ -444,10 +456,9 @@
 
         return vs.Schema(secret)
 
-    @staticmethod
-    def fromYaml(layout, conf):
+    def fromYaml(self, conf):
         with configuration_exceptions('secret', conf):
-            SecretParser.getSchema()(conf)
+            self.schema(conf)
         s = model.Secret(conf['name'], conf['_source_context'])
         s.secret_data = conf['data']
         return s
@@ -491,6 +502,7 @@
                       # validation happens in NodeSetParser
                       'nodeset': vs.Any(dict, str),
                       'timeout': int,
+                      'post-timeout': int,
                       'attempts': int,
                       'pre-run': to_list(str),
                       'post-run': to_list(str),
@@ -500,6 +512,8 @@
                       'roles': to_list(role),
                       'required-projects': to_list(vs.Any(job_project, str)),
                       'vars': dict,
+                      'host_vars': {str: dict},
+                      'group_vars': {str: dict},
                       'dependencies': to_list(str),
                       'allowed-projects': to_list(str),
                       'override-branch': str,
@@ -518,6 +532,7 @@
         'abstract',
         'protected',
         'timeout',
+        'post-timeout',
         'workspace',
         'voting',
         'hold-following-changes',
@@ -531,8 +546,11 @@
         'override-checkout',
     ]
 
-    @staticmethod
-    def _getImpliedBranches(tenant, job):
+    def __init__(self, pcontext):
+        self.log = logging.getLogger("zuul.JobParser")
+        self.pcontext = pcontext
+
+    def _getImpliedBranches(self, job):
         # If the user has set a pragma directive for this, use the
         # value (if unset, the value is None).
         if job.source_context.implied_branch_matchers is True:
@@ -549,7 +567,8 @@
 
         # If this project only has one branch, don't create implied
         # branch matchers.  This way central job repos can work.
-        branches = tenant.getProjectBranches(job.source_context.project)
+        branches = self.pcontext.tenant.getProjectBranches(
+            job.source_context.project)
         if len(branches) == 1:
             return None
 
@@ -557,12 +576,11 @@
             return job.source_context.implied_branches
         return [job.source_context.branch]
 
-    @staticmethod
-    def fromYaml(tenant, layout, conf, project_pipeline=False,
-                 name=None, validate=True):
+    def fromYaml(self, conf, project_pipeline=False, name=None,
+                 validate=True):
         if validate:
             with configuration_exceptions('job', conf):
-                JobParser.schema(conf)
+                self.schema(conf)
 
         if name is None:
             name = conf['name']
@@ -596,14 +614,16 @@
         for secret_config in as_list(conf.get('secrets', [])):
             if isinstance(secret_config, str):
                 secret_name = secret_config
-                secret = layout.secrets.get(secret_name)
+                secret = self.pcontext.layout.secrets.get(secret_name)
             else:
                 secret_name = secret_config['name']
-                secret = layout.secrets.get(secret_config['secret'])
+                secret = self.pcontext.layout.secrets.get(
+                    secret_config['secret'])
             if secret is None:
                 raise SecretNotFoundError(secret_name)
-            if secret_name == 'zuul':
-                raise Exception("Secrets named 'zuul' are not allowed.")
+            if secret_name == 'zuul' or secret_name == 'nodepool':
+                raise Exception("Secrets named 'zuul' or 'nodepool' "
+                                "are not allowed.")
             if not secret.source_context.isSameProject(job.source_context):
                 raise Exception(
                     "Unable to use secret %s.  Secrets must be "
@@ -623,9 +643,15 @@
         if secrets and not conf['_source_context'].trusted:
             job.post_review = True
 
-        if conf.get('timeout') and tenant.max_job_timeout != -1 and \
-           int(conf['timeout']) > tenant.max_job_timeout:
-            raise MaxTimeoutError(job, tenant)
+        if (conf.get('timeout') and
+            self.pcontext.tenant.max_job_timeout != -1 and
+            int(conf['timeout']) > self.pcontext.tenant.max_job_timeout):
+            raise MaxTimeoutError(job, self.pcontext.tenant)
+
+        if (conf.get('post-timeout') and
+            self.pcontext.tenant.max_job_timeout != -1 and
+            int(conf['post-timeout']) > self.pcontext.tenant.max_job_timeout):
+            raise MaxTimeoutError(job, self.pcontext.tenant)
 
         if 'post-review' in conf:
             if conf['post-review']:
@@ -640,13 +666,13 @@
         if 'roles' in conf:
             for role in conf.get('roles', []):
                 if 'zuul' in role:
-                    r = JobParser._makeZuulRole(tenant, job, role)
+                    r = self._makeZuulRole(job, role)
                     if r:
                         roles.append(r)
         # A job's repo should be an implicit role source for that job,
         # but not in a project-pipeline variant.
         if not project_pipeline:
-            r = JobParser._makeImplicitRole(job)
+            r = self._makeImplicitRole(job)
             roles.insert(0, r)
         job.addRoles(roles)
 
@@ -668,7 +694,7 @@
                                         job.roles, secrets)
             job.run = (run,)
 
-        for k in JobParser.simple_attributes:
+        for k in self.simple_attributes:
             a = k.replace('-', '_')
             if k in conf:
                 setattr(job, a, conf[k])
@@ -676,14 +702,15 @@
             conf_nodeset = conf['nodeset']
             if isinstance(conf_nodeset, str):
                 # This references an existing named nodeset in the layout.
-                ns = layout.nodesets.get(conf_nodeset)
+                ns = self.pcontext.layout.nodesets.get(conf_nodeset)
                 if ns is None:
                     raise NodesetNotFoundError(conf_nodeset)
             else:
-                ns = NodeSetParser.fromYaml(conf_nodeset, anonymous=True)
-            if tenant.max_nodes_per_job != -1 and \
-               len(ns) > tenant.max_nodes_per_job:
-                raise MaxNodeError(job, tenant)
+                ns = self.pcontext.nodeset_parser.fromYaml(
+                    conf_nodeset, anonymous=True)
+            if self.pcontext.tenant.max_nodes_per_job != -1 and \
+               len(ns) > self.pcontext.tenant.max_nodes_per_job:
+                raise MaxNodeError(job, self.pcontext.tenant)
             job.nodeset = ns
 
         if 'required-projects' in conf:
@@ -699,7 +726,8 @@
                     project_name = project
                     project_override_branch = None
                     project_override_checkout = None
-                (trusted, project) = tenant.getProject(project_name)
+                (trusted, project) = self.pcontext.tenant.getProject(
+                    project_name)
                 if project is None:
                     raise Exception("Unknown project %s" % (project_name,))
                 job_project = model.JobProject(project.canonical_name,
@@ -716,15 +744,30 @@
 
         variables = conf.get('vars', None)
         if variables:
-            if 'zuul' in variables:
-                raise Exception("Variables named 'zuul' are not allowed.")
+            if 'zuul' in variables or 'nodepool' in variables:
+                raise Exception("Variables named 'zuul' or 'nodepool' "
+                                "are not allowed.")
             job.variables = variables
+        host_variables = conf.get('host_vars', None)
+        if host_variables:
+            for host, hvars in host_variables.items():
+                if 'zuul' in hvars or 'nodepool' in hvars:
+                    raise Exception("Variables named 'zuul' or 'nodepool' "
+                                    "are not allowed.")
+            job.host_variables = host_variables
+        group_variables = conf.get('group_vars', None)
+        if group_variables:
+            for group, gvars in group_variables.items():
+                if 'zuul' in gvars or 'nodepool' in gvars:
+                    raise Exception("Variables named 'zuul' or 'nodepool' "
+                                    "are not allowed.")
+            job.group_variables = group_variables
 
         allowed_projects = conf.get('allowed-projects', None)
         if allowed_projects:
             allowed = []
             for p in as_list(allowed_projects):
-                (trusted, project) = tenant.getProject(p)
+                (trusted, project) = self.pcontext.tenant.getProject(p)
                 if project is None:
                     raise Exception("Unknown project %s" % (p,))
                 allowed.append(project.name)
@@ -732,7 +775,7 @@
 
         branches = None
         if ('branches' not in conf):
-            branches = JobParser._getImpliedBranches(tenant, job)
+            branches = self._getImpliedBranches(job)
         if (not branches) and ('branches' in conf):
             branches = as_list(conf['branches'])
         if branches:
@@ -750,11 +793,10 @@
                 matchers)
         return job
 
-    @staticmethod
-    def _makeZuulRole(tenant, job, role):
+    def _makeZuulRole(self, job, role):
         name = role['zuul'].split('/')[-1]
 
-        (trusted, project) = tenant.getProject(role['zuul'])
+        (trusted, project) = self.pcontext.tenant.getProject(role['zuul'])
         if project is None:
             return None
 
@@ -762,8 +804,7 @@
                               project.connection_name,
                               project.name)
 
-    @staticmethod
-    def _makeImplicitRole(job):
+    def _makeImplicitRole(self, job):
         project = job.source_context.project
         name = project.name.split('/')[-1]
         name = JobParser.ANSIBLE_ROLE_RE.sub('', name)
@@ -774,10 +815,9 @@
 
 
 class ProjectTemplateParser(object):
-    def __init__(self, tenant, layout):
+    def __init__(self, pcontext):
         self.log = logging.getLogger("zuul.ProjectTemplateParser")
-        self.tenant = tenant
-        self.layout = layout
+        self.pcontext = pcontext
         self.schema = self.getSchema()
 
     def getSchema(self):
@@ -799,7 +839,7 @@
             'jobs': job_list,
         }
 
-        for p in self.layout.pipelines.values():
+        for p in self.pcontext.layout.pipelines.values():
             project_template[p.name] = pipeline_contents
         return vs.Schema(project_template)
 
@@ -810,7 +850,7 @@
         source_context = conf['_source_context']
         project_template = model.ProjectConfig(conf['name'], source_context)
         start_mark = conf['_start_mark']
-        for pipeline in self.layout.pipelines.values():
+        for pipeline in self.pcontext.layout.pipelines.values():
             conf_pipeline = conf.get(pipeline.name)
             if not conf_pipeline:
                 continue
@@ -839,19 +879,17 @@
             # validate that the job is existing
             with configuration_exceptions('project or project-template',
                                           attrs):
-                self.layout.getJob(jobname)
+                self.pcontext.layout.getJob(jobname)
 
-            job_list.addJob(JobParser.fromYaml(self.tenant, self.layout,
-                                               attrs, project_pipeline=True,
-                                               name=jobname, validate=False))
+            job_list.addJob(self.pcontext.job_parser.fromYaml(
+                attrs, project_pipeline=True,
+                name=jobname, validate=False))
 
 
 class ProjectParser(object):
-    def __init__(self, tenant, layout, project_template_parser):
+    def __init__(self, pcontext):
         self.log = logging.getLogger("zuul.ProjectParser")
-        self.tenant = tenant
-        self.layout = layout
-        self.project_template_parser = project_template_parser
+        self.pcontext = pcontext
         self.schema = self.getSchema()
 
     def getSchema(self):
@@ -874,7 +912,7 @@
             'jobs': job_list
         }
 
-        for p in self.layout.pipelines.values():
+        for p in self.pcontext.layout.pipelines.values():
             project[p.name] = pipeline_contents
         return vs.Schema(project)
 
@@ -885,7 +923,7 @@
 
         with configuration_exceptions('project', conf_list[0]):
             project_name = conf_list[0]['name']
-            (trusted, project) = self.tenant.getProject(project_name)
+            (trusted, project) = self.pcontext.tenant.getProject(project_name)
             if project is None:
                 raise ProjectNotFoundError(project_name)
             project_config = model.ProjectConfig(project.canonical_name)
@@ -903,16 +941,16 @@
                 # parsing the definition as a template, then applying
                 # all of the templates, including the newly parsed
                 # one, in order.
-                project_template = self.project_template_parser.fromYaml(
-                    conf, validate=False)
+                project_template = self.pcontext.project_template_parser.\
+                    fromYaml(conf, validate=False)
                 # If this project definition is in a place where it
                 # should get implied branch matchers, set it.
                 if (not conf['_source_context'].trusted):
                     implied_branch = conf['_source_context'].branch
                 for name in conf_templates:
-                    if name not in self.layout.project_templates:
+                    if name not in self.pcontext.layout.project_templates:
                         raise TemplateNotFoundError(name)
-                configs.extend([(self.layout.project_templates[name],
+                configs.extend([(self.pcontext.layout.project_templates[name],
                                  implied_branch)
                                 for name in conf_templates])
                 configs.append((project_template, implied_branch))
@@ -930,7 +968,7 @@
             project_config.merge_mode = model.MERGER_MAP['merge-resolve']
         if project_config.default_branch is None:
             project_config.default_branch = 'master'
-        for pipeline in self.layout.pipelines.values():
+        for pipeline in self.pcontext.layout.pipelines.values():
             project_pipeline = model.ProjectPipelineConfig()
             queue_name = None
             debug = False
@@ -958,8 +996,6 @@
 
 
 class PipelineParser(object):
-    log = logging.getLogger("zuul.PipelineParser")
-
     # A set of reporter configuration keys to action mapping
     reporter_actions = {
         'start': 'start_actions',
@@ -969,8 +1005,12 @@
         'disabled': 'disabled_actions',
     }
 
-    @staticmethod
-    def getDriverSchema(dtype, connections):
+    def __init__(self, pcontext):
+        self.log = logging.getLogger("zuul.PipelineParser")
+        self.pcontext = pcontext
+        self.schema = self.getSchema()
+
+    def getDriverSchema(self, dtype):
         methods = {
             'trigger': 'getTriggerSchema',
             'reporter': 'getReporterSchema',
@@ -980,15 +1020,15 @@
 
         schema = {}
         # Add the configured connections as available layout options
-        for connection_name, connection in connections.connections.items():
+        for connection_name, connection in \
+            self.pcontext.connections.connections.items():
             method = getattr(connection.driver, methods[dtype], None)
             if method:
                 schema[connection_name] = to_list(method())
 
         return schema
 
-    @staticmethod
-    def getSchema(layout, connections):
+    def getSchema(self):
         manager = vs.Any('independent',
                          'dependent')
 
@@ -1021,23 +1061,18 @@
                     '_source_context': model.SourceContext,
                     '_start_mark': ZuulMark,
                     }
-        pipeline['require'] = PipelineParser.getDriverSchema('require',
-                                                             connections)
-        pipeline['reject'] = PipelineParser.getDriverSchema('reject',
-                                                            connections)
-        pipeline['trigger'] = vs.Required(
-            PipelineParser.getDriverSchema('trigger', connections))
+        pipeline['require'] = self.getDriverSchema('require')
+        pipeline['reject'] = self.getDriverSchema('reject')
+        pipeline['trigger'] = vs.Required(self.getDriverSchema('trigger'))
         for action in ['start', 'success', 'failure', 'merge-failure',
                        'disabled']:
-            pipeline[action] = PipelineParser.getDriverSchema('reporter',
-                                                              connections)
+            pipeline[action] = self.getDriverSchema('reporter')
         return vs.Schema(pipeline)
 
-    @staticmethod
-    def fromYaml(layout, connections, scheduler, conf):
+    def fromYaml(self, conf):
         with configuration_exceptions('pipeline', conf):
-            PipelineParser.getSchema(layout, connections)(conf)
-        pipeline = model.Pipeline(conf['name'], layout)
+            self.schema(conf)
+        pipeline = model.Pipeline(conf['name'], self.pcontext.layout)
         pipeline.description = conf.get('description')
 
         precedence = model.PRECEDENCE_MAP[conf.get('precedence')]
@@ -1062,13 +1097,13 @@
         pipeline.post_review = conf.get(
             'post-review', False)
 
-        for conf_key, action in PipelineParser.reporter_actions.items():
+        for conf_key, action in self.reporter_actions.items():
             reporter_set = []
             if conf.get(conf_key):
                 for reporter_name, params \
                     in conf.get(conf_key).items():
-                    reporter = connections.getReporter(reporter_name,
-                                                       params)
+                    reporter = self.pcontext.connections.getReporter(
+                        reporter_name, params)
                     reporter.setAction(conf_key)
                     reporter_set.append(reporter)
             setattr(pipeline, action, reporter_set)
@@ -1094,26 +1129,27 @@
         manager_name = conf['manager']
         if manager_name == 'dependent':
             manager = zuul.manager.dependent.DependentPipelineManager(
-                scheduler, pipeline)
+                self.pcontext.scheduler, pipeline)
         elif manager_name == 'independent':
             manager = zuul.manager.independent.IndependentPipelineManager(
-                scheduler, pipeline)
+                self.pcontext.scheduler, pipeline)
 
         pipeline.setManager(manager)
-        layout.pipelines[conf['name']] = pipeline
+        self.pcontext.layout.pipelines[conf['name']] = pipeline
 
         for source_name, require_config in conf.get('require', {}).items():
-            source = connections.getSource(source_name)
+            source = self.pcontext.connections.getSource(source_name)
             manager.ref_filters.extend(
                 source.getRequireFilters(require_config))
 
         for source_name, reject_config in conf.get('reject', {}).items():
-            source = connections.getSource(source_name)
+            source = self.pcontext.connections.getSource(source_name)
             manager.ref_filters.extend(
                 source.getRejectFilters(reject_config))
 
         for trigger_name, trigger_config in conf.get('trigger').items():
-            trigger = connections.getTrigger(trigger_name, trigger_config)
+            trigger = self.pcontext.connections.getTrigger(
+                trigger_name, trigger_config)
             pipeline.triggers.append(trigger)
             manager.event_filters.extend(
                 trigger.getEventFilters(conf['trigger'][trigger_name]))
@@ -1122,8 +1158,12 @@
 
 
 class SemaphoreParser(object):
-    @staticmethod
-    def getSchema():
+    def __init__(self, pcontext):
+        self.log = logging.getLogger("zuul.SemaphoreParser")
+        self.pcontext = pcontext
+        self.schema = self.getSchema()
+
+    def getSchema(self):
         semaphore = {vs.Required('name'): str,
                      'max': int,
                      '_source_context': model.SourceContext,
@@ -1132,16 +1172,43 @@
 
         return vs.Schema(semaphore)
 
-    @staticmethod
-    def fromYaml(conf):
-        SemaphoreParser.getSchema()(conf)
+    def fromYaml(self, conf):
+        self.schema(conf)
         semaphore = model.Semaphore(conf['name'], conf.get('max', 1))
         semaphore.source_context = conf.get('_source_context')
         return semaphore
 
 
+class ParseContext(object):
+    """Hold information about a particular run of the parser"""
+
+    def __init__(self, connections, scheduler, tenant, layout):
+        self.connections = connections
+        self.scheduler = scheduler
+        self.tenant = tenant
+        self.layout = layout
+        self.pragma_parser = PragmaParser(self)
+        self.pipeline_parser = PipelineParser(self)
+        self.nodeset_parser = NodeSetParser(self)
+        self.secret_parser = SecretParser(self)
+        self.job_parser = JobParser(self)
+        self.semaphore_parser = SemaphoreParser(self)
+        self.project_template_parser = None
+        self.project_parser = None
+
+    def setPipelines(self):
+        # Call after pipelines are fixed in the layout to construct
+        # the project parser, which relies on them.
+        self.project_template_parser = ProjectTemplateParser(self)
+        self.project_parser = ProjectParser(self)
+
+
 class TenantParser(object):
-    log = logging.getLogger("zuul.TenantParser")
+    def __init__(self, connections, scheduler, merger):
+        self.log = logging.getLogger("zuul.TenantParser")
+        self.connections = connections
+        self.scheduler = scheduler
+        self.merger = merger
 
     classes = vs.Any('pipeline', 'job', 'semaphore', 'project',
                      'project-template', 'nodeset', 'secret')
@@ -1168,36 +1235,31 @@
         'untrusted-projects': to_list(project_or_group),
     })
 
-    @staticmethod
-    def validateTenantSources(connections):
+    def validateTenantSources(self):
         def v(value, path=[]):
             if isinstance(value, dict):
                 for k, val in value.items():
-                    connections.getSource(k)
-                    TenantParser.validateTenantSource(val, path + [k])
+                    self.connections.getSource(k)
+                    self.validateTenantSource(val, path + [k])
             else:
                 raise vs.Invalid("Invalid tenant source", path)
         return v
 
-    @staticmethod
-    def validateTenantSource(value, path=[]):
-        TenantParser.tenant_source(value)
+    def validateTenantSource(self, value, path=[]):
+        self.tenant_source(value)
 
-    @staticmethod
-    def getSchema(connections=None):
+    def getSchema(self):
         tenant = {vs.Required('name'): str,
                   'max-nodes-per-job': int,
                   'max-job-timeout': int,
-                  'source': TenantParser.validateTenantSources(connections),
+                  'source': self.validateTenantSources(),
                   'exclude-unprotected-branches': bool,
                   'default-parent': str,
                   }
         return vs.Schema(tenant)
 
-    @staticmethod
-    def fromYaml(base, project_key_dir, connections, scheduler, merger, conf,
-                 old_tenant):
-        TenantParser.getSchema(connections)(conf)
+    def fromYaml(self, base, project_key_dir, conf, old_tenant):
+        self.getSchema()(conf)
         tenant = model.Tenant(conf['name'])
         if conf.get('max-nodes-per-job') is not None:
             tenant.max_nodes_per_job = conf['max-nodes-per-job']
@@ -1212,48 +1274,41 @@
         unparsed_config = model.UnparsedTenantConfig()
         # tpcs is TenantProjectConfigs
         config_tpcs, untrusted_tpcs = \
-            TenantParser._loadTenantProjects(
-                project_key_dir, connections, conf)
+            self._loadTenantProjects(project_key_dir, conf)
         for tpc in config_tpcs:
             tenant.addConfigProject(tpc)
         for tpc in untrusted_tpcs:
             tenant.addUntrustedProject(tpc)
 
         for tpc in config_tpcs + untrusted_tpcs:
-            TenantParser._getProjectBranches(tenant, tpc, old_tenant)
-            TenantParser._resolveShadowProjects(tenant, tpc)
+            self._getProjectBranches(tenant, tpc, old_tenant)
+            self._resolveShadowProjects(tenant, tpc)
 
         if old_tenant:
             cached = True
         else:
             cached = False
         tenant.config_projects_config, tenant.untrusted_projects_config = \
-            TenantParser._loadTenantInRepoLayouts(merger, connections,
-                                                  tenant.config_projects,
-                                                  tenant.untrusted_projects,
-                                                  cached, tenant)
-        unparsed_config.extend(tenant.config_projects_config, tenant)
-        unparsed_config.extend(tenant.untrusted_projects_config, tenant)
-        tenant.layout = TenantParser._parseLayout(base, tenant,
-                                                  unparsed_config,
-                                                  scheduler,
-                                                  connections)
+            self._loadTenantInRepoLayouts(tenant.config_projects,
+                                          tenant.untrusted_projects,
+                                          cached, tenant)
+        unparsed_config.extend(tenant.config_projects_config)
+        unparsed_config.extend(tenant.untrusted_projects_config)
+        tenant.layout = self._parseLayout(base, tenant, unparsed_config)
         return tenant
 
-    @staticmethod
-    def _resolveShadowProjects(tenant, tpc):
+    def _resolveShadowProjects(self, tenant, tpc):
         shadow_projects = []
         for sp in tpc.shadow_projects:
             shadow_projects.append(tenant.getProject(sp)[1])
         tpc.shadow_projects = frozenset(shadow_projects)
 
-    @staticmethod
-    def _getProjectBranches(tenant, tpc, old_tenant):
+    def _getProjectBranches(self, tenant, tpc, old_tenant):
         # If we're performing a tenant reconfiguration, we will have
         # an old_tenant object, however, we may be doing so because of
         # a branch creation event, so if we don't have any cached
         # data, query the branches again as well.
-        if old_tenant and tpc.project.unparsed_config:
+        if old_tenant and tpc.project.unparsed_branch_config:
             branches = old_tenant.getProjectBranches(tpc.project)[:]
         else:
             branches = sorted(tpc.project.source.getProjectBranches(
@@ -1263,17 +1318,15 @@
             branches = ['master'] + branches
         tpc.branches = branches
 
-    @staticmethod
-    def _loadProjectKeys(project_key_dir, connection_name, project):
+    def _loadProjectKeys(self, project_key_dir, connection_name, project):
         project.private_key_file = (
             os.path.join(project_key_dir, connection_name,
                          project.name + '.pem'))
 
-        TenantParser._generateKeys(project)
-        TenantParser._loadKeys(project)
+        self._generateKeys(project)
+        self._loadKeys(project)
 
-    @staticmethod
-    def _generateKeys(project):
+    def _generateKeys(self, project):
         if os.path.isfile(project.private_key_file):
             return
 
@@ -1281,7 +1334,7 @@
         if not os.path.isdir(key_dir):
             os.makedirs(key_dir, 0o700)
 
-        TenantParser.log.info(
+        self.log.info(
             "Generating RSA keypair for project %s" % (project.name,)
         )
         private_key, public_key = encryption.generate_rsa_keypair()
@@ -1289,7 +1342,7 @@
 
         # Dump keys to filesystem.  We only save the private key
         # because the public key can be constructed from it.
-        TenantParser.log.info(
+        self.log.info(
             "Saving RSA keypair for project %s to %s" % (
                 project.name, project.private_key_file)
         )
@@ -1344,14 +1397,12 @@
 
         return tenant_project_config
 
-    @staticmethod
-    def _getProjects(source, conf, current_include):
+    def _getProjects(self, source, conf, current_include):
         # Return a list of project objects whether conf is a dict or a str
         projects = []
         if isinstance(conf, str):
             # A simple project name string
-            projects.append(TenantParser._getProject(
-                source, conf, current_include))
+            projects.append(self._getProject(source, conf, current_include))
         elif len(conf.keys()) > 1 and 'projects' in conf:
             # This is a project group
             if 'include' in conf:
@@ -1362,19 +1413,18 @@
                 exclude = set(as_list(conf['exclude']))
                 current_include = current_include - exclude
             for project in conf['projects']:
-                sub_projects = TenantParser._getProjects(
+                sub_projects = self._getProjects(
                     source, project, current_include)
                 projects.extend(sub_projects)
         elif len(conf.keys()) == 1:
             # A project with overrides
-            projects.append(TenantParser._getProject(
+            projects.append(self._getProject(
                 source, conf, current_include))
         else:
             raise Exception("Unable to parse project %s", conf)
         return projects
 
-    @staticmethod
-    def _loadTenantProjects(project_key_dir, connections, conf_tenant):
+    def _loadTenantProjects(self, project_key_dir, conf_tenant):
         config_projects = []
         untrusted_projects = []
 
@@ -1382,38 +1432,32 @@
                                      'secret', 'project-template', 'nodeset'])
 
         for source_name, conf_source in conf_tenant.get('source', {}).items():
-            source = connections.getSource(source_name)
+            source = self.connections.getSource(source_name)
 
             current_include = default_include
             for conf_repo in conf_source.get('config-projects', []):
                 # tpcs = TenantProjectConfigs
-                tpcs = TenantParser._getProjects(source, conf_repo,
-                                                 current_include)
+                tpcs = self._getProjects(source, conf_repo, current_include)
                 for tpc in tpcs:
-                    TenantParser._loadProjectKeys(
+                    self._loadProjectKeys(
                         project_key_dir, source_name, tpc.project)
                     config_projects.append(tpc)
 
             current_include = frozenset(default_include - set(['pipeline']))
             for conf_repo in conf_source.get('untrusted-projects', []):
-                tpcs = TenantParser._getProjects(source, conf_repo,
-                                                 current_include)
+                tpcs = self._getProjects(source, conf_repo, current_include)
                 for tpc in tpcs:
-                    TenantParser._loadProjectKeys(
+                    self._loadProjectKeys(
                         project_key_dir, source_name, tpc.project)
                     untrusted_projects.append(tpc)
 
         return config_projects, untrusted_projects
 
-    @staticmethod
-    def _loadTenantInRepoLayouts(merger, connections, config_projects,
-                                 untrusted_projects, cached, tenant):
+    def _loadTenantInRepoLayouts(self, config_projects, untrusted_projects,
+                                 cached, tenant):
         config_projects_config = model.UnparsedTenantConfig()
         untrusted_projects_config = model.UnparsedTenantConfig()
-        # project -> config; these will replace
-        # project.unparsed_config if this method succesfully
-        # completes
-        new_project_unparsed_config = {}
         # project -> branch -> config; these will replace
         # project.unparsed_branch_config if this method successfully
         # completes
@@ -1426,21 +1470,28 @@
         # data and is inserted in the ordered jobs list for later
         # processing.
         class CachedDataJob(object):
-            def __init__(self, config_project, project):
+            def __init__(self, config_project, project, branch):
                 self.config_project = config_project
                 self.project = project
+                self.branch = branch
 
         for project in config_projects:
             # If we have cached data (this is a reconfiguration) use it.
-            if cached and project.unparsed_config:
-                jobs.append(CachedDataJob(True, project))
+            if cached and project.unparsed_branch_config:
+                # Note: this should only be one branch (master), as
+                # that's all we will initially load below in the
+                # un-cached case.
+                for branch in project.unparsed_branch_config.keys():
+                    jobs.append(CachedDataJob(True, project, branch))
                 continue
             # Otherwise, prepare an empty unparsed config object to
             # hold cached data later.
-            new_project_unparsed_config[project] = model.UnparsedTenantConfig()
+            new_project_unparsed_branch_config[project] = {}
+            new_project_unparsed_branch_config[project]['master'] = \
+                model.UnparsedTenantConfig()
             # Get main config files.  These files are permitted the
             # full range of configuration.
-            job = merger.getFiles(
+            job = self.merger.getFiles(
                 project.source.connection.connection_name,
                 project.name, 'master',
                 files=['zuul.yaml', '.zuul.yaml'],
@@ -1456,12 +1507,12 @@
             if not tpc.load_classes:
                 continue
             # If we have cached data (this is a reconfiguration) use it.
-            if cached and project.unparsed_config:
-                jobs.append(CachedDataJob(False, project))
+            if cached and project.unparsed_branch_config:
+                for branch in project.unparsed_branch_config.keys():
+                    jobs.append(CachedDataJob(False, project, branch))
                 continue
             # Otherwise, prepare an empty unparsed config object to
             # hold cached data later.
-            new_project_unparsed_config[project] = model.UnparsedTenantConfig()
             new_project_unparsed_branch_config[project] = {}
             # Get in-project-repo config files which have a restricted
             # set of options.
@@ -1473,7 +1524,7 @@
             for branch in branches:
                 new_project_unparsed_branch_config[project][branch] = \
                     model.UnparsedTenantConfig()
-                job = merger.getFiles(
+                job = self.merger.getFiles(
                     project.source.connection.connection_name,
                     project.name, branch,
                     files=['zuul.yaml', '.zuul.yaml'],
@@ -1488,22 +1539,22 @@
             # same order they were defined in the main config file.
             # This is important for correct inheritance.
             if isinstance(job, CachedDataJob):
-                TenantParser.log.info(
+                self.log.info(
                     "Loading previously parsed configuration from %s" %
                     (job.project,))
                 if job.config_project:
                     config_projects_config.extend(
-                        job.project.unparsed_config, tenant)
+                        job.project.unparsed_branch_config[job.branch])
                 else:
                     untrusted_projects_config.extend(
-                        job.project.unparsed_config, tenant)
+                        job.project.unparsed_branch_config[job.branch])
                 continue
-            TenantParser.log.debug("Waiting for cat job %s" % (job,))
+            self.log.debug("Waiting for cat job %s" % (job,))
             job.wait()
             if not job.updated:
                 raise Exception("Cat job %s failed" % (job,))
-            TenantParser.log.debug("Cat job %s got files %s" %
-                                   (job, job.files.keys()))
+            self.log.debug("Cat job %s got files %s" %
+                           (job, job.files.keys()))
             loaded = False
             files = sorted(job.files.keys())
             for conf_root in ['zuul.yaml', 'zuul.d', '.zuul.yaml', '.zuul.d']:
@@ -1513,114 +1564,105 @@
                         continue
                     # Don't load more than one configuration in a repo-branch
                     if loaded and loaded != conf_root:
-                        TenantParser.log.warning(
+                        self.log.warning(
                             "Multiple configuration files in %s" %
                             (job.source_context,))
                         continue
                     loaded = conf_root
                     source_context = job.source_context.copy()
                     source_context.path = fn
-                    TenantParser.log.info(
+                    self.log.info(
                         "Loading configuration from %s" %
                         (source_context,))
                     project = source_context.project
                     branch = source_context.branch
                     if source_context.trusted:
-                        incdata = TenantParser._parseConfigProjectLayout(
-                            job.files[fn], source_context, tenant)
-                        config_projects_config.extend(incdata, tenant)
+                        incdata = self.loadConfigProjectLayout(
+                            job.files[fn], source_context)
+                        config_projects_config.extend(incdata)
                     else:
-                        incdata = TenantParser._parseUntrustedProjectLayout(
-                            job.files[fn], source_context, tenant)
-                        untrusted_projects_config.extend(incdata, tenant)
-                    new_project_unparsed_config[project].extend(
-                        incdata, tenant)
-                    if branch in new_project_unparsed_branch_config.get(
-                            project, {}):
-                        new_project_unparsed_branch_config[project][branch].\
-                            extend(incdata, tenant)
+                        incdata = self.loadUntrustedProjectLayout(
+                            job.files[fn], source_context)
+                        untrusted_projects_config.extend(incdata)
+                    new_project_unparsed_branch_config[project][branch].\
+                        extend(incdata)
+        # Now that we've successfully loaded all of the configuration,
         # cache the unparsed data on the project objects.
-        for project, data in new_project_unparsed_config.items():
-            project.unparsed_config = data
         for project, branch_config in \
             new_project_unparsed_branch_config.items():
             project.unparsed_branch_config = branch_config
         return config_projects_config, untrusted_projects_config
 
-    @staticmethod
-    def _parseConfigProjectLayout(data, source_context, tenant):
+    def loadConfigProjectLayout(self, data, source_context):
         # This is the top-level configuration for a tenant.
         config = model.UnparsedTenantConfig()
         with early_configuration_exceptions(source_context):
-            config.extend(safe_load_yaml(data, source_context), tenant)
+            config.extend(safe_load_yaml(data, source_context))
         return config
 
-    @staticmethod
-    def _parseUntrustedProjectLayout(data, source_context, tenant):
+    def loadUntrustedProjectLayout(self, data, source_context):
         config = model.UnparsedTenantConfig()
         with early_configuration_exceptions(source_context):
-            config.extend(safe_load_yaml(data, source_context), tenant)
+            config.extend(safe_load_yaml(data, source_context))
         if config.pipelines:
             with configuration_exceptions('pipeline', config.pipelines[0]):
                 raise PipelineNotPermittedError()
         return config
 
-    @staticmethod
-    def _getLoadClasses(tenant, conf_object):
+    def _getLoadClasses(self, tenant, conf_object):
         project = conf_object['_source_context'].project
         tpc = tenant.project_configs[project.canonical_name]
         return tpc.load_classes
 
-    @staticmethod
-    def _parseLayoutItems(layout, tenant, data, scheduler, connections,
+    def _parseLayoutItems(self, layout, tenant, data,
                           skip_pipelines=False, skip_semaphores=False):
+        pcontext = ParseContext(self.connections, self.scheduler,
+                                tenant, layout)
         # Handle pragma items first since they modify the source context
         # used by other classes.
-        pragma_parser = PragmaParser()
         for config_pragma in data.pragmas:
-            pragma_parser.fromYaml(config_pragma)
+            pcontext.pragma_parser.fromYaml(config_pragma)
 
         if not skip_pipelines:
             for config_pipeline in data.pipelines:
-                classes = TenantParser._getLoadClasses(
-                    tenant, config_pipeline)
+                classes = self._getLoadClasses(tenant, config_pipeline)
                 if 'pipeline' not in classes:
                     continue
-                layout.addPipeline(PipelineParser.fromYaml(
-                    layout, connections,
-                    scheduler, config_pipeline))
+                layout.addPipeline(pcontext.pipeline_parser.fromYaml(
+                    config_pipeline))
+        pcontext.setPipelines()
 
         for config_nodeset in data.nodesets:
-            classes = TenantParser._getLoadClasses(tenant, config_nodeset)
+            classes = self._getLoadClasses(tenant, config_nodeset)
             if 'nodeset' not in classes:
                 continue
             with configuration_exceptions('nodeset', config_nodeset):
-                layout.addNodeSet(NodeSetParser.fromYaml(
+                layout.addNodeSet(pcontext.nodeset_parser.fromYaml(
                     config_nodeset))
 
         for config_secret in data.secrets:
-            classes = TenantParser._getLoadClasses(tenant, config_secret)
+            classes = self._getLoadClasses(tenant, config_secret)
             if 'secret' not in classes:
                 continue
             with configuration_exceptions('secret', config_secret):
-                layout.addSecret(SecretParser.fromYaml(layout, config_secret))
+                layout.addSecret(pcontext.secret_parser.fromYaml(
+                    config_secret))
 
         for config_job in data.jobs:
-            classes = TenantParser._getLoadClasses(tenant, config_job)
+            classes = self._getLoadClasses(tenant, config_job)
             if 'job' not in classes:
                 continue
             with configuration_exceptions('job', config_job):
-                job = JobParser.fromYaml(tenant, layout, config_job)
+                job = pcontext.job_parser.fromYaml(config_job)
                 added = layout.addJob(job)
                 if not added:
-                    TenantParser.log.debug(
+                    self.log.debug(
                         "Skipped adding job %s which shadows an existing job" %
                         (job,))
 
         # Now that all the jobs are loaded, verify their parents exist
         for config_job in data.jobs:
-            classes = TenantParser._getLoadClasses(tenant, config_job)
+            classes = self._getLoadClasses(tenant, config_job)
             if 'job' not in classes:
                 continue
             with configuration_exceptions('job', config_job):
@@ -1637,25 +1679,26 @@
         else:
             semaphore_layout = layout
         for config_semaphore in data.semaphores:
-            classes = TenantParser._getLoadClasses(
+            classes = self._getLoadClasses(
                 tenant, config_semaphore)
             if 'semaphore' not in classes:
                 continue
             with configuration_exceptions('semaphore', config_semaphore):
-                semaphore = SemaphoreParser.fromYaml(config_semaphore)
+                semaphore = pcontext.semaphore_parser.fromYaml(
+                    config_semaphore)
                 semaphore_layout.addSemaphore(semaphore)
 
-        project_template_parser = ProjectTemplateParser(tenant, layout)
         for config_template in data.project_templates:
-            classes = TenantParser._getLoadClasses(tenant, config_template)
+            classes = self._getLoadClasses(tenant, config_template)
             if 'project-template' not in classes:
                 continue
             with configuration_exceptions('project-template', config_template):
-                layout.addProjectTemplate(project_template_parser.fromYaml(
-                    config_template))
+                layout.addProjectTemplate(
+                    pcontext.project_template_parser.fromYaml(
+                        config_template))
 
-        project_parser = ProjectParser(tenant, layout, project_template_parser)
-        for config_projects in data.projects.values():
+        flattened_projects = self._flattenProjects(data.projects, tenant)
+        for config_projects in flattened_projects.values():
             # Unlike other config classes, we expect multiple project
             # stanzas with the same name, so that a config repo can
             # define a project-pipeline and the project itself can
@@ -1665,25 +1708,43 @@
             # the include/exclude rules before parsing them.
             filtered_projects = []
             for config_project in config_projects:
-                classes = TenantParser._getLoadClasses(tenant, config_project)
+                classes = self._getLoadClasses(tenant, config_project)
                 if 'project' in classes:
                     filtered_projects.append(config_project)
 
             if not filtered_projects:
                 continue
 
-            layout.addProjectConfig(project_parser.fromYaml(
+            layout.addProjectConfig(pcontext.project_parser.fromYaml(
                 filtered_projects))
 
-    @staticmethod
-    def _parseLayout(base, tenant, data, scheduler, connections):
+    def _flattenProjects(self, projects, tenant):
+        # Group together all of the project stanzas for each project.
+        result_projects = {}
+        for config_project in projects:
+            with configuration_exceptions('project', config_project):
+                name = config_project.get('name')
+                if not name:
+                    # No name is defined, so implicitly use the name of
+                    # the project in which the stanza appears.
+                    name = (config_project['_source_context'].
+                            project.canonical_name)
+                else:
+                    trusted, project = tenant.getProject(name)
+                    if project is None:
+                        raise ProjectNotFoundError(name)
+                    name = project.canonical_name
+                config_project['name'] = name
+                result_projects.setdefault(name, []).append(config_project)
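+        # Example (hypothetical hostname): an unnamed 'project:' stanza
+        # defined in review.example.org/proj and a named stanza for the
+        # same repo both end up grouped under 'review.example.org/proj'.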
+        return result_projects
+
+    def _parseLayout(self, base, tenant, data):
         # Don't call this method from dynamic reconfiguration because
         # it interacts with drivers and connections.
         layout = model.Layout(tenant)
-        TenantParser.log.debug("Created layout id %s", layout.uuid)
+        self.log.debug("Created layout id %s", layout.uuid)
 
-        TenantParser._parseLayoutItems(layout, tenant, data,
-                                       scheduler, connections)
+        self._parseLayoutItems(layout, tenant, data)
 
         for pipeline in layout.pipelines.values():
             pipeline.manager._postConfig(layout)
@@ -1694,6 +1755,12 @@
 class ConfigLoader(object):
     log = logging.getLogger("zuul.ConfigLoader")
 
+    def __init__(self, connections, scheduler, merger):
+        self.connections = connections
+        self.scheduler = scheduler
+        self.merger = merger
+        self.tenant_parser = TenantParser(connections, scheduler, merger)
+
     def expandConfigPath(self, config_path):
         if config_path:
             config_path = os.path.expanduser(config_path)
@@ -1702,28 +1769,27 @@
                             config_path)
         return config_path
 
-    def loadConfig(self, config_path, project_key_dir, scheduler, merger,
-                   connections):
-        abide = model.Abide()
-
+    def readConfig(self, config_path):
         config_path = self.expandConfigPath(config_path)
         with open(config_path) as config_file:
             self.log.info("Loading configuration from %s" % (config_path,))
             data = yaml.safe_load(config_file)
-        config = model.UnparsedAbideConfig()
-        config.extend(data)
         base = os.path.dirname(os.path.realpath(config_path))
+        unparsed_abide = model.UnparsedAbideConfig(base)
+        unparsed_abide.extend(data)
+        return unparsed_abide
 
-        for conf_tenant in config.tenants:
+    def loadConfig(self, unparsed_abide, project_key_dir):
+        abide = model.Abide()
+        for conf_tenant in unparsed_abide.tenants:
             # When performing a full reload, do not use cached data.
-            tenant = TenantParser.fromYaml(
-                base, project_key_dir, connections, scheduler, merger,
-                conf_tenant, old_tenant=None)
+            tenant = self.tenant_parser.fromYaml(unparsed_abide.base,
+                                                 project_key_dir,
+                                                 conf_tenant, old_tenant=None)
             abide.tenants[tenant.name] = tenant
         return abide
 
-    def reloadTenant(self, config_path, project_key_dir, scheduler,
-                     merger, connections, abide, tenant):
+    def reloadTenant(self, config_path, project_key_dir, abide, tenant):
         new_abide = model.Abide()
         new_abide.tenants = abide.tenants.copy()
 
@@ -1731,13 +1797,14 @@
         base = os.path.dirname(os.path.realpath(config_path))
 
         # When reloading a tenant only, use cached data if available.
-        new_tenant = TenantParser.fromYaml(
-            base, project_key_dir, connections, scheduler, merger,
+        new_tenant = self.tenant_parser.fromYaml(
+            base, project_key_dir,
             tenant.unparsed_config, old_tenant=tenant)
         new_abide.tenants[tenant.name] = new_tenant
         return new_abide
 
-    def _loadDynamicProjectData(self, config, project, files, trusted, tenant):
+    def _loadDynamicProjectData(self, config, project,
+                                files, trusted, tenant):
         if trusted:
             branches = ['master']
         else:
@@ -1757,12 +1824,9 @@
             # If there is no files entry at all for this
             # project-branch, then use the cached config.
             if files_entry is None:
-                if trusted:
-                    incdata = project.unparsed_config
-                else:
-                    incdata = project.unparsed_branch_config.get(branch)
+                incdata = project.unparsed_branch_config.get(branch)
                 if incdata:
-                    config.extend(incdata, tenant)
+                    config.extend(incdata)
                 continue
             # Otherwise, do not use the cached config (even if the
             # files are empty as that likely means they were deleted).
@@ -1784,19 +1848,21 @@
                     # Prevent mixing configuration source
                     conf_root = fn.split('/')[0]
                     if loaded and loaded != conf_root:
-                        TenantParser.log.warning(
+                        self.log.warning(
                             "Multiple configuration in %s" % source_context)
                         continue
                     loaded = conf_root
 
                     if trusted:
-                        incdata = TenantParser._parseConfigProjectLayout(
-                            data, source_context, tenant)
+                        incdata = (self.tenant_parser.
+                                   loadConfigProjectLayout(
+                                       data, source_context))
                     else:
-                        incdata = TenantParser._parseUntrustedProjectLayout(
-                            data, source_context, tenant)
+                        incdata = (self.tenant_parser.
+                                   loadUntrustedProjectLayout(
+                                       data, source_context))
 
-                    config.extend(incdata, tenant)
+                    config.extend(incdata)
 
     def createDynamicLayout(self, tenant, files,
                             include_config_projects=False,
@@ -1808,8 +1874,10 @@
                     config, project, files, True, tenant)
         else:
             config = tenant.config_projects_config.copy()
+
         for project in tenant.untrusted_projects:
-            self._loadDynamicProjectData(config, project, files, False, tenant)
+            self._loadDynamicProjectData(config, project, files,
+                                         False, tenant)
 
         layout = model.Layout(tenant)
         self.log.debug("Created layout id %s", layout.uuid)
@@ -1833,9 +1901,8 @@
         else:
             skip_pipelines = skip_semaphores = False
 
-        TenantParser._parseLayoutItems(layout, tenant, config,
-                                       scheduler, connections,
-                                       skip_pipelines=skip_pipelines,
-                                       skip_semaphores=skip_semaphores)
+        self.tenant_parser._parseLayoutItems(layout, tenant, config,
+                                             skip_pipelines=skip_pipelines,
+                                             skip_semaphores=skip_semaphores)
 
         return layout
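
A minimal usage sketch of the refactored ConfigLoader entry points,
mirroring the scheduler hunk later in this change (connections,
scheduler and merger are the live collaborators; paths hypothetical):

    from zuul import configloader

    loader = configloader.ConfigLoader(connections, scheduler, merger)
    unparsed_abide = loader.readConfig('/etc/zuul/main.yaml')
    abide = loader.loadConfig(unparsed_abide, '/var/lib/zuul/keys')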
diff --git a/zuul/connection/__init__.py b/zuul/connection/__init__.py
index 86f14d6..1c62f4d 100644
--- a/zuul/connection/__init__.py
+++ b/zuul/connection/__init__.py
@@ -75,11 +75,14 @@
         still in use.  Anything in our cache that isn't in the supplied
         list should be safe to remove from the cache."""
 
-    def getWebHandlers(self, zuul_web):
+    def getWebHandlers(self, zuul_web, info):
         """Return a list of web handlers to register with zuul-web.
 
         :param zuul.web.ZuulWeb zuul_web:
             Zuul Web instance.
+        :param zuul.model.WebInfo info:
+            The WebInfo object for the Zuul Web instance. Can be used by
+            plugins to toggle API capabilities.
         :returns: List of `zuul.web.handler.BaseWebHandler` instances.
         """
         return []
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 6dfcdd3..772ba9b 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -1141,7 +1141,7 @@
 
         return statuses
 
-    def getWebHandlers(self, zuul_web):
+    def getWebHandlers(self, zuul_web, info):
         return [GithubWebhookHandler(self, zuul_web, 'POST', 'payload')]
 
     def validateWebConfig(self, config, connections):
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index 501a2c5..e931301 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -125,9 +125,10 @@
 
         return zuul_buildset_table, zuul_build_table
 
-    def getWebHandlers(self, zuul_web):
+    def getWebHandlers(self, zuul_web, info):
+        info.capabilities.job_history = True
         return [
-            SqlWebHandler(self, zuul_web, 'GET', '/{tenant}/builds.json'),
+            SqlWebHandler(self, zuul_web, 'GET', '/{tenant}/builds'),
             StaticHandler(zuul_web, '/{tenant}/builds.html'),
         ]
 
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index d561232..fe0f28d 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import copy
 import gear
 import json
 import logging
@@ -186,6 +185,7 @@
         params = dict()
         params['job'] = job.name
         params['timeout'] = job.timeout
+        params['post_timeout'] = job.post_timeout
         params['items'] = merger_items
         params['projects'] = []
         if hasattr(item.change, 'branch'):
@@ -208,7 +208,9 @@
             nodes.append(n)
         params['nodes'] = nodes
         params['groups'] = [group.toDict() for group in nodeset.getGroups()]
-        params['vars'] = copy.deepcopy(job.variables)
+        params['vars'] = job.variables
+        params['host_vars'] = job.host_variables
+        params['group_vars'] = job.group_variables
         params['zuul'] = zuul_params
         projects = set()
         required_projects = set()
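
A hedged sketch of the executor job payload after this change (names
and values hypothetical; the remaining keys are unchanged):

    params = {
        'job': 'example-job',
        'timeout': 3600,                             # pre-run + run budget
        'post_timeout': 600,                         # per post playbook
        'vars': {'global_var': 1},                   # visible to all hosts
        'host_vars': {'node1': {'host_var': 2}},     # keyed by node name
        'group_vars': {'group1': {'group_var': 3}},  # keyed by group name
        # ... nodes, groups, projects, zuul, etc. as before
    }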
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 8de6fe0..d140a00 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -497,16 +497,22 @@
                 shutil.copy(os.path.join(library_path, fn), target_dir)
 
 
-def make_setup_inventory_dict(nodes):
+def check_varnames(var):
+    # We block these in configloader, but block them here too to make
+    # sure that a job doesn't pass variables named zuul or nodepool.
+    if 'zuul' in var:
+        raise Exception("Defining variables named 'zuul' is not allowed")
+    if 'nodepool' in var:
+        raise Exception("Defining variables named 'nodepool' is not allowed")
 
+
+def make_setup_inventory_dict(nodes):
     hosts = {}
     for node in nodes:
         if (node['host_vars']['ansible_connection'] in
             BLACKLISTED_ANSIBLE_CONNECTION_TYPES):
             continue
-
-        for name in node['name']:
-            hosts[name] = node['host_vars']
+        hosts[node['name']] = node['host_vars']
 
     inventory = {
         'all': {
@@ -517,12 +523,10 @@
     return inventory
 
 
-def make_inventory_dict(nodes, groups, all_vars):
-
+def make_inventory_dict(nodes, args, all_vars):
     hosts = {}
     for node in nodes:
-        for name in node['name']:
-            hosts[name] = node['host_vars']
+        hosts[node['name']] = node['host_vars']
 
     inventory = {
         'all': {
@@ -531,14 +535,16 @@
         }
     }
 
-    for group in groups:
+    for group in args['groups']:
         group_hosts = {}
         for node_name in group['nodes']:
-            # children is a dict with None as values because we don't have
-            # and per-group variables. If we did, None would be a dict
-            # with the per-group variables
             group_hosts[node_name] = None
-        inventory[group['name']] = {'hosts': group_hosts}
+        group_vars = args['group_vars'].get(group['name'], {}).copy()
+        check_varnames(group_vars)
+        inventory[group['name']] = {
+            'hosts': group_hosts,
+            'vars': group_vars,
+        }
 
     return inventory
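
A hedged sketch of the inventory this now produces for one node in one
group (names and address hypothetical; the job-level all_vars
presumably also land under 'all'):

    inventory = {
        'all': {
            'hosts': {'node1': {'ansible_host': '203.0.113.1'}},
        },
        'group1': {
            'hosts': {'node1': None},
            'vars': {'group_var': 'value'},  # from args['group_vars']
        },
    }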
 
@@ -878,8 +884,10 @@
         success = False
         self.started = True
         time_started = time.time()
-        # timeout value is total job timeout or put another way
-        # the cummulative time that pre, run, and post can consume.
+        # timeout value is "total" job timeout which accounts for
+        # pre-run and run playbooks. post-run is different because
+        # it is used to copy out job logs and we want to do our best
+        # to copy logs even when the job has timed out.
         job_timeout = args['timeout']
         for index, playbook in enumerate(self.jobdir.pre_playbooks):
             # TODOv3(pabelanger): Implement pre-run timeout setting.
@@ -914,11 +922,15 @@
                 # run it again.
                 return None
 
+        post_timeout = args['post_timeout']
         for index, playbook in enumerate(self.jobdir.post_playbooks):
-            # TODOv3(pabelanger): Implement post-run timeout setting.
-            ansible_timeout = self.getAnsibleTimeout(time_started, job_timeout)
+            # The post timeout operates a little differently from the
+            # main job timeout.  We give each post playbook the full
+            # post timeout to do its job, because post is where you'll
+            # often record job logs, which are vital to understanding
+            # why timeouts have happened in the first place.
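+            # Illustrative numbers (hypothetical): with timeout 3600 and
+            # post-timeout 600, pre-run and run share one 3600s budget,
+            # while every post playbook independently gets up to 600s.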
             post_status, post_code = self.runAnsiblePlaybook(
-                playbook, ansible_timeout, success, phase='post', index=index)
+                playbook, post_timeout, success, phase='post', index=index)
             if post_status == self.RESULT_ABORTED:
                 return 'ABORTED'
             if post_status != self.RESULT_NORMAL or post_code != 0:
@@ -968,42 +980,45 @@
             # set to True in the clouds.yaml for a cloud if this
             # results in the wrong thing being in interface_ip
             # TODO(jeblair): Move this notice to the docs.
-            ip = node.get('interface_ip')
-            port = node.get('connection_port', node.get('ssh_port', 22))
-            host_vars = dict(
-                ansible_host=ip,
-                ansible_user=self.executor_server.default_username,
-                ansible_port=port,
-                nodepool=dict(
-                    label=node.get('label'),
-                    az=node.get('az'),
-                    cloud=node.get('cloud'),
-                    provider=node.get('provider'),
-                    region=node.get('region'),
-                    interface_ip=node.get('interface_ip'),
-                    public_ipv4=node.get('public_ipv4'),
-                    private_ipv4=node.get('private_ipv4'),
-                    public_ipv6=node.get('public_ipv6')))
+            for name in node['name']:
+                ip = node.get('interface_ip')
+                port = node.get('connection_port', node.get('ssh_port', 22))
+                host_vars = args['host_vars'].get(name, {}).copy()
+                check_varnames(host_vars)
+                host_vars.update(dict(
+                    ansible_host=ip,
+                    ansible_user=self.executor_server.default_username,
+                    ansible_port=port,
+                    nodepool=dict(
+                        label=node.get('label'),
+                        az=node.get('az'),
+                        cloud=node.get('cloud'),
+                        provider=node.get('provider'),
+                        region=node.get('region'),
+                        interface_ip=node.get('interface_ip'),
+                        public_ipv4=node.get('public_ipv4'),
+                        private_ipv4=node.get('private_ipv4'),
+                        public_ipv6=node.get('public_ipv6'))))
 
-            username = node.get('username')
-            if username:
-                host_vars['ansible_user'] = username
+                username = node.get('username')
+                if username:
+                    host_vars['ansible_user'] = username
 
-            connection_type = node.get('connection_type')
-            if connection_type:
-                host_vars['ansible_connection'] = connection_type
+                connection_type = node.get('connection_type')
+                if connection_type:
+                    host_vars['ansible_connection'] = connection_type
 
-            host_keys = []
-            for key in node.get('host_keys'):
-                if port != 22:
-                    host_keys.append("[%s]:%s %s" % (ip, port, key))
-                else:
-                    host_keys.append("%s %s" % (ip, key))
+                host_keys = []
+                for key in node.get('host_keys'):
+                    if port != 22:
+                        host_keys.append("[%s]:%s %s" % (ip, port, key))
+                    else:
+                        host_keys.append("%s %s" % (ip, key))
 
-            hosts.append(dict(
-                name=node['name'],
-                host_vars=host_vars,
-                host_keys=host_keys))
+                hosts.append(dict(
+                    name=name,
+                    host_vars=host_vars,
+                    host_keys=host_keys))
         return hosts
 
     def _blockPluginDirs(self, path):
@@ -1096,10 +1111,7 @@
 
         secrets = playbook['secrets']
         if secrets:
-            if 'zuul' in secrets:
-                # We block this in configloader, but block it here too to make
-                # sure that a job doesn't pass secrets named zuul.
-                raise Exception("Defining secrets named 'zuul' is not allowed")
+            check_varnames(secrets)
             jobdir_playbook.secrets_content = yaml.safe_dump(
                 secrets, default_flow_style=False)
 
@@ -1200,12 +1212,9 @@
 
     def prepareAnsibleFiles(self, args):
         all_vars = args['vars'].copy()
+        check_varnames(all_vars)
         # TODO(mordred) Hack to work around running things with python3
         all_vars['ansible_python_interpreter'] = '/usr/bin/python2'
-        if 'zuul' in all_vars:
-            # We block this in configloader, but block it here too to make
-            # sure that a job doesn't pass variables named zuul.
-            raise Exception("Defining vars named 'zuul' is not allowed")
         all_vars['zuul'] = args['zuul'].copy()
         all_vars['zuul']['executor'] = dict(
             hostname=self.executor_server.hostname,
@@ -1216,7 +1225,7 @@
 
         nodes = self.getHostList(args)
         setup_inventory = make_setup_inventory_dict(nodes)
-        inventory = make_inventory_dict(nodes, args['groups'], all_vars)
+        inventory = make_inventory_dict(nodes, args, all_vars)
 
         with open(self.jobdir.setup_inventory, 'w') as setup_inventory_yaml:
             setup_inventory_yaml.write(
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 88ddf7d..506b94f 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -460,7 +460,8 @@
         # Load layout
         # Late import to break an import loop
         import zuul.configloader
-        loader = zuul.configloader.ConfigLoader()
+        loader = zuul.configloader.ConfigLoader(
+            self.sched.connections, self.sched, None)
 
         self.log.debug("Loading dynamic layout")
         (trusted_updates, untrusted_updates) = item.includesConfigUpdates()
@@ -476,9 +477,7 @@
                 loader.createDynamicLayout(
                     item.pipeline.layout.tenant,
                     build_set.files,
-                    include_config_projects=True,
-                    scheduler=self.sched,
-                    connections=self.sched.connections)
+                    include_config_projects=True)
                 trusted_layout_verified = True
 
             # Then create the config a second time but without changes
diff --git a/zuul/model.py b/zuul/model.py
index 45fc1a8..44e8d06 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -24,6 +24,7 @@
 import textwrap
 
 from zuul import change_matcher
+from zuul.lib.config import get_default
 
 MERGER_MERGE = 1          # "git merge"
 MERGER_MERGE_RESOLVE = 2  # "git merge -s resolve"
@@ -349,7 +350,6 @@
         # when deciding whether to enqueue their changes
         # TODOv3 (jeblair): re-add support for foreign projects if needed
         self.foreign = foreign
-        self.unparsed_config = None
         self.unparsed_branch_config = {}  # branch -> UnparsedTenantConfig
 
     def __str__(self):
@@ -839,7 +839,10 @@
         self.execution_attributes = dict(
             parent=None,
             timeout=None,
+            post_timeout=None,
             variables={},
+            host_variables={},
+            group_variables={},
             nodeset=NodeSet(),
             workspace=None,
             pre_run=(),
@@ -981,10 +984,19 @@
             matchers.append(self.branch_matcher)
         self.branch_matcher = change_matcher.MatchAll(matchers)
 
-    def updateVariables(self, other_vars):
-        v = copy.deepcopy(self.variables)
-        Job._deepUpdate(v, other_vars)
-        self.variables = v
+    def updateVariables(self, other_vars, other_host_vars, other_group_vars):
+        if other_vars is not None:
+            v = copy.deepcopy(self.variables)
+            Job._deepUpdate(v, other_vars)
+            self.variables = v
+        if other_host_vars is not None:
+            v = copy.deepcopy(self.host_variables)
+            Job._deepUpdate(v, other_host_vars)
+            self.host_variables = v
+        if other_group_vars is not None:
+            v = copy.deepcopy(self.group_variables)
+            Job._deepUpdate(v, other_group_vars)
+            self.group_variables = v
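+        # Each update is a recursive merge via Job._deepUpdate: e.g.
+        # (hypothetical values) merging {'a': {'c': 2}} into
+        # {'a': {'b': 1}} yields {'a': {'b': 1, 'c': 2}} rather than
+        # replacing 'a' outright.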
 
     def updateParentData(self, other_vars):
         # Update variables, but give the current values priority (used
@@ -1061,7 +1073,8 @@
                                         "from other projects."
                                         % (repr(self), this_origin))
                 if k not in set(['pre_run', 'run', 'post_run', 'roles',
-                                 'variables', 'required_projects',
+                                 'variables', 'host_variables',
+                                 'group_variables', 'required_projects',
                                  'allowed_projects']):
                     # TODO(jeblair): determine if deepcopy is required
                     setattr(self, k, copy.deepcopy(other._get(k)))
@@ -1102,8 +1115,8 @@
         if other._get('post_run') is not None:
             other_post_run = self.freezePlaybooks(other.post_run)
             self.post_run = other_post_run + self.post_run
-        if other._get('variables') is not None:
-            self.updateVariables(other.variables)
+        self.updateVariables(other.variables, other.host_variables,
+                             other.group_variables)
         if other._get('required_projects') is not None:
             self.updateProjects(other.required_projects)
         if (other._get('allowed_projects') is not None and
@@ -2434,8 +2447,10 @@
     An Abide is a collection of tenants.
     """
 
-    def __init__(self):
+    def __init__(self, base=None):
         self.tenants = []
+        self.known_tenants = set()
+        self.base = base
 
     def extend(self, conf):
         if isinstance(conf, UnparsedAbideConfig):
@@ -2453,6 +2468,8 @@
             key, value = list(item.items())[0]
             if key == 'tenant':
                 self.tenants.append(value)
+                if 'name' in value:
+                    self.known_tenants.add(value['name'])
             else:
                 raise ConfigItemUnknownError()
 
@@ -2465,7 +2482,7 @@
         self.pipelines = []
         self.jobs = []
         self.project_templates = []
-        self.projects = {}
+        self.projects = []
         self.nodesets = []
         self.secrets = []
         self.semaphores = []
@@ -2482,23 +2499,13 @@
         r.semaphores = copy.deepcopy(self.semaphores)
         return r
 
-    def extend(self, conf, tenant):
+    def extend(self, conf):
         if isinstance(conf, UnparsedTenantConfig):
             self.pragmas.extend(conf.pragmas)
             self.pipelines.extend(conf.pipelines)
             self.jobs.extend(conf.jobs)
             self.project_templates.extend(conf.project_templates)
-            for k, v in conf.projects.items():
-                name = k
-                # Add the projects to the according canonical name instead of
-                # the given project name. If it is not found, it's ok to add
-                # this to the given name. We also don't need to throw the
-                # ProjectNotFoundException here as semantic validation occurs
-                # later where it will fail then.
-                trusted, project = tenant.getProject(k)
-                if project is not None:
-                    name = project.canonical_name
-                self.projects.setdefault(name, []).extend(v)
+            self.projects.extend(conf.projects)
             self.nodesets.extend(conf.nodesets)
             self.secrets.extend(conf.secrets)
             self.semaphores.extend(conf.semaphores)
@@ -2514,13 +2521,7 @@
                 raise ConfigItemMultipleKeysError()
             key, value = list(item.items())[0]
             if key == 'project':
-                name = value.get('name')
-                if not name:
-                    # There is no name defined so implicitly add the name
-                    # of the project where it is defined.
-                    name = value['_source_context'].project.canonical_name
-                    value['name'] = name
-                self.projects.setdefault(name, []).append(value)
+                self.projects.append(value)
             elif key == 'job':
                 self.jobs.append(value)
             elif key == 'project-template':
@@ -3182,3 +3183,80 @@
         td = self._getTD(build)
         td.add(elapsed, result)
         td.save()
+
+
+class Capabilities(object):
+    """The set of capabilities this Zuul installation has.
+
+    Some plugins add elements to the external API.  To help consumers
+    determine whether a given piece of functionality is available,
+    keep track of distinct capability flags.
+    """
+    def __init__(self, job_history=False):
+        self.job_history = job_history
+
+    def __repr__(self):
+        return '<Capabilities 0x%x %s>' % (id(self), self._renderFlags())
+
+    def _renderFlags(self):
+        d = self.toDict()
+        return " ".join(['{k}={v}'.format(k=k, v=v) for (k, v) in d.items()])
+
+    def copy(self):
+        return Capabilities(**self.toDict())
+
+    def toDict(self):
+        d = dict()
+        d['job_history'] = self.job_history
+        return d
+
+
+class WebInfo(object):
+    """Information about the system needed by zuul-web /info."""
+
+    def __init__(self, websocket_url=None, endpoint=None,
+                 capabilities=None, stats_url=None,
+                 stats_prefix=None, stats_type=None):
+        self.capabilities = capabilities or Capabilities()
+        self.websocket_url = websocket_url
+        self.stats_url = stats_url
+        self.stats_prefix = stats_prefix
+        self.stats_type = stats_type
+        self.endpoint = endpoint
+        self.tenant = None
+
+    def __repr__(self):
+        return '<WebInfo 0x%x capabilities=%s>' % (
+            id(self), str(self.capabilities))
+
+    def copy(self):
+        return WebInfo(
+            websocket_url=self.websocket_url,
+            endpoint=self.endpoint,
+            stats_url=self.stats_url,
+            stats_prefix=self.stats_prefix,
+            stats_type=self.stats_type,
+            capabilities=self.capabilities.copy())
+
+    @staticmethod
+    def fromConfig(config):
+        return WebInfo(
+            websocket_url=get_default(config, 'web', 'websocket_url', None),
+            stats_url=get_default(config, 'web', 'stats_url', None),
+            stats_prefix=get_default(config, 'statsd', 'prefix'),
+            stats_type=get_default(config, 'web', 'stats_type', 'graphite'),
+        )
+
+    def toDict(self):
+        d = dict()
+        d['websocket_url'] = self.websocket_url
+        stats = dict()
+        stats['url'] = self.stats_url
+        stats['prefix'] = self.stats_prefix
+        stats['type'] = self.stats_type
+        d['stats'] = stats
+        d['endpoint'] = self.endpoint
+        d['capabilities'] = self.capabilities.toDict()
+        if self.tenant:
+            d['tenant'] = self.tenant
+        return d
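
A sketch of building and serialising WebInfo, assuming get_default can
read from a ConfigParser (section and option names as used by
fromConfig above; values hypothetical):

    import configparser

    config = configparser.ConfigParser()
    config.read_dict({
        'web': {'websocket_url': 'wss://zuul.example.org/console',
                'stats_url': 'https://graphite.example.org'},
        'statsd': {'prefix': 'zuul'},
    })
    info = WebInfo.fromConfig(config)
    # info.toDict() ->
    # {'websocket_url': 'wss://zuul.example.org/console',
    #  'stats': {'url': 'https://graphite.example.org',
    #            'prefix': 'zuul', 'type': 'graphite'},
    #  'endpoint': None, 'capabilities': {'job_history': False}}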
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 7a0e28c..c58bfc7 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -246,6 +246,7 @@
         self.result_event_queue = queue.Queue()
         self.management_event_queue = zuul.lib.queue.MergedQueue()
         self.abide = model.Abide()
+        self.unparsed_abide = model.UnparsedAbideConfig()
 
         if not testonly:
             time_dir = self._get_time_database_dir()
@@ -548,11 +549,13 @@
         self.config = event.config
         try:
             self.log.info("Full reconfiguration beginning")
-            loader = configloader.ConfigLoader()
+            loader = configloader.ConfigLoader(
+                self.connections, self, self.merger)
+            self.unparsed_abide = loader.readConfig(
+                self.config.get('scheduler', 'tenant_config'))
             abide = loader.loadConfig(
-                self.config.get('scheduler', 'tenant_config'),
-                self._get_project_key_dir(),
-                self, self.merger, self.connections)
+                self.unparsed_abide,
+                self._get_project_key_dir())
             for tenant in abide.tenants.values():
                 self._reconfigureTenant(tenant)
             self.abide = abide
@@ -569,14 +572,13 @@
             # If a change landed to a project, clear out the cached
             # config before reconfiguring.
             for project in event.projects:
-                project.unparsed_config = None
                 project.unparsed_branch_config = {}
             old_tenant = self.abide.tenants[event.tenant_name]
-            loader = configloader.ConfigLoader()
+            loader = configloader.ConfigLoader(
+                self.connections, self, self.merger)
             abide = loader.reloadTenant(
                 self.config.get('scheduler', 'tenant_config'),
                 self._get_project_key_dir(),
-                self, self.merger, self.connections,
                 self.abide, old_tenant)
             tenant = abide.tenants[event.tenant_name]
             self._reconfigureTenant(tenant)
@@ -1150,9 +1152,16 @@
         data['pipelines'] = pipelines
         tenant = self.abide.tenants.get(tenant_name)
         if not tenant:
+            if tenant_name not in self.unparsed_abide.known_tenants:
+                return json.dumps({
+                    "message": "Unknown tenant",
+                    "code": 404
+                })
             self.log.warning("Tenant %s isn't loaded" % tenant_name)
-            return json.dumps(
-                {"message": "Tenant %s isn't ready" % tenant_name})
+            return json.dumps({
+                "message": "Tenant %s isn't ready" % tenant_name,
+                "code": 204
+            })
         for pipeline in tenant.layout.pipelines.values():
             pipelines.append(pipeline.formatStatusJSON(websocket_url))
         return json.dumps(data)
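
The status RPC now distinguishes a tenant that is unknown from one that
merely hasn't finished loading; the two error payloads look roughly
like this (tenant name hypothetical):

    {"message": "Unknown tenant", "code": 404}
    {"message": "Tenant example isn't ready", "code": 204}

zuul-web (below) maps the 404 payload onto an HTTP 404 response.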
diff --git a/zuul/web/__init__.py b/zuul/web/__init__.py
index e962738..31eac7d 100755
--- a/zuul/web/__init__.py
+++ b/zuul/web/__init__.py
@@ -16,6 +16,7 @@
 
 
 import asyncio
+import copy
 import json
 import logging
 import os
@@ -25,6 +26,7 @@
 import aiohttp
 from aiohttp import web
 
+import zuul.model
 import zuul.rpcclient
 from zuul.web.handler import StaticHandler
 
@@ -158,41 +160,47 @@
             'key_get': self.key_get,
         }
 
-    async def tenant_list(self, request):
+    async def tenant_list(self, request, result_filter=None):
         job = self.rpc.submitJob('zuul:tenant_list', {})
         return web.json_response(json.loads(job.data[0]))
 
-    async def status_get(self, request):
+    async def status_get(self, request, result_filter=None):
         tenant = request.match_info["tenant"]
         if tenant not in self.cache or \
            (time.time() - self.cache_time[tenant]) > self.cache_expiry:
             job = self.rpc.submitJob('zuul:status_get', {'tenant': tenant})
             self.cache[tenant] = json.loads(job.data[0])
             self.cache_time[tenant] = time.time()
-        resp = web.json_response(self.cache[tenant])
+        payload = self.cache[tenant]
+        if payload.get('code') == 404:
+            return web.HTTPNotFound(reason=payload['message'])
+        if result_filter:
+            payload = result_filter.filterPayload(payload)
+        resp = web.json_response(payload)
         resp.headers['Access-Control-Allow-Origin'] = '*'
         resp.headers["Cache-Control"] = "public, max-age=%d" % \
                                         self.cache_expiry
         resp.last_modified = self.cache_time[tenant]
         return resp
 
-    async def job_list(self, request):
+    async def job_list(self, request, result_filter=None):
         tenant = request.match_info["tenant"]
         job = self.rpc.submitJob('zuul:job_list', {'tenant': tenant})
         resp = web.json_response(json.loads(job.data[0]))
         resp.headers['Access-Control-Allow-Origin'] = '*'
         return resp
 
-    async def key_get(self, request):
+    async def key_get(self, request, result_filter=None):
         tenant = request.match_info["tenant"]
         project = request.match_info["project"]
         job = self.rpc.submitJob('zuul:key_get', {'tenant': tenant,
                                                   'project': project})
         return web.Response(body=job.data[0])
 
-    async def processRequest(self, request, action):
+    async def processRequest(self, request, action, result_filter=None):
+        resp = None
         try:
-            resp = await self.controllers[action](request)
+            resp = await self.controllers[action](request, result_filter)
         except asyncio.CancelledError:
             self.log.debug("request handling cancelled")
         except Exception as e:
@@ -202,6 +210,24 @@
         return resp
 
 
+class ChangeFilter(object):
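+    # Filters a status payload down to a single change.  Usage sketch
+    # (change id hypothetical):
+    #   ChangeFilter('1234,2').filterPayload(status)
+    #   -> [copies of the change dicts whose 'id' matches]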
+    def __init__(self, desired):
+        self.desired = desired
+
+    def filterPayload(self, payload):
+        status = []
+        for pipeline in payload['pipelines']:
+            for change_queue in pipeline['change_queues']:
+                for head in change_queue['heads']:
+                    for change in head:
+                        if self.wantChange(change):
+                            status.append(copy.deepcopy(change))
+        return status
+
+    def wantChange(self, change):
+        return change['id'] == self.desired
+
+
 class ZuulWeb(object):
 
     log = logging.getLogger("zuul.web.ZuulWeb")
@@ -210,13 +236,16 @@
                  gear_server, gear_port,
                  ssl_key=None, ssl_cert=None, ssl_ca=None,
                  static_cache_expiry=3600,
-                 connections=None):
+                 connections=None,
+                 info=None):
+        self.start_time = time.time()
         self.listen_address = listen_address
         self.listen_port = listen_port
         self.event_loop = None
         self.term = None
         self.server = None
         self.static_cache_expiry = static_cache_expiry
+        self.info = info
         # instantiate handlers
         self.rpc = zuul.rpcclient.RPCClient(gear_server, gear_port,
                                             ssl_key, ssl_cert, ssl_ca)
@@ -225,12 +254,37 @@
         self._plugin_routes = []  # type: List[zuul.web.handler.BaseWebHandler]
         connections = connections or []
         for connection in connections:
-            self._plugin_routes.extend(connection.getWebHandlers(self))
+            self._plugin_routes.extend(
+                connection.getWebHandlers(self, self.info))
 
     async def _handleWebsocket(self, request):
         return await self.log_streaming_handler.processRequest(
             request)
 
+    async def _handleRootInfo(self, request):
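+        # /info sits directly under the API root, so the parent of the
+        # request URL is the endpoint itself.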
+        info = self.info.copy()
+        info.endpoint = str(request.url.parent)
+        return self._handleInfo(info)
+
+    async def _handleTenantInfo(self, request):
+        info = self.info.copy()
+        info.tenant = request.match_info["tenant"]
+        # yarl.URL.parent on a root URL returns the root URL itself, so
+        # this is both safe and accurate whether zuul-web serves a
+        # white-labeled tenant like OpenStack, runs on /, or runs on a
+        # sub-URL as softwarefactory-project.io does.
+        info.endpoint = str(request.url.parent.parent.parent)
+        return self._handleInfo(info)
+
+    def _handleInfo(self, info):
+        resp = web.json_response({'info': info.toDict()}, status=200)
+        resp.headers['Access-Control-Allow-Origin'] = '*'
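+        # The info document only changes on restart, so let clients
+        # cache it as long as static assets.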
+        if self.static_cache_expiry:
+            resp.headers['Cache-Control'] = "public, max-age=%d" % \
+                self.static_cache_expiry
+        resp.last_modified = self.start_time
+        return resp
+
     async def _handleTenantsRequest(self, request):
         return await self.gearman_handler.processRequest(request,
                                                          'tenant_list')
@@ -238,6 +292,11 @@
     async def _handleStatusRequest(self, request):
         return await self.gearman_handler.processRequest(request, 'status_get')
 
+    async def _handleStatusChangeRequest(self, request):
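+        # Reuse the cached status payload, filtered to a single change.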
+        change = request.match_info["change"]
+        return await self.gearman_handler.processRequest(
+            request, 'status_get', ChangeFilter(change))
+
     async def _handleJobsRequest(self, request):
         return await self.gearman_handler.processRequest(request, 'job_list')
 
@@ -256,9 +315,13 @@
             is run within a separate (non-main) thread.
         """
         routes = [
-            ('GET', '/tenants.json', self._handleTenantsRequest),
-            ('GET', '/{tenant}/status.json', self._handleStatusRequest),
-            ('GET', '/{tenant}/jobs.json', self._handleJobsRequest),
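+            # Bare endpoint paths replace the legacy *.json routes.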
+            ('GET', '/info', self._handleRootInfo),
+            ('GET', '/{tenant}/info', self._handleTenantInfo),
+            ('GET', '/tenants', self._handleTenantsRequest),
+            ('GET', '/{tenant}/status', self._handleStatusRequest),
+            ('GET', '/{tenant}/jobs', self._handleJobsRequest),
+            ('GET', '/{tenant}/status/change/{change}',
+             self._handleStatusChangeRequest),
             ('GET', '/{tenant}/console-stream', self._handleWebsocket),
             ('GET', '/{tenant}/{project:.*}.pub', self._handleKeyRequest),
         ]
diff --git a/zuul/web/static/javascripts/jquery.zuul.js b/zuul/web/static/javascripts/jquery.zuul.js
index 7e6788b..7da81dc 100644
--- a/zuul/web/static/javascripts/jquery.zuul.js
+++ b/zuul/web/static/javascripts/jquery.zuul.js
@@ -49,7 +49,7 @@
         options = $.extend({
             'enabled': true,
             'graphite_url': '',
-            'source': 'status.json',
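+            // default to the bare status endpoint (formerly status.json)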
+            'source': 'status',
             'msg_id': '#zuul_msg',
             'pipelines_id': '#zuul_pipelines',
             'queue_events_num': '#zuul_queue_events_num',
diff --git a/zuul/web/static/javascripts/zuul.angular.js b/zuul/web/static/javascripts/zuul.angular.js
index 87cbbdd..49f2518 100644
--- a/zuul/web/static/javascripts/zuul.angular.js
+++ b/zuul/web/static/javascripts/zuul.angular.js
@@ -23,7 +23,7 @@
 {
     $scope.tenants = undefined;
     $scope.tenants_fetch = function() {
-        $http.get("tenants.json")
+        $http.get("tenants")
             .then(function success(result) {
                 $scope.tenants = result.data;
             });
@@ -36,7 +36,7 @@
 {
     $scope.jobs = undefined;
     $scope.jobs_fetch = function() {
-        $http.get("jobs.json")
+        $http.get("jobs")
             .then(function success(result) {
                 $scope.jobs = result.data;
             });
@@ -78,7 +78,7 @@
         if ($scope.job_name) {query_string += "&job_name="+$scope.job_name;}
         if ($scope.project) {query_string += "&project="+$scope.project;}
         if (query_string != "") {query_string = "?" + query_string.substr(1);}
-        $http.get("builds.json" + query_string)
+        $http.get("builds" + query_string)
             .then(function success(result) {
                 for (build_pos = 0;
                      build_pos < result.data.length;
diff --git a/zuul/web/static/javascripts/zuul.app.js b/zuul/web/static/javascripts/zuul.app.js
index bf90a4d..6e35eb3 100644
--- a/zuul/web/static/javascripts/zuul.app.js
+++ b/zuul/web/static/javascripts/zuul.app.js
@@ -55,7 +55,7 @@
     var demo = location.search.match(/[?&]demo=([^?&]*)/),
         source_url = location.search.match(/[?&]source_url=([^?&]*)/),
         source = demo ? './status-' + (demo[1] || 'basic') + '.json-sample' :
-            'status.json';
+            'status';
     source = source_url ? source_url[1] : source;
 
     var zuul = $.zuul({