Merge "Cleanup components doc" into feature/zuulv3
diff --git a/README.rst b/README.rst
index 16e7385..52b89df 100644
--- a/README.rst
+++ b/README.rst
@@ -7,6 +7,9 @@
 preparation for the third major version of Zuul.  We call this effort
 `Zuul v3`_ and it is described in more detail below.
 
+The latest documentation for Zuul v3 is published at:
+https://docs.openstack.org/infra/zuul/feature/zuulv3/
+
 Contributing
 ------------
 
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 5637552..58f3371 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -101,3 +101,40 @@
 
 .. TODO: describe standard lib and link to published docs for it.
 
+Return Values
+-------------
+
+The job may return some values to Zuul to affect its behavior.  To
+return a value, use the *zuul_return* Ansible module in a job
+playbook.  For example::
+
+  tasks:
+    - zuul_return:
+        data:
+          foo: bar
+
+This will return the dictionary "{'foo': 'bar'}" to Zuul.
+
+.. TODO: xref to section describing formatting
+
+Several uses of these values are planned, but the only currently
+implemented use is to set the log URL for a build.  To do so, set the
+**zuul.log_url** value.  For example::
+
+  tasks:
+    - zuul_return:
+        data:
+          zuul:
+            log_url: http://logs.example.com/path/to/build/logs
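+
+The *zuul_return* module also accepts optional **path** and **file**
+arguments.  By default, values are written to a *results.json* file in
+the job's work directory.  As a sketch, a playbook may pass the path
+explicitly, for example via the **zuul.executor.result_data_file**
+variable that the executor provides::
+
+  tasks:
+    - zuul_return:
+        path: "{{ zuul.executor.result_data_file }}"
+        data:
+          foo: bar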
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
index d528be1..36a22e4 100644
--- a/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
@@ -10,3 +10,7 @@
         that:
           - st.stat.exists
           - st.stat.isreg
+
+    - name: Simple shell task.
+      shell: |+
+        echo "Hello world"
diff --git a/tests/fixtures/config/data-return/git/common-config/playbooks/data-return.yaml b/tests/fixtures/config/data-return/git/common-config/playbooks/data-return.yaml
new file mode 100644
index 0000000..b92ff5c
--- /dev/null
+++ b/tests/fixtures/config/data-return/git/common-config/playbooks/data-return.yaml
@@ -0,0 +1,6 @@
+- hosts: localhost
+  tasks:
+    - zuul_return:
+        data:
+          zuul:
+            log_url: test/log/url
diff --git a/tests/fixtures/config/data-return/git/common-config/zuul.yaml b/tests/fixtures/config/data-return/git/common-config/zuul.yaml
new file mode 100644
index 0000000..8aea931
--- /dev/null
+++ b/tests/fixtures/config/data-return/git/common-config/zuul.yaml
@@ -0,0 +1,22 @@
+- pipeline:
+    name: check
+    manager: independent
+    allow-secrets: true
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        verified: 1
+    failure:
+      gerrit:
+        verified: -1
+
+- job:
+    name: data-return
+
+- project:
+    name: org/project
+    check:
+      jobs:
+        - data-return
diff --git a/tests/fixtures/config/data-return/git/org_project/README b/tests/fixtures/config/data-return/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/data-return/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/data-return/main.yaml b/tests/fixtures/config/data-return/main.yaml
new file mode 100644
index 0000000..208e274
--- /dev/null
+++ b/tests/fixtures/config/data-return/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-projects:
+          - common-config
+        untrusted-projects:
+          - org/project
diff --git a/tests/fixtures/config/in-repo/git/org_project1/.zuul.yaml b/tests/fixtures/config/in-repo/git/org_project1/.zuul.yaml
new file mode 100644
index 0000000..3fd423b
--- /dev/null
+++ b/tests/fixtures/config/in-repo/git/org_project1/.zuul.yaml
@@ -0,0 +1,5 @@
+- project:
+    name: org/project1
+    tenant-one-gate:
+      jobs:
+        - project-test1
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index aa061ff..61bf9f8 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -2324,6 +2324,16 @@
                 uuid=status_jobs[2]['uuid']),
             status_jobs[2]['report_url'])
 
+        # check job dependencies
+        self.assertIsNotNone(status_jobs[0]['dependencies'])
+        self.assertIsNotNone(status_jobs[1]['dependencies'])
+        self.assertIsNotNone(status_jobs[2]['dependencies'])
+        self.assertEqual(len(status_jobs[0]['dependencies']), 0)
+        self.assertEqual(len(status_jobs[1]['dependencies']), 1)
+        self.assertEqual(len(status_jobs[2]['dependencies']), 1)
+        self.assertIn('project-merge', status_jobs[1]['dependencies'])
+        self.assertIn('project-merge', status_jobs[2]['dependencies'])
+
     def test_live_reconfiguration(self):
         "Test that live reconfiguration works"
         self.executor_server.hold_jobs_in_build = True
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 112f48c..5d49d11 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -486,6 +486,62 @@
         self.assertIn('appears multiple times', A.messages[0],
                       "A should have a syntax error reported")
 
+    def test_multi_repo(self):
+        downstream_repo_conf = textwrap.dedent(
+            """
+            - project:
+                name: org/project1
+                tenant-one-gate:
+                  jobs:
+                    - project-test1
+
+            - job:
+                name: project1-test1
+                parent: project-test1
+            """)
+
+        file_dict = {'.zuul.yaml': downstream_repo_conf}
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+                                           files=file_dict)
+        A.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+        self.waitUntilSettled()
+
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+        self.waitUntilSettled()
+
+        upstream_repo_conf = textwrap.dedent(
+            """
+            - job:
+                name: project-test1
+
+            - job:
+                name: project-test2
+
+            - project:
+                name: org/project
+                tenant-one-gate:
+                  jobs:
+                    - project-test1
+            """)
+
+        file_dict = {'.zuul.yaml': upstream_repo_conf}
+        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
+                                           files=file_dict)
+        B.addApproval('code-review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+        self.waitUntilSettled()
+
+        self.assertEqual(B.data['status'], 'MERGED')
+        self.fake_gerrit.addEvent(B.getChangeMergedEvent())
+        self.waitUntilSettled()
+
+        tenant = self.sched.abide.tenants.get('tenant-one')
+        # Ensure the latest change is reflected in the config; if it
+        # isn't this will raise an exception.
+        tenant.layout.getJob('project-test2')
+
 
 class TestAnsible(AnsibleZuulTestCase):
     # A temporary class to hold new tests while others are disabled
@@ -643,4 +699,20 @@
         self.assertHistory([
             dict(name='test1', result='SUCCESS', changes='1,1'),
             dict(name='test2', result='SUCCESS', changes='1,1'),
+        ], ordered=False)
+
+
+class TestDataReturn(AnsibleZuulTestCase):
+    tenant_config_file = 'config/data-return/main.yaml'
+
+    def test_data_return(self):
+        # This exercises the zuul_return module: the job returns a log
+        # URL which should then appear in the build report.
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+        self.assertHistory([
+            dict(name='data-return', result='SUCCESS', changes='1,1'),
         ])
+        self.assertIn('- data-return test/log/url',
+                      A.messages[-1])
diff --git a/tools/018D05F5.gpg b/tools/018D05F5.gpg
new file mode 100644
index 0000000..95775ae
--- /dev/null
+++ b/tools/018D05F5.gpg
@@ -0,0 +1,28 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: SKS 1.1.6
+Comment: Hostname: keyserver.ubuntu.com
+
+mQINBFUZtK8BEADGaOXCZ/ypqcNEU5Y3rospyaJDhi9PiLndRXz6KxZEoDljmaLzQBMiJ3/l
+nNflwcv07sBdQDqBjNClFdDbvP4ttIZsQzWYQya/uHzM3rNxbh2bw24Tz0n/+PwZ10NrGFIo
+Xl9rU79tXe7XTJDifYvEXtpwnNcgo6/j3FJ9l7q9jQO4SwbK4dxKRLnwxPLsOtspvSp6J0PC
+9j6TiPYTrQ8dp8mj05GFF7oK6ZlQAJ3lgYG/QaWA9rXF1bOMw7E/arMI4+WYQOhx+JHkCitk
+ai000MdNRVykrvJD/r9pb6NSzyAIrs/hDYvRjD/+7d2pd47R0CLTQJjsT9JNDlZqpU7i6+47
+zAB9uYTVJFprNF7/BuQ84fK/o81ePwutt+gfGzhKvbjUNLUC6WxFzojZEDbixz0TUOgvjUsK
+4VGoDyxLw1YLebjs5YdGROB19+771sx6leMZpdQhiTaXWlQrTyjbiS7f71Hx2Eng4hpyrySz
+HbBrLzXqXjiMazxt1yp5qq3VEBBgb6iW1ejDihkew1dnx+IJbUJ+OCs8Exntdta9B5+gg557
+Q6egbxQBK3RZ/c+8JHR1ROZ63COQXtAyfTsWwyxcfm7OI0YkNkJ2gNkeMl3spKw4VbGgaC0W
+BGKsdhVd9TfvtssBItS5/bgnIob/3aOFyCmNH33SGCjYDeopPQARAQABtCNMYXVuY2hwYWQg
+UFBBIGZvciBPcGVuU3RhY2sgQ0kgQ29yZYkCOAQTAQIAIgUCVRm0rwIbAwYLCQgHAwIGFQgC
+CQoLBBYCAwECHgECF4AACgkQFbbOfAGNBfUyCA/+OJEojrft6vxgh3iVDlDan1NavVm4D7F1
+mgfRlFwd9BC3trUkaLrNAqHXTi0fWtLeCqD3k0UAekA+0e58AL5EjeGyCadn9TT7oWlaXgiP
+r9OHCaVV/z8DnalQny31PQhfweNOVyOMKh/o7BFaLc3i5KCU+qb/gAcCRC7tLI8Saxf2Czbo
+A6tECr8CHxX9xHlnpspbcw5aAnEfpqd6BTagkkMjJ/+tDhC4pv9USwH3lbBjRlU93miuqoqt
+ooMd++yyAKYd9c8ClRuI33rIAdoAmFfwwqk2prb9fF0BTxvfGdENZ+isOjvYTjzz0cYdBDrx
+fZtl7ruYceC54/6Nt9aKX0ADJBJuiIcNjqgaNCjdBP/p7aCIJzh10GKeDIzitCrK/ikMWcsz
+aqYtctBVQvRxGfF2MSAy/VJny0OhiQI6XVc6eK/9Iu9ZeEAC6GoQRIlarwYit+TGhqgYBKYT
+jWwVlKUZAz7GCIF+wx+NTkUTWVQTnDzTFeBVbzGx3WHQhCqFNayXtKHrdImKfVpQjZZBVo42
+HzKqfGt/kNDM6IKhIuMlqlCUimVZpc3tawb+d8QTTS0IjLrW7dpFfRaZRk82AjQOp96WJL9L
+oDvcEIfKg7RKmcGPBJ2qaquj+PA6yAZL5pX70jigBqjtJ0PZGm7jELb8bB70SVSGsvwHmEz0
+pSs=
+=cc1L
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index d3697c9..9712ae8 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -5,6 +5,7 @@
 # Developers should setup their test systems in a similar way.
 
 # This setup needs to be run as a user that can run sudo.
+TOOLSDIR=$(dirname "$0")
 
 # Be sure mysql and zookeeper are started.
 sudo service mysql start
@@ -38,6 +39,10 @@
 
 # TODO(pabelanger): Move this into bindep after we figure out how to enable our
 # PPA.
-sudo add-apt-repository ppa:openstack-ci-core/bubblewrap
+# NOTE(pabelanger): Avoid hitting http://keyserver.ubuntu.com
+sudo apt-key add "$TOOLSDIR/018D05F5.gpg"
+LSBDISTCODENAME=$(lsb_release -cs)
+echo "deb http://ppa.launchpad.net/openstack-ci-core/bubblewrap/ubuntu $LSBDISTCODENAME main" | \
+    sudo tee /etc/apt/sources.list.d/openstack-ci-core-ubuntu-bubblewrap-$LSBDISTCODENAME.list
 sudo apt-get update
 sudo apt-get --assume-yes install bubblewrap
diff --git a/zuul/ansible/library/command.py b/zuul/ansible/library/command.py
index d27c83e..f701b48 100644
--- a/zuul/ansible/library/command.py
+++ b/zuul/ansible/library/command.py
@@ -409,9 +409,11 @@
                 if t.isAlive():
                     console.addLine("[Zuul] standard output/error still open "
                                     "after child exited")
-                if not ret and fail_json_kwargs:
+                if ret is None and fail_json_kwargs:
                     ret = fail_json_kwargs['rc']
-                elif not ret and not fail_json_kwargs:
+                elif ret is None and not fail_json_kwargs:
                     ret = -1
                 console.addLine("[Zuul] Task exit code: %s\n" % ret)
                 if ret == -1 and not fail_json_kwargs:
diff --git a/zuul/ansible/library/zuul_return.py b/zuul/ansible/library/zuul_return.py
new file mode 100644
index 0000000..9f3332b
--- /dev/null
+++ b/zuul/ansible/library/zuul_return.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+
+# Copyright (c) 2017 Red Hat
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import os
+import tempfile
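+
+# A sketch of the conventional Ansible DOCUMENTATION block; the option
+# descriptions below are derived from the argument_spec in main().
+DOCUMENTATION = '''
+---
+module: zuul_return
+short_description: Return values from a job to Zuul
+description:
+  - Merges values into the JSON results file that Zuul reads when the
+    job completes.
+options:
+  path:
+    description: Path to the results file.  Defaults to
+      $ZUUL_JOBDIR/work/results.json.
+  data:
+    description: A dictionary of values to return to Zuul.
+  file:
+    description: Path to a JSON file to merge into the returned data.
+'''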
+
+
+def set_value(path, new_data, new_file):
+    workdir = os.path.dirname(path)
+    data = None
+    if os.path.exists(path):
+        with open(path, 'r') as f:
+            data = f.read()
+    if data:
+        data = json.loads(data)
+    else:
+        data = {}
+
+    if new_file:
+        with open(new_file, 'r') as f:
+            data.update(json.load(f))
+    if new_data:
+        data.update(new_data)
+
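+    # Write to a temporary file in the same directory, then rename it
+    # over the results file so readers never observe a partial write.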
+    (fd, tmp_path) = tempfile.mkstemp(dir=workdir)
+    try:
+        f = os.fdopen(fd, 'w')
+        json.dump(data, f)
+        f.close()
+        os.rename(tmp_path, path)
+    except Exception:
+        os.unlink(tmp_path)
+        raise
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(required=False, type='str'),
+            data=dict(required=False, type='dict'),
+            file=dict(required=False, type='str'),
+        )
+    )
+
+    p = module.params
+    path = p['path']
+    if not path:
+        path = os.path.join(os.environ['ZUUL_JOBDIR'], 'work',
+                            'results.json')
+    set_value(path, p['data'], p['file'])
+    module.exit_json(changed=True)
+
+from ansible.module_utils.basic import *  # noqa
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+    main()
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 735fe38..3c9ecf7 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -1109,13 +1109,20 @@
         untrusted_projects_config = model.UnparsedTenantConfig()
         jobs = []
 
+        # In some cases, we can use cached data, but it's still
+        # important that we process that in the same order along with
+        # any jobs that we run.  This class is used to hold the cached
+        # data and is inserted in the ordered jobs list for later
+        # processing.
+        class CachedDataJob(object):
+            def __init__(self, config_project, project):
+                self.config_project = config_project
+                self.project = project
+
         for project in config_projects:
             # If we have cached data (this is a reconfiguration) use it.
             if cached and project.unparsed_config:
-                TenantParser.log.info(
-                    "Loading previously parsed configuration from %s" %
-                    (project,))
-                config_projects_config.extend(project.unparsed_config)
+                jobs.append(CachedDataJob(True, project))
                 continue
             # Otherwise, prepare an empty unparsed config object to
             # hold cached data later.
@@ -1134,10 +1141,7 @@
         for project in untrusted_projects:
             # If we have cached data (this is a reconfiguration) use it.
             if cached and project.unparsed_config:
-                TenantParser.log.info(
-                    "Loading previously parsed configuration from %s" %
-                    (project,))
-                untrusted_projects_config.extend(project.unparsed_config)
+                jobs.append(CachedDataJob(False, project))
                 continue
             # Otherwise, prepare an empty unparsed config object to
             # hold cached data later.
@@ -1165,8 +1169,21 @@
             # complete in the order they were executed which is the
             # same order they were defined in the main config file.
             # This is important for correct inheritance.
+            if isinstance(job, CachedDataJob):
+                TenantParser.log.info(
+                    "Loading previously parsed configuration from %s" %
+                    (job.project,))
+                if job.config_project:
+                    config_projects_config.extend(
+                        job.project.unparsed_config)
+                else:
+                    untrusted_projects_config.extend(
+                        job.project.unparsed_config)
+                continue
             TenantParser.log.debug("Waiting for cat job %s" % (job,))
             job.wait()
+            TenantParser.log.debug("Cat job %s got files %s" %
+                                   (job, job.files))
             loaded = False
             files = sorted(job.files.keys())
             for conf_root in ['zuul.yaml', '.zuul.yaml', 'zuul.d', '.zuul.d']:
diff --git a/zuul/driver/sql/alembic_reporter/versions/20126015a87d_add_indexes.py b/zuul/driver/sql/alembic_reporter/versions/20126015a87d_add_indexes.py
new file mode 100644
index 0000000..3ac680d
--- /dev/null
+++ b/zuul/driver/sql/alembic_reporter/versions/20126015a87d_add_indexes.py
@@ -0,0 +1,56 @@
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""add indexes
+
+Revision ID: 20126015a87d
+Revises: 1dd914d4a482
+Create Date: 2017-07-07 07:17:27.992040
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '20126015a87d'
+down_revision = '1dd914d4a482'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+
+BUILDSET_TABLE = 'zuul_buildset'
+BUILD_TABLE = 'zuul_build'
+
+
+def upgrade():
+    # To allow a dashboard to show a per-project view, optionally filtered
+    # by pipeline.
+    op.create_index(
+        'project_pipeline_idx', BUILDSET_TABLE, ['project', 'pipeline'])
+
+    # To allow a dashboard to show a per-project-change view
+    op.create_index(
+        'project_change_idx', BUILDSET_TABLE, ['project', 'change'])
+
+    # To allow a dashboard to show a per-change view
+    op.create_index('change_idx', BUILDSET_TABLE, ['change'])
+
+    # To allow a dashboard to show a per-job view. buildset_id is included
+    # so that it's a covering index and can satisfy the join back to buildset
+    # without an additional lookup.
+    op.create_index(
+        'job_name_buildset_id_idx', BUILD_TABLE, ['job_name', 'buildset_id'])
+
+
+def downgrade():
+    pass
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index 442d1c5..c36d569 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -107,7 +107,6 @@
 
 class ExecutorClient(object):
     log = logging.getLogger("zuul.ExecutorClient")
-    negative_function_cache_ttl = 5
 
     def __init__(self, config, sched):
         self.config = config
@@ -125,8 +124,6 @@
 
         self.cleanup_thread = GearmanCleanup(self)
         self.cleanup_thread.start()
-        self.function_cache = set()
-        self.function_cache_time = 0
 
     def stop(self):
         self.log.debug("Stopping")
@@ -135,40 +132,6 @@
         self.gearman.shutdown()
         self.log.debug("Stopped")
 
-    def isJobRegistered(self, name):
-        if self.function_cache_time:
-            for connection in self.gearman.active_connections:
-                if connection.connect_time > self.function_cache_time:
-                    self.function_cache = set()
-                    self.function_cache_time = 0
-                    break
-        if name in self.function_cache:
-            self.log.debug("Function %s is registered" % name)
-            return True
-        if ((time.time() - self.function_cache_time) <
-            self.negative_function_cache_ttl):
-            self.log.debug("Function %s is not registered "
-                           "(negative ttl in effect)" % name)
-            return False
-        self.function_cache_time = time.time()
-        for connection in self.gearman.active_connections:
-            try:
-                req = gear.StatusAdminRequest()
-                connection.sendAdminRequest(req, timeout=300)
-            except Exception:
-                self.log.exception("Exception while checking functions")
-                continue
-            for line in req.response.split('\n'):
-                parts = [x.strip() for x in line.split()]
-                if not parts or parts[0] == '.':
-                    continue
-                self.function_cache.add(parts[0])
-        if name in self.function_cache:
-            self.log.debug("Function %s is registered" % name)
-            return True
-        self.log.debug("Function %s is not registered" % name)
-        return False
-
     def execute(self, job, item, pipeline, dependent_items=[],
                 merger_items=[]):
         tenant = pipeline.layout.tenant
@@ -332,7 +295,7 @@
         build.parameters = params
 
         if job.name == 'noop':
-            self.sched.onBuildCompleted(build, 'SUCCESS')
+            self.sched.onBuildCompleted(build, 'SUCCESS', {})
             return build
 
         gearman_job = gear.TextJob('executor:execute', json.dumps(params),
@@ -420,9 +383,11 @@
                     result = 'RETRY_LIMIT'
                 else:
                     build.retry = True
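+            # Values returned by the job via zuul_return arrive under 'data'.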
+            result_data = data.get('data', {})
             self.log.info("Build %s complete, result %s" %
                           (job, result))
-            self.sched.onBuildCompleted(build, result)
+            self.sched.onBuildCompleted(build, result, result_data)
             # The test suite expects the build to be removed from the
             # internal dict after it's added to the report queue.
             del self.builds[job.unique]
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 9bf4f8d..818c7e2 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -118,12 +118,12 @@
         env.update(self.env)
         key_path = os.path.expanduser(key_path)
         self.log.debug('Adding SSH Key {}'.format(key_path))
-        output = ''
         try:
-            output = subprocess.check_output(['ssh-add', key_path], env=env,
-                                             stderr=subprocess.PIPE)
-        except subprocess.CalledProcessError:
-            self.log.error('ssh-add failed: {}'.format(output))
+            subprocess.check_output(['ssh-add', key_path], env=env,
+                                    stderr=subprocess.PIPE)
+        except subprocess.CalledProcessError as e:
+            self.log.error('ssh-add failed. stdout: %s, stderr: %s',
+                           e.output, e.stderr)
             raise
         self.log.info('Added SSH Key {}'.format(key_path))
 
@@ -187,6 +187,11 @@
         os.makedirs(self.ansible_root)
         ssh_dir = os.path.join(self.work_root, '.ssh')
         os.mkdir(ssh_dir, 0o700)
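+        # Pre-create the results file so it exists (possibly empty) even
+        # if the job never invokes zuul_return.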
+        self.result_data_file = os.path.join(self.work_root, 'results.json')
+        with open(self.result_data_file, 'w'):
+            pass
         self.known_hosts = os.path.join(ssh_dir, 'known_hosts')
         self.inventory = os.path.join(self.ansible_root, 'inventory.yaml')
         self.playbooks = []  # The list of candidate playbooks
@@ -695,16 +698,12 @@
         self.running = False
         self.aborted = False
         self.thread = None
-        self.ssh_agent = None
-
         self.private_key_file = get_default(self.executor_server.config,
                                             'executor', 'private_key_file',
                                             '~/.ssh/id_rsa')
         self.ssh_agent = SshAgent()
 
     def run(self):
-        self.ssh_agent.start()
-        self.ssh_agent.add(self.private_key_file)
         self.running = True
         self.thread = threading.Thread(target=self.execute)
         self.thread.start()
@@ -717,6 +716,8 @@
 
     def execute(self):
         try:
+            self.ssh_agent.start()
+            self.ssh_agent.add(self.private_key_file)
             self.jobdir = JobDir(self.executor_server.jobdir_root,
                                  self.executor_server.keep_jobdir,
                                  str(self.job.unique))
@@ -726,19 +727,20 @@
             self.job.sendWorkException(traceback.format_exc())
         finally:
             self.running = False
-            try:
-                self.jobdir.cleanup()
-            except Exception:
-                self.log.exception("Error cleaning up jobdir:")
-            try:
-                self.executor_server.finishJob(self.job.unique)
-            except Exception:
-                self.log.exception("Error finalizing job thread:")
+            if self.jobdir:
+                try:
+                    self.jobdir.cleanup()
+                except Exception:
+                    self.log.exception("Error cleaning up jobdir:")
             if self.ssh_agent:
                 try:
                     self.ssh_agent.stop()
                 except Exception:
                     self.log.exception("Error stopping SSH agent:")
+            try:
+                self.executor_server.finishJob(self.job.unique)
+            except Exception:
+                self.log.exception("Error finalizing job thread:")
 
     def _execute(self):
         args = json.loads(self.job.arguments)
@@ -836,12 +838,22 @@
         self.job.sendWorkStatus(0, 100)
 
         result = self.runPlaybooks(args)
+        data = self.getResultData()
+        result_data = json.dumps(dict(result=result,
+                                      data=data))
+        self.log.debug("Sending result: %s" % (result_data,))
+        self.job.sendWorkComplete(result_data)
 
-        if result is None:
-            self.job.sendWorkFail()
-            return
-        result = dict(result=result)
-        self.job.sendWorkComplete(json.dumps(result))
+    def getResultData(self):
+        data = {}
+        try:
+            with open(self.jobdir.result_data_file) as f:
+                file_data = f.read()
+                if file_data:
+                    data = json.loads(file_data)
+        except Exception:
+            self.log.exception("Unable to load result data:")
+        return data
 
     def doMergeChanges(self, merger, items, repo_state):
         ret = merger.mergeChanges(items, repo_state=repo_state)
@@ -884,10 +896,10 @@
         result = None
 
         pre_failed = False
-        for playbook in self.jobdir.pre_playbooks:
+        for count, playbook in enumerate(self.jobdir.pre_playbooks):
             # TODOv3(pabelanger): Implement pre-run timeout setting.
             pre_status, pre_code = self.runAnsiblePlaybook(
-                playbook, args['timeout'])
+                playbook, args['timeout'], phase='pre', count=count)
             if pre_status != self.RESULT_NORMAL or pre_code != 0:
                 # These should really never fail, so return None and have
                 # zuul try again
@@ -897,7 +909,7 @@
 
         if not pre_failed:
             job_status, job_code = self.runAnsiblePlaybook(
-                self.jobdir.playbook, args['timeout'])
+                self.jobdir.playbook, args['timeout'], phase='run')
             if job_status == self.RESULT_TIMED_OUT:
                 return 'TIMED_OUT'
             if job_status == self.RESULT_ABORTED:
@@ -913,10 +925,10 @@
             else:
                 result = 'FAILURE'
 
-        for playbook in self.jobdir.post_playbooks:
+        for count, playbook in enumerate(self.jobdir.post_playbooks):
             # TODOv3(pabelanger): Implement post-run timeout setting.
             post_status, post_code = self.runAnsiblePlaybook(
-                playbook, args['timeout'], success)
+                playbook, args['timeout'], success, phase='post', count=count)
             if post_status != self.RESULT_NORMAL or post_code != 0:
                 # If we encountered a pre-failure, that takes
                 # precedence over the post result.
@@ -1084,11 +1096,8 @@
                 for entry in os.listdir(d):
                     self._blockPluginDirs(os.path.join(d, entry))
             return d
-        # We assume the repository itself is a collection of roles
-        if not trusted:
-            for entry in os.listdir(path):
-                self._blockPluginDirs(os.path.join(path, entry))
-        return path
+        # It is neither a bare role, nor a collection of roles
+        raise Exception("Unable to find role in %s" % (path,))
 
     def prepareZuulRole(self, args, role, root, trusted, untrusted):
         self.log.debug("Prepare zuul role for %s" % (role,))
@@ -1189,7 +1198,8 @@
         all_vars['zuul']['executor'] = dict(
             hostname=self.executor_server.hostname,
             src_root=self.jobdir.src_root,
-            log_root=self.jobdir.log_root)
+            log_root=self.jobdir.log_root,
+            result_data_file=self.jobdir.result_data_file)
 
         nodes = self.getHostList(args)
         inventory = make_inventory_dict(nodes, args['groups'], all_vars)
@@ -1281,6 +1291,7 @@
         env_copy.update(self.ssh_agent.env)
         env_copy['LOGNAME'] = 'zuul'
         env_copy['ZUUL_JOB_OUTPUT_FILE'] = self.jobdir.job_output_file
+        env_copy['ZUUL_JOBDIR'] = self.jobdir.root
         pythonpath = env_copy.get('PYTHONPATH')
         if pythonpath:
             pythonpath = [pythonpath]
@@ -1376,7 +1387,8 @@
 
         return (self.RESULT_NORMAL, ret)
 
-    def runAnsiblePlaybook(self, playbook, timeout, success=None):
+    def runAnsiblePlaybook(self, playbook, timeout, success=None,
+                           phase=None, count=None):
         env_copy = os.environ.copy()
         env_copy['LOGNAME'] = 'zuul'
 
@@ -1390,6 +1402,13 @@
         if success is not None:
             cmd.extend(['-e', 'success=%s' % str(bool(success))])
 
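+        # Expose the playbook phase (pre/run/post) and its index to Ansible.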
+        if phase:
+            cmd.extend(['-e', 'zuul_execution_phase=%s' % phase])
+
+        if count is not None:
+            cmd.extend(['-e', 'zuul_execution_phase_count=%s' % count])
+
         result, code = self.runAnsible(
             cmd=cmd, timeout=timeout, trusted=playbook.trusted)
         self.log.debug("Ansible complete, result %s code %s" % (
diff --git a/zuul/model.py b/zuul/model.py
index 4744bbe..ffbb70c 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -1077,6 +1077,7 @@
         self.uuid = uuid
         self.url = None
         self.result = None
+        self.result_data = {}
         self.build_set = None
         self.execute_time = time.time()
         self.start_time = None
@@ -1095,7 +1096,9 @@
                 (self.uuid, self.job.name, self.worker))
 
     def getSafeAttributes(self):
-        return Attributes(uuid=self.uuid)
+        return Attributes(uuid=self.uuid,
+                          result=self.result,
+                          result_data=self.result_data)
 
 
 class Worker(object):
@@ -1627,6 +1630,9 @@
         if pattern:
             url = self.formatUrlPattern(pattern, job, build)
         if not url:
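+            # Fall back to a log URL returned by the job via zuul_return.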
+            url = build.result_data.get('zuul', {}).get('log_url')
+        if not url:
             url = build.url or job.name
         return (result, url)
 
@@ -1693,6 +1698,7 @@
 
             ret['jobs'].append({
                 'name': job.name,
+                'dependencies': list(job.dependencies),
                 'elapsed_time': elapsed,
                 'remaining_time': remaining,
                 'url': build_url,
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index fe6a673..e5e7f87 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -273,10 +273,11 @@
         self.wake_event.set()
         self.log.debug("Done adding start event for build: %s" % build)
 
-    def onBuildCompleted(self, build, result):
+    def onBuildCompleted(self, build, result, result_data):
         self.log.debug("Adding complete event for build: %s result: %s" % (
             build, result))
         build.end_time = time.time()
+        build.result_data = result_data
         # Note, as soon as the result is set, other threads may act
         # upon this, even though the event hasn't been fully
         # processed.  Ensure that any other data from the event (eg,
@@ -779,6 +780,7 @@
                 self.log.error("Unable to handle event %s" % event)
             event.done()
         except Exception:
+            self.log.exception("Exception in management event:")
             event.exception(sys.exc_info())
         self.management_event_queue.task_done()
 
diff --git a/zuul/web.py b/zuul/web.py
index 2ef65fe..ab16e11 100644
--- a/zuul/web.py
+++ b/zuul/web.py
@@ -138,6 +138,8 @@
                     break
                 elif msg.type == aiohttp.WSMsgType.CLOSED:
                     break
+        except asyncio.CancelledError:
+            self.log.debug("Websocket request handling cancelled")
         except Exception as e:
             self.log.exception("Websocket exception:")
             await ws.close(code=4009, message=str(e).encode('utf-8'))
@@ -228,5 +231,6 @@
     logging.basicConfig(level=logging.DEBUG)
     loop = asyncio.get_event_loop()
     loop.set_debug(True)
-    z = ZuulWeb()
+    z = ZuulWeb(listen_address="127.0.0.1", listen_port=9000,
+                gear_server="127.0.0.1", gear_port=4730)
     z.run(loop)