Merge "Don't decode str in LogStreamingHandler"
diff --git a/.zuul.yaml b/.zuul.yaml
index d73be8f..caef296 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -25,8 +25,9 @@
     required-projects:
       - openstack/ara
     files:
-      - zuul/ansible/callback/.*
+      - zuul/ansible/.*
       - playbooks/zuul-stream/.*
+      - requirements.txt
 
 - project:
     check:
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
index 48e7ba8..5bcd2a2 100644
--- a/doc/source/admin/tenants.rst
+++ b/doc/source/admin/tenants.rst
@@ -25,7 +25,7 @@
 ------
 
 A tenant is a collection of projects which share a Zuul
-configuration.  An example tenant definition is:
+configuration. Some examples of tenant definitions are:
 
 .. code-block:: yaml
 
@@ -46,6 +46,30 @@
              - project2:
                  exclude-unprotected-branches: true
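+
+The following example excludes several types of configuration items
+from a group of untrusted projects: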
 
+.. code-block:: yaml
+
+   - tenant:
+       name: my-tenant
+       source:
+         gerrit:
+           config-projects:
+             - common-config
+           untrusted-projects:
+             - exclude:
+                 - job
+                 - semaphore
+                 - project
+                 - project-template
+                 - nodeset
+                 - secret
+               projects:
+                 - project1
+                 - project2:
+                     exclude-unprotected-branches: true
+
 .. attr:: tenant
 
    The following attributes are supported:
@@ -157,6 +181,36 @@
             processed. Defaults to the tenant wide setting of
             exclude-unprotected-branches.
 
+      .. attr:: <project-group>
+
+         The items in the list are dictionaries with the following
+         attributes. The **configuration items** definition is
+         applied to every project in the list.
+
+         .. attr:: include
+
+            A list of **configuration items** that should be loaded.
+
+         .. attr:: exclude
+
+            A list of **configuration items** that should not be loaded.
+
+         .. attr:: projects
+
+            A list of **project** items.
+
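+         For example, a project group that loads only job and project
+         definitions from its projects might look like:
+
+         .. code-block:: yaml
+
+            - include:
+                - job
+                - project
+              projects:
+                - project1
+                - project2
+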
    .. attr:: max-nodes-per-job
       :default: 5
 
diff --git a/requirements.txt b/requirements.txt
index f24f195..7057c5a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,6 +25,6 @@
 cachecontrol
 pyjwt
 iso8601
-aiohttp
+aiohttp<3.0.0
 uvloop;python_version>='3.5'
 psutil
diff --git a/tools/encrypt_secret.py b/tools/encrypt_secret.py
index 4cb1666..45ad68c 100755
--- a/tools/encrypt_secret.py
+++ b/tools/encrypt_secret.py
@@ -26,9 +26,11 @@
 try:
     from urllib.request import Request
     from urllib.request import urlopen
+    from urllib.parse import urlparse
 except ImportError:
     from urllib2 import Request
     from urllib2 import urlopen
+    from urlparse import urlparse
 
 DESCRIPTION = """Encrypt a secret for Zuul.
 
@@ -43,7 +45,6 @@
     parser.add_argument('url',
                         help="The base URL of the zuul server and tenant.  "
                         "E.g., https://zuul.example.com/tenant-name")
-    # TODO(jeblair): Throw a fit if SSL is not used.
     parser.add_argument('project',
                         help="The name of the project.")
     parser.add_argument('--strip', action='store_true', default=False,
@@ -60,6 +61,15 @@
                         "to standard output.")
     args = parser.parse_args()
 
+    # Retrieving the public key over an unencrypted connection could allow
+    # the secret to be compromised. Only the file and https schemes are
+    # considered safe.
+    url = urlparse(args.url)
+    if url.scheme not in ('file', 'https'):
+        sys.stderr.write("WARNING: Retrieving the encryption key via an "
+                         "unencrypted connection. Your secret may be "
+                         "compromised.\n")
+
     req = Request("%s/%s.pub" % (args.url.rstrip('/'), args.project))
     pubkey = urlopen(req)
 
diff --git a/zuul/cmd/scheduler.py b/zuul/cmd/scheduler.py
index 68c9000..a3a53cf 100755
--- a/zuul/cmd/scheduler.py
+++ b/zuul/cmd/scheduler.py
@@ -159,6 +159,9 @@
             self.log.exception("Error starting Zuul:")
             # TODO(jeblair): If we had all threads marked as daemon,
             # we might be able to have a nicer way of exiting here.
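+            # Stop any threads the scheduler has already started so
+            # that sys.exit() below is not blocked by non-daemon threads.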
+            self.sched.stop()
             sys.exit(1)
 
         signal.signal(signal.SIGHUP, self.reconfigure_handler)
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 3e16304..53ef173 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -777,7 +777,16 @@
         return data
 
     def doMergeChanges(self, merger, items, repo_state):
-        ret = merger.mergeChanges(items, repo_state=repo_state)
+        try:
+            ret = merger.mergeChanges(items, repo_state=repo_state)
+        except ValueError:
+            # Return ABORTED so that we'll try again. At this point all of
+            # the refs we're trying to merge should be valid refs; if we
+            # can't fetch them, a retry should resolve it.
+            self.log.exception("Could not fetch refs to merge from remote")
+            result = dict(result='ABORTED')
+            self.job.sendWorkComplete(json.dumps(result))
+            return False
         if not ret:  # merge conflict
             result = dict(result='MERGER_FAILURE')
             if self.executor_server.statsd:
@@ -844,6 +853,16 @@
         repo.checkout(selected_ref)
         return selected_ref
 
+    def getAnsibleTimeout(self, start, timeout):
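+        # Return the time remaining from the total job timeout budget,
+        # measured from 'start'. A timeout of None means the job has no
+        # timeout configured and is passed through unchanged.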
+        if timeout is not None:
+            now = time.time()
+            elapsed = now - start
+            timeout = timeout - elapsed
+        return timeout
+
     def runPlaybooks(self, args):
         result = None
 
@@ -861,10 +880,17 @@
         pre_failed = False
         success = False
         self.started = True
+        time_started = time.time()
+        # The timeout value is the total job timeout, i.e. the cumulative
+        # time that the pre, run, and post playbooks may consume.
+        job_timeout = args['timeout']
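+        # For example, with a job timeout of 3600s and 600s consumed by
+        # the pre phase, the run playbook gets a 3000s timeout.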
         for index, playbook in enumerate(self.jobdir.pre_playbooks):
             # TODOv3(pabelanger): Implement pre-run timeout setting.
+            ansible_timeout = self.getAnsibleTimeout(time_started, job_timeout)
             pre_status, pre_code = self.runAnsiblePlaybook(
-                playbook, args['timeout'], phase='pre', index=index)
+                playbook, ansible_timeout, phase='pre', index=index)
             if pre_status != self.RESULT_NORMAL or pre_code != 0:
                 # These should really never fail, so return None and have
                 # zuul try again
@@ -872,8 +898,9 @@
                 break
 
         if not pre_failed:
+            ansible_timeout = self.getAnsibleTimeout(time_started, job_timeout)
             job_status, job_code = self.runAnsiblePlaybook(
-                self.jobdir.playbook, args['timeout'], phase='run')
+                self.jobdir.playbook, ansible_timeout, phase='run')
             if job_status == self.RESULT_ABORTED:
                 return 'ABORTED'
             elif job_status == self.RESULT_TIMED_OUT:
@@ -894,8 +921,9 @@
 
         for index, playbook in enumerate(self.jobdir.post_playbooks):
             # TODOv3(pabelanger): Implement post-run timeout setting.
+            ansible_timeout = self.getAnsibleTimeout(time_started, job_timeout)
             post_status, post_code = self.runAnsiblePlaybook(
-                playbook, args['timeout'], success, phase='post', index=index)
+                playbook, ansible_timeout, success, phase='post', index=index)
             if post_status == self.RESULT_ABORTED:
                 return 'ABORTED'
             if post_status != self.RESULT_NORMAL or post_code != 0:
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 2bce43f..c06497d 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -231,6 +231,9 @@
         self.statsd = get_statsd(config)
         self.rpc = rpclistener.RPCListener(config, self)
         self.stats_thread = threading.Thread(target=self.runStats)
+        self.stats_thread.daemon = True
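+        # Mark the thread as a daemon so it cannot keep the process
+        # alive if the scheduler exits without joining it.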
         self.stats_stop = threading.Event()
         # TODO(jeblair): fix this
         # Despite triggers being part of the pipeline, there is one trigger set