Merge "Use ssh for git-upload-pack" into feature/zuulv3
diff --git a/doc/source/reporters.rst b/doc/source/reporters.rst
index e3ab947..dd053fa 100644
--- a/doc/source/reporters.rst
+++ b/doc/source/reporters.rst
@@ -44,6 +44,10 @@
set as the commit status on github.
``status: 'success'``
+ **status-url**
+ String value for the link URL to set in the github status. Defaults to the
+ zuul server's status_url, or the empty string if that is unset.
+
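For context, a github reporter using the new option might be configured like
this sketch (mirroring tests/fixtures/layouts/reporting-github.yaml; the URL is
illustrative):

    success:
      github:
        comment: false
        status: 'success'
        status-url: http://logs.example.com/{pipeline.name}/{change.project}/{change.number}/{change.patchset}/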
**comment**
Boolean value (``true`` or ``false``) that determines if the reporter should
add a comment with the pipeline status to the github pull request. Defaults
diff --git a/doc/source/triggers.rst b/doc/source/triggers.rst
index f73ad2f..41a56a0 100644
--- a/doc/source/triggers.rst
+++ b/doc/source/triggers.rst
@@ -133,6 +133,8 @@
*push* - head reference updated (pushed to branch)
+ *status* - status set on commit
+
A ``pull_request_review`` event will
have associated action(s) to trigger from. The supported actions are:
@@ -165,6 +167,12 @@
strings each of which is matched to the review state, which can be one of
``approved``, ``comment``, or ``request_changes``.
+ **status**
+ This is only used for ``status`` actions. It accepts a list of strings, each
+ of which is matched against the user setting the status, the status context,
+ and the status itself, in the format ``user:context:status``. For example,
+ ``zuul_github_ci_bot:check_pipeline:success``.
+
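A pipeline reacting to the new ``status`` action might be configured like this
sketch (after tests/fixtures/layouts/requirements-github.yaml; the pipeline
name and status string are illustrative):

    - pipeline:
        name: check_status
        manager: independent
        trigger:
          github:
            - event: pull_request
              action: status
              status: 'zuul_github_ci_bot:check_pipeline:success'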
**ref**
This is only used for ``push`` events. This field is treated as a regular
expression and multiple refs may be listed. Github always sends full ref
diff --git a/doc/source/zuul.rst b/doc/source/zuul.rst
index 56cc6a8..a7dfb44 100644
--- a/doc/source/zuul.rst
+++ b/doc/source/zuul.rst
@@ -108,6 +108,10 @@
commands.
``state_dir=/var/lib/zuul``
+**jobroot_dir**
+ Path to the directory in which Zuul should store temporary job files.
+ ``jobroot_dir=/tmp``
+
**report_times**
Boolean value (``true`` or ``false``) that determines if Zuul should
include elapsed times for each job in the textual report. Used by
@@ -165,6 +169,33 @@
Path to PID lock file for the merger process.
``pidfile=/var/run/zuul-merger/merger.pid``
+executor
+""""""""
+
+The zuul-executor process configuration.
+
+**finger_port**
+ Port to use for the finger log streamer.
+ ``finger_port=79``
+
+**git_dir**
+ Directory that Zuul should clone local git repositories to.
+ ``git_dir=/var/lib/zuul/git``
+
+**log_config**
+ Path to log config file for the executor process.
+ ``log_config=/etc/zuul/logging.yaml``
+
+**private_key_file**
+ SSH private key file to be used when logging into worker nodes.
+ ``private_key_file=~/.ssh/id_rsa``
+
+**user**
+ User ID for the zuul-executor process. In normal operation as a daemon,
+ the executor should be started as the ``root`` user, but it will drop
+ privileges to this user during startup.
+ ``user=zuul``
+
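Putting the new options together, a minimal ``[executor]`` fragment of
zuul.conf might read as follows (a sketch; values are taken from the examples
above):

    [executor]
    finger_port=79
    git_dir=/var/lib/zuul/git
    log_config=/etc/zuul/logging.yaml
    private_key_file=~/.ssh/id_rsa
    user=zuul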
.. _connection:
connection ArbitraryName
diff --git a/etc/status/public_html/index.html b/etc/status/public_html/index.html
index ca5bb56..cc3d40a 100644
--- a/etc/status/public_html/index.html
+++ b/etc/status/public_html/index.html
@@ -30,8 +30,7 @@
<script src="jquery.zuul.js"></script>
<script src="zuul.app.js"></script>
<script>
- // @license magnet:?xt=urn:btih:8e4f440f4c65981c5bf93c76d35135ba5064d8b7&dn=apache-2.0.txt
-Apache 2.0
+ // @license magnet:?xt=urn:btih:8e4f440f4c65981c5bf93c76d35135ba5064d8b7&dn=apache-2.0.txt Apache 2.0
zuul_build_dom(jQuery, '#zuul_container');
zuul_start(jQuery);
// @license-end
diff --git a/etc/status/public_html/jquery.zuul.js b/etc/status/public_html/jquery.zuul.js
index c7e23b2..aec7a46 100644
--- a/etc/status/public_html/jquery.zuul.js
+++ b/etc/status/public_html/jquery.zuul.js
@@ -279,7 +279,16 @@
var $change_link = $('<small />');
if (change.url !== null) {
- if (/^[0-9a-f]{40}$/.test(change.id)) {
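+ // Github change ids look like "<PR number>,<head sha>"; render the
+ // PR number and keep the full change id in a tooltip.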
+ var github_id = change.id.match(/^([0-9]+),([0-9a-f]{40})$/);
+ if (github_id) {
+ $change_link.append(
+ $('<a />').attr('href', change.url).append(
+ $('<abbr />')
+ .attr('title', change.id)
+ .text('#' + github_id[1])
+ )
+ );
+ } else if (/^[0-9a-f]{40}$/.test(change.id)) {
var change_id_short = change.id.slice(0, 7);
$change_link.append(
$('<a />').attr('href', change.url).append(
diff --git a/etc/zuul.conf-sample b/etc/zuul.conf-sample
index bf19895..1065cec 100644
--- a/etc/zuul.conf-sample
+++ b/etc/zuul.conf-sample
@@ -18,6 +18,9 @@
;git_user_name=zuul
zuul_url=http://zuul.example.com/p
+[executor]
+default_username=zuul
+
[webapp]
listen_address=0.0.0.0
port=8001
diff --git a/requirements.txt b/requirements.txt
index 9f20458..746bbcb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,8 @@
pbr>=1.1.0
-Github3.py==1.0.0a2
+# pull from master until https://github.com/sigmavirus24/github3.py/pull/671
+# is in a release
+-e git+https://github.com/sigmavirus24/github3.py.git@develop#egg=Github3.py
PyYAML>=3.1.0
Paste
WebOb>=1.2.3
@@ -21,3 +23,6 @@
sqlalchemy
alembic
cryptography>=1.6
+cachecontrol
+pyjwt
+iso8601
diff --git a/setup.cfg b/setup.cfg
index 9ee64f3..5ae0903 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -26,6 +26,7 @@
zuul = zuul.cmd.client:main
zuul-cloner = zuul.cmd.cloner:main
zuul-executor = zuul.cmd.executor:main
+ zuul-bwrap = zuul.driver.bubblewrap:main
[build_sphinx]
source-dir = doc/source
diff --git a/tests/base.py b/tests/base.py
index c3b8a9b..d8f88b7 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -16,6 +16,7 @@
# under the License.
from six.moves import configparser as ConfigParser
+import datetime
import gc
import hashlib
import json
@@ -555,7 +556,8 @@
class FakeGithubPullRequest(object):
def __init__(self, github, number, project, branch,
- subject, upstream_root, files=[], number_of_commits=1):
+ subject, upstream_root, files=[], number_of_commits=1,
+ writers=[]):
"""Creates a new PR with several commits.
Sends an event about opened PR."""
self.github = github
@@ -570,10 +572,13 @@
self.comments = []
self.labels = []
self.statuses = {}
+ self.reviews = []
+ self.writers = writers
self.updated_at = None
self.head_sha = None
self.is_merged = False
self.merge_message = None
+ self.state = 'open'
self._createPRRef()
self._addCommitToRepo(files=files)
self._updateTimeStamp()
@@ -600,6 +605,21 @@
def getPullRequestClosedEvent(self):
return self._getPullRequestEvent('closed')
+ def getPushEvent(self, old_sha, ref='refs/heads/master'):
+ name = 'push'
+ data = {
+ 'ref': ref,
+ 'before': old_sha,
+ 'after': self.head_sha,
+ 'repository': {
+ 'full_name': self.project
+ },
+ 'sender': {
+ 'login': 'ghuser'
+ }
+ }
+ return (name, data)
+
def addComment(self, message):
self.comments.append(message)
self._updateTimeStamp()
@@ -771,10 +791,9 @@
repo = self._getRepo()
return repo.references[self._getPRReference()].commit.hexsha
- def setStatus(self, sha, state, url, description, context):
+ def setStatus(self, sha, state, url, description, context, user='zuul'):
# Since we're bypassing the github API, which would require a user, we
# default the user to 'zuul' here.
- user = 'zuul'
# insert the status at the top of the list, to simulate that it
# is the most recent set status
self.statuses[sha].insert(0, ({
@@ -787,6 +806,40 @@
}
}))
+ def addReview(self, user, state, granted_on=None):
+ gh_time_format = '%Y-%m-%dT%H:%M:%SZ'
+ # convert the timestamp to a str format that would be returned
+ # from github as 'submitted_at' in the API response
+
+ if granted_on:
+ granted_on = datetime.datetime.utcfromtimestamp(granted_on)
+ submitted_at = time.strftime(
+ gh_time_format, granted_on.timetuple())
+ else:
+ # github timestamps have only one-second resolution, so we need to make
+ # sure reviews that tests add appear to be added over a period of
+ # time in the past and not all at once.
+ if not self.reviews:
+ # the first review happens 10 mins ago
+ offset = 600
+ else:
+ # subsequent reviews happen 1 minute closer to now
+ offset = 600 - (len(self.reviews) * 60)
+
+ granted_on = datetime.datetime.utcfromtimestamp(
+ time.time() - offset)
+ submitted_at = time.strftime(
+ gh_time_format, granted_on.timetuple())
+
+ self.reviews.append({
+ 'state': state,
+ 'user': {
+ 'login': user,
+ 'email': user + "@derp.com",
+ },
+ 'submitted_at': submitted_at,
+ })
+
def _getPRReference(self):
return '%s/head' % self.number
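For illustration, a test can drive the review plumbing like this sketch
(``pr`` is a FakeGithubPullRequest; the users, states, and epoch offset are
arbitrary):

    import time

    # Implicit timestamp: the first review is dated ten minutes in the past.
    pr.addReview('herp', 'APPROVED')
    # Explicit timestamp: pass epoch seconds via granted_on.
    pr.addReview('derp', 'CHANGES_REQUESTED', granted_on=time.time() - 3600)
    assert pr.reviews[-1]['user']['login'] == 'derp'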
@@ -818,6 +871,21 @@
}
return (name, data)
+ def getCommitStatusEvent(self, context, state='success', user='zuul'):
+ name = 'status'
+ data = {
+ 'state': state,
+ 'sha': self.head_sha,
+ 'description': 'Test results for %s: %s' % (self.head_sha, state),
+ 'target_url': 'http://zuul/%s' % self.head_sha,
+ 'branches': [],
+ 'context': context,
+ 'sender': {
+ 'login': user
+ }
+ }
+ return (name, data)
+
class FakeGithubConnection(githubconnection.GithubConnection):
log = logging.getLogger("zuul.test.FakeGithubConnection")
@@ -882,6 +950,7 @@
'ref': pr.branch,
},
'mergeable': True,
+ 'state': pr.state,
'head': {
'sha': pr.head_sha,
'repo': {
@@ -891,10 +960,21 @@
}
return data
+ def getPullBySha(self, sha):
+ prs = list(set([p for p in self.pull_requests if sha == p.head_sha]))
+ if len(prs) > 1:
+ raise Exception('Multiple pulls found with head sha: %s' % sha)
+ pr = prs[0]
+ return self.getPull(pr.project, pr.number)
+
def getPullFileNames(self, project, number):
pr = self.pull_requests[number - 1]
return pr.files
+ def _getPullReviews(self, owner, project, number):
+ pr = self.pull_requests[number - 1]
+ return pr.reviews
+
def getUser(self, login):
data = {
'username': login,
@@ -903,6 +983,16 @@
}
return data
+ def getRepoPermission(self, project, login):
+ owner, proj = project.split('/')
+ for pr in self.pull_requests:
+ pr_owner, pr_project = pr.project.split('/')
+ if (pr_owner == owner and proj == pr_project):
+ if login in pr.writers:
+ return 'write'
+ else:
+ return 'read'
+
def getGitUrl(self, project):
return os.path.join(self.upstream_root, str(project))
@@ -934,8 +1024,12 @@
owner, proj = project.split('/')
for pr in self.pull_requests:
pr_owner, pr_project = pr.project.split('/')
+ # This is somewhat risky: if the same commit exists in multiple
+ # PRs, we might grab the wrong one, which may lack a status that
+ # is expected to be there. Maybe re-work this so that there is a
+ # global registry of commit statuses, as github has.
if (pr_owner == owner and pr_project == proj and
- pr.head_sha == sha):
+ sha in pr.statuses):
return pr.statuses[sha]
def setCommitStatus(self, project, sha, state,
@@ -1118,6 +1212,25 @@
self.log.debug(" OK")
return True
+ def getWorkspaceRepos(self, projects):
+ """Return workspace git repo objects for the listed projects
+
+ :arg list projects: A list of strings, each the canonical name
+ of a project.
+
+ :returns: A dictionary of {name: repo} for every listed
+ project.
+ :rtype: dict
+
+ """
+
+ repos = {}
+ for project in projects:
+ path = os.path.join(self.jobdir.src_root, project)
+ repo = git.Repo(path)
+ repos[project] = repo
+ return repos
+
class RecordingExecutorServer(zuul.executor.server.ExecutorServer):
"""An Ansible executor to be used in tests.
@@ -1204,9 +1317,10 @@
class RecordingAnsibleJob(zuul.executor.server.AnsibleJob):
- def doMergeChanges(self, items):
+ def doMergeChanges(self, merger, items, repo_state):
# Get a merger in order to update the repos involved in this job.
- commit = super(RecordingAnsibleJob, self).doMergeChanges(items)
+ commit = super(RecordingAnsibleJob, self).doMergeChanges(
+ merger, items, repo_state)
if not commit: # merge conflict
self.recordResult('MERGER_FAILURE')
return commit
@@ -1302,9 +1416,9 @@
len(self.low_queue))
self.log.debug("releasing queued job %s (%s)" % (regex, qlen))
for job in self.getQueue():
- if job.name != 'executor:execute':
+ if job.name != b'executor:execute':
continue
- parameters = json.loads(job.arguments)
+ parameters = json.loads(job.arguments.decode('utf8'))
if not regex or re.match(regex, parameters.get('job')):
self.log.debug("releasing queued job %s" %
job.unique)
@@ -1755,12 +1869,20 @@
# Make per test copy of Configuration.
self.setup_config()
+ self.private_key_file = os.path.join(self.test_root, 'test_id_rsa')
+ if not os.path.exists(self.private_key_file):
+ src_private_key_file = os.path.join(FIXTURE_DIR, 'test_id_rsa')
+ shutil.copy(src_private_key_file, self.private_key_file)
+ shutil.copy('{}.pub'.format(src_private_key_file),
+ '{}.pub'.format(self.private_key_file))
+ os.chmod(self.private_key_file, 0o0600)
self.config.set('zuul', 'tenant_config',
os.path.join(FIXTURE_DIR,
self.config.get('zuul', 'tenant_config')))
self.config.set('merger', 'git_dir', self.merger_src_root)
self.config.set('executor', 'git_dir', self.executor_src_root)
self.config.set('zuul', 'state_dir', self.state_root)
+ self.config.set('executor', 'private_key_file', self.private_key_file)
self.statsd = FakeStatsd()
# note, use 127.0.0.1 rather than localhost to avoid getting ipv6
@@ -1838,7 +1960,7 @@
self.sched.reconfigure(self.config)
self.sched.resume()
- def configure_connections(self):
+ def configure_connections(self, source_only=False):
# Set up gerrit related fakes
# Set a changes database so multiple FakeGerrit's can report back to
# a virtual canonical database given by the configured hostname
@@ -1881,7 +2003,7 @@
# Register connections from the config using fakes
self.connections = zuul.lib.connections.ConnectionRegistry()
- self.connections.configure(self.config)
+ self.connections.configure(self.config, source_only=source_only)
def setup_config(self):
# This creates the per-test configuration object. It can be
@@ -2070,6 +2192,8 @@
def shutdown(self):
self.log.debug("Shutting down after tests")
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
self.executor_client.stop()
self.merge_client.stop()
self.executor_server.stop()
@@ -2085,10 +2209,17 @@
self.fake_nodepool.stop()
self.zk.disconnect()
self.printHistory()
- # we whitelist watchdog threads as they have relatively long delays
+ # We whitelist watchdog threads as they have relatively long delays
# before noticing they should exit, but they should exit on their own.
+ # Further, the pydevd threads also need to be whitelisted so debugging
+ # e.g. in PyCharm is possible without breaking shutdown.
+ whitelist = ['executor-watchdog',
+ 'pydevd.CommandThread',
+ 'pydevd.Reader',
+ 'pydevd.Writer',
+ ]
threads = [t for t in threading.enumerate()
- if t.name != 'executor-watchdog']
+ if t.name not in whitelist]
if len(threads) > 1:
log_str = ""
for thread_id, stack_frame in sys._current_frames().items():
@@ -2189,7 +2320,8 @@
# It hasn't been reported yet.
return False
# Make sure that none of the worker connections are in GRAB_WAIT
- for connection in self.executor_server.worker.active_connections:
+ worker = self.executor_server.executor_worker
+ for connection in worker.active_connections:
if connection.state == 'GRAB_WAIT':
return False
return True
@@ -2308,6 +2440,12 @@
jobs = filter(lambda x: x.result == result, jobs)
return len(list(jobs))
+ def getBuildByName(self, name):
+ for build in self.builds:
+ if build.name == name:
+ return build
+ raise Exception("Unable to find build %s" % name)
+
def getJobFromHistory(self, name, project=None):
for job in self.history:
if (job.name == name and
@@ -2541,6 +2679,29 @@
specified_conn.server == conn.server):
conn.addEvent(event)
+ def getUpstreamRepos(self, projects):
+ """Return upstream git repo objects for the listed projects
+
+ :arg list projects: A list of strings, each the canonical name
+ of a project.
+
+ :returns: A dictionary of {name: repo} for every listed
+ project.
+ :rtype: dict
+
+ """
+
+ repos = {}
+ for project in projects:
+ # FIXME(jeblair): the upstream root does not yet have a
+ # hostname component; that needs to be added, and this
+ # line removed:
+ tmp_project_name = '/'.join(project.split('/')[1:])
+ path = os.path.join(self.upstream_root, tmp_project_name)
+ repo = git.Repo(path)
+ repos[project] = repo
+ return repos
+
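For illustration, the two helpers can be combined in a test to assert what the
executor checked out (a sketch; the project and job names are hypothetical,
and getWorkspaceRepos is assumed to be available on the build object returned
by getBuildByName):

    # Sketch: compare the executor's checkout against upstream master.
    projects = ['review.example.com/org/project1']
    build = self.getBuildByName('integration')
    work = build.getWorkspaceRepos(projects)
    upstream = self.getUpstreamRepos(projects)
    for name in projects:
        self.assertEqual(work[name].commit('HEAD').hexsha,
                         upstream[name].commit('master').hexsha)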
class AnsibleZuulTestCase(ZuulTestCase):
"""ZuulTestCase but with an actual ansible executor running"""
diff --git a/tests/encrypt_secret.py b/tests/encrypt_secret.py
index b8524a0..0b0cf19 100644
--- a/tests/encrypt_secret.py
+++ b/tests/encrypt_secret.py
@@ -30,5 +30,6 @@
ciphertext = encryption.encrypt_pkcs1_oaep(sys.argv[1], public_key)
print(ciphertext.encode('base64'))
+
if __name__ == '__main__':
main()
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
new file mode 100644
index 0000000..d528be1
--- /dev/null
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
@@ -0,0 +1,12 @@
+- hosts: all
+ tasks:
+ - name: Register hello-world.txt file.
+ stat:
+ path: "{{zuul.executor.log_root}}/hello-world.txt"
+ register: st
+
+ - name: Assert hello-world.txt file.
+ assert:
+ that:
+ - st.stat.exists
+ - st.stat.isreg
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index f9be158..02b87bd 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -72,3 +72,7 @@
nodes:
- name: ubuntu-xenial
image: ubuntu-xenial
+
+- job:
+ name: hello
+ post-run: hello-post
diff --git a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
index a2d9c6f..ca734c5 100644
--- a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
@@ -2,6 +2,10 @@
parent: python27
name: faillocal
+- job:
+ parent: hello
+ name: hello-world
+
- project:
name: org/project
check:
@@ -10,3 +14,4 @@
- faillocal
- check-vars
- timeout
+ - hello-world
diff --git a/tests/fixtures/config/ansible/git/org_project/playbooks/hello-world.yaml b/tests/fixtures/config/ansible/git/org_project/playbooks/hello-world.yaml
new file mode 100644
index 0000000..373de02
--- /dev/null
+++ b/tests/fixtures/config/ansible/git/org_project/playbooks/hello-world.yaml
@@ -0,0 +1,5 @@
+- hosts: all
+ tasks:
+ - copy:
+ content: "hello world"
+ dest: "{{zuul.executor.log_root}}/hello-world.txt"
diff --git a/tests/fixtures/config/inventory/git/common-config/playbooks/group-inventory.yaml b/tests/fixtures/config/inventory/git/common-config/playbooks/group-inventory.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/inventory/git/common-config/playbooks/group-inventory.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/inventory/git/common-config/playbooks/single-inventory.yaml b/tests/fixtures/config/inventory/git/common-config/playbooks/single-inventory.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/inventory/git/common-config/playbooks/single-inventory.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/inventory/git/common-config/zuul.yaml b/tests/fixtures/config/inventory/git/common-config/zuul.yaml
new file mode 100644
index 0000000..184bd80
--- /dev/null
+++ b/tests/fixtures/config/inventory/git/common-config/zuul.yaml
@@ -0,0 +1,42 @@
+- pipeline:
+ name: check
+ manager: independent
+ allow-secrets: true
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- nodeset:
+ name: nodeset1
+ nodes:
+ - name: controller
+ image: controller-image
+ - name: compute1
+ image: compute-image
+ - name: compute2
+ image: compute-image
+ groups:
+ - name: ceph-osd
+ nodes:
+ - controller
+ - name: ceph-monitor
+ nodes:
+ - controller
+ - compute1
+ - compute2
+
+- job:
+ name: single-inventory
+ nodes:
+ - name: ubuntu-xenial
+ image: ubuntu-xenial
+
+- job:
+ name: group-inventory
+ nodes: nodeset1
diff --git a/tests/fixtures/config/inventory/git/org_project/.zuul.yaml b/tests/fixtures/config/inventory/git/org_project/.zuul.yaml
new file mode 100644
index 0000000..26310a0
--- /dev/null
+++ b/tests/fixtures/config/inventory/git/org_project/.zuul.yaml
@@ -0,0 +1,6 @@
+- project:
+ name: org/project
+ check:
+ jobs:
+ - single-inventory
+ - group-inventory
diff --git a/tests/fixtures/config/inventory/git/org_project/README b/tests/fixtures/config/inventory/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/inventory/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/inventory/main.yaml b/tests/fixtures/config/inventory/main.yaml
new file mode 100644
index 0000000..208e274
--- /dev/null
+++ b/tests/fixtures/config/inventory/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
diff --git a/tests/fixtures/config/openstack/git/project-config/zuul.yaml b/tests/fixtures/config/openstack/git/project-config/zuul.yaml
index 5d0c774..aff2046 100644
--- a/tests/fixtures/config/openstack/git/project-config/zuul.yaml
+++ b/tests/fixtures/config/openstack/git/project-config/zuul.yaml
@@ -66,7 +66,7 @@
- job:
name: dsvm
parent: base
- repos:
+ required-projects:
- openstack/keystone
- openstack/nova
diff --git a/tests/fixtures/config/push-reqs/git/common-config/playbooks/job1.yaml b/tests/fixtures/config/push-reqs/git/common-config/playbooks/job1.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/push-reqs/git/common-config/playbooks/job1.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/push-reqs/git/common-config/zuul.yaml b/tests/fixtures/config/push-reqs/git/common-config/zuul.yaml
new file mode 100644
index 0000000..6569966
--- /dev/null
+++ b/tests/fixtures/config/push-reqs/git/common-config/zuul.yaml
@@ -0,0 +1,119 @@
+- pipeline:
+ name: current
+ manager: independent
+ require:
+ github:
+ current-patchset: true
+ gerrit:
+ current-patchset: true
+ trigger:
+ github:
+ - event: push
+ gerrit:
+ - event: ref-updated
+
+- pipeline:
+ name: open
+ manager: independent
+ require:
+ github:
+ open: true
+ gerrit:
+ open: true
+ trigger:
+ github:
+ - event: push
+ gerrit:
+ - event: ref-updated
+
+- pipeline:
+ name: review
+ manager: independent
+ require:
+ github:
+ review:
+ - type: approval
+ gerrit:
+ approval:
+ - email: herp@derp.invalid
+ trigger:
+ github:
+ - event: push
+ gerrit:
+ - event: ref-updated
+
+- pipeline:
+ name: status
+ manager: independent
+ require:
+ github:
+ status: 'zuul:check:success'
+ trigger:
+ github:
+ - event: push
+
+- pipeline:
+ name: pushhub
+ manager: independent
+ require:
+ gerrit:
+ open: true
+ trigger:
+ github:
+ - event: push
+ gerrit:
+ - event: ref-updated
+
+- pipeline:
+ name: pushgerrit
+ manager: independent
+ require:
+ github:
+ open: true
+ trigger:
+ github:
+ - event: push
+ gerrit:
+ - event: ref-updated
+
+- job:
+ name: job1
+
+- project:
+ name: org/project1
+ current:
+ jobs:
+ - job1
+ open:
+ jobs:
+ - job1
+ review:
+ jobs:
+ - job1
+ status:
+ jobs:
+ - job1
+ pushhub:
+ jobs:
+ - job1
+ pushgerrit:
+ jobs:
+ - job1
+
+- project:
+ name: org/project2
+ current:
+ jobs:
+ - job1
+ open:
+ jobs:
+ - job1
+ review:
+ jobs:
+ - job1
+ pushhub:
+ jobs:
+ - job1
+ pushgerrit:
+ jobs:
+ - job1
diff --git a/tests/fixtures/config/push-reqs/git/org_project1/README b/tests/fixtures/config/push-reqs/git/org_project1/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/push-reqs/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/push-reqs/git/org_project2/README b/tests/fixtures/config/push-reqs/git/org_project2/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/push-reqs/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/push-reqs/main.yaml b/tests/fixtures/config/push-reqs/main.yaml
new file mode 100644
index 0000000..d9f1a42
--- /dev/null
+++ b/tests/fixtures/config/push-reqs/main.yaml
@@ -0,0 +1,11 @@
+- tenant:
+ name: tenant-one
+ source:
+ github:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project1
+ gerrit:
+ untrusted-projects:
+ - org/project2
diff --git a/tests/fixtures/config/zuul-connections-multiple-gerrits/git/common-config/zuul.yaml b/tests/fixtures/config/zuul-connections-multiple-gerrits/git/common-config/zuul.yaml
index 961ff06..8f858cd 100644
--- a/tests/fixtures/config/zuul-connections-multiple-gerrits/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/zuul-connections-multiple-gerrits/git/common-config/zuul.yaml
@@ -24,6 +24,25 @@
another_gerrit:
verified: -1
+- pipeline:
+ name: common_check
+ manager: independent
+ trigger:
+ another_gerrit:
+ - event: patchset-created
+ review_gerrit:
+ - event: patchset-created
+ success:
+ review_gerrit:
+ verified: 1
+ another_gerrit:
+ verified: 1
+ failure:
+ review_gerrit:
+ verified: -1
+ another_gerrit:
+ verified: -1
+
- job:
name: project-test1
@@ -41,3 +60,16 @@
another_check:
jobs:
- project-test2
+
+
+- project:
+ name: review.example.com/org/project2
+ common_check:
+ jobs:
+ - project-test1
+
+- project:
+ name: another.example.com/org/project2
+ common_check:
+ jobs:
+ - project-test2
diff --git a/tests/fixtures/config/zuul-connections-multiple-gerrits/git/org_project2/README b/tests/fixtures/config/zuul-connections-multiple-gerrits/git/org_project2/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/zuul-connections-multiple-gerrits/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/zuul-connections-multiple-gerrits/main.yaml b/tests/fixtures/config/zuul-connections-multiple-gerrits/main.yaml
index f5bff21..38810fd 100644
--- a/tests/fixtures/config/zuul-connections-multiple-gerrits/main.yaml
+++ b/tests/fixtures/config/zuul-connections-multiple-gerrits/main.yaml
@@ -6,6 +6,8 @@
- common-config
untrusted-projects:
- org/project1
+ - org/project2
another_gerrit:
untrusted-projects:
- org/project1
+ - org/project2
diff --git a/tests/fixtures/layouts/merging-github.yaml b/tests/fixtures/layouts/merging-github.yaml
index 4e13063..9f43f75 100644
--- a/tests/fixtures/layouts/merging-github.yaml
+++ b/tests/fixtures/layouts/merging-github.yaml
@@ -2,6 +2,7 @@
name: merge
description: Pipeline for merging the pull request
manager: independent
+ merge-failure-message: 'Merge failed'
trigger:
github:
- event: pull_request
diff --git a/tests/fixtures/layouts/repo-checkout-four-project.yaml b/tests/fixtures/layouts/repo-checkout-four-project.yaml
new file mode 100644
index 0000000..392931a
--- /dev/null
+++ b/tests/fixtures/layouts/repo-checkout-four-project.yaml
@@ -0,0 +1,81 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - approved: 1
+ success:
+ gerrit:
+ verified: 2
+ submit: true
+ failure:
+ gerrit:
+ verified: -2
+ start:
+ gerrit:
+ verified: 0
+ precedence: high
+
+- job:
+ name: integration
+ required-projects:
+ - org/project1
+ - org/project2
+ - org/project3
+ - org/project4
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project2
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project3
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project4
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
diff --git a/tests/fixtures/layouts/repo-checkout-no-timer.yaml b/tests/fixtures/layouts/repo-checkout-no-timer.yaml
new file mode 100644
index 0000000..2b65850
--- /dev/null
+++ b/tests/fixtures/layouts/repo-checkout-no-timer.yaml
@@ -0,0 +1,20 @@
+- pipeline:
+ name: periodic
+ manager: independent
+ # A trigger is required; set it to one that is a noop
+ # during tests that check the timer trigger.
+ trigger:
+ gerrit:
+ - event: ref-updated
+
+- job:
+ name: integration
+ override-branch: stable/havana
+ required-projects:
+ - org/project1
+
+- project:
+ name: org/project1
+ periodic:
+ jobs:
+ - integration
diff --git a/tests/fixtures/layouts/repo-checkout-post.yaml b/tests/fixtures/layouts/repo-checkout-post.yaml
new file mode 100644
index 0000000..9698289
--- /dev/null
+++ b/tests/fixtures/layouts/repo-checkout-post.yaml
@@ -0,0 +1,25 @@
+- pipeline:
+ name: post
+ manager: independent
+ trigger:
+ gerrit:
+ - event: ref-updated
+ ref: ^(?!refs/).*$
+
+- job:
+ name: integration
+ required-projects:
+ - org/project1
+ - org/project2
+
+- project:
+ name: org/project1
+ post:
+ jobs:
+ - integration
+
+- project:
+ name: org/project2
+ post:
+ jobs:
+ - integration
diff --git a/tests/fixtures/layouts/repo-checkout-six-project.yaml b/tests/fixtures/layouts/repo-checkout-six-project.yaml
new file mode 100644
index 0000000..93a64ea
--- /dev/null
+++ b/tests/fixtures/layouts/repo-checkout-six-project.yaml
@@ -0,0 +1,104 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - approved: 1
+ success:
+ gerrit:
+ verified: 2
+ submit: true
+ failure:
+ gerrit:
+ verified: -2
+ start:
+ gerrit:
+ verified: 0
+ precedence: high
+
+- job:
+ name: integration
+ required-projects:
+ - org/project1
+ - org/project2
+ - org/project3
+ - name: org/project4
+ override-branch: master
+ - org/project5
+ - org/project6
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project2
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project3
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project4
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project5
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project6
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
diff --git a/tests/fixtures/layouts/repo-checkout-timer.yaml b/tests/fixtures/layouts/repo-checkout-timer.yaml
new file mode 100644
index 0000000..d5917d1
--- /dev/null
+++ b/tests/fixtures/layouts/repo-checkout-timer.yaml
@@ -0,0 +1,18 @@
+- pipeline:
+ name: periodic
+ manager: independent
+ trigger:
+ timer:
+ - time: '* * * * * */1'
+
+- job:
+ name: integration
+ override-branch: stable/havana
+ required-projects:
+ - org/project1
+
+- project:
+ name: org/project1
+ periodic:
+ jobs:
+ - integration
diff --git a/tests/fixtures/layouts/repo-checkout-two-project.yaml b/tests/fixtures/layouts/repo-checkout-two-project.yaml
new file mode 100644
index 0000000..239d80c
--- /dev/null
+++ b/tests/fixtures/layouts/repo-checkout-two-project.yaml
@@ -0,0 +1,59 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - approved: 1
+ success:
+ gerrit:
+ verified: 2
+ submit: true
+ failure:
+ gerrit:
+ verified: -2
+ start:
+ gerrit:
+ verified: 0
+ precedence: high
+
+- job:
+ name: integration
+ required-projects:
+ - org/project1
+ - org/project2
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
+
+- project:
+ name: org/project2
+ check:
+ jobs:
+ - integration
+ gate:
+ queue: integrated
+ jobs:
+ - integration
diff --git a/tests/fixtures/layouts/reporting-github.yaml b/tests/fixtures/layouts/reporting-github.yaml
index c939f39..8dd35b0 100644
--- a/tests/fixtures/layouts/reporting-github.yaml
+++ b/tests/fixtures/layouts/reporting-github.yaml
@@ -29,6 +29,7 @@
github:
comment: false
status: 'success'
+ status-url: http://logs.example.com/{pipeline.name}/{change.project}/{change.number}/{change.patchset}/
failure:
github:
comment: false
diff --git a/tests/fixtures/layouts/requirements-github.yaml b/tests/fixtures/layouts/requirements-github.yaml
index cacc54f..9933f27 100644
--- a/tests/fixtures/layouts/requirements-github.yaml
+++ b/tests/fixtures/layouts/requirements-github.yaml
@@ -13,11 +13,233 @@
github:
comment: true
+- pipeline:
+ name: trigger_status
+ manager: independent
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: 'trigger me'
+ require-status: "zuul:check:success"
+ success:
+ github:
+ comment: true
+
+- pipeline:
+ name: trigger
+ manager: independent
+ trigger:
+ github:
+ - event: pull_request
+ action: status
+ status: 'zuul:check:success'
+ success:
+ github:
+ status: 'success'
+ failure:
+ github:
+ status: 'failure'
+
+- pipeline:
+ name: reviewusername
+ manager: independent
+ require:
+ github:
+ review:
+ - username: '^(herp|derp)$'
+ type: approved
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: 'test me'
+ success:
+ github:
+ comment: true
+
+- pipeline:
+ name: reviewreq
+ manager: independent
+ require:
+ github:
+ review:
+ - type: approved
+ permission: write
+ reject:
+ github:
+ review:
+ - type: changes_requested
+ permission: write
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: 'test me'
+ success:
+ github:
+ comment: true
+
+- pipeline:
+ name: reviewuserstate
+ manager: independent
+ require:
+ github:
+ review:
+ - username: 'derp'
+ type: approved
+ permission: write
+ reject:
+ github:
+ review:
+ - type: changes_requested
+ permission: write
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: 'test me'
+ success:
+ github:
+ comment: true
+
+- pipeline:
+ name: newer_than
+ manager: independent
+ require:
+ github:
+ review:
+ - type: approved
+ permission: write
+ newer-than: 1d
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: 'test me'
+ success:
+ github:
+ comment: true
+
+- pipeline:
+ name: older_than
+ manager: independent
+ require:
+ github:
+ review:
+ - type: approved
+ permission: write
+ older-than: 1d
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: 'test me'
+ success:
+ github:
+ comment: true
+
+- pipeline:
+ name: require_open
+ manager: independent
+ require:
+ github:
+ open: true
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: 'test me'
+ success:
+ github:
+ comment: true
+
+- pipeline:
+ name: require_current
+ manager: independent
+ require:
+ github:
+ current-patchset: true
+ trigger:
+ github:
+ - event: pull_request
+ action: changed
+ success:
+ github:
+ comment: true
+
- job:
name: project1-pipeline
+- job:
+ name: project2-trigger
+- job:
+ name: project3-reviewusername
+- job:
+ name: project4-reviewreq
+- job:
+ name: project5-reviewuserstate
+- job:
+ name: project6-newerthan
+- job:
+ name: project7-olderthan
+- job:
+ name: project8-requireopen
+- job:
+ name: project9-requirecurrent
- project:
name: org/project1
pipeline:
jobs:
- project1-pipeline
+ trigger_status:
+ jobs:
+ - project1-pipeline
+
+- project:
+ name: org/project2
+ trigger:
+ jobs:
+ - project2-trigger
+
+- project:
+ name: org/project3
+ reviewusername:
+ jobs:
+ - project3-reviewusername
+
+- project:
+ name: org/project4
+ reviewreq:
+ jobs:
+ - project4-reviewreq
+
+- project:
+ name: org/project5
+ reviewuserstate:
+ jobs:
+ - project5-reviewuserstate
+
+- project:
+ name: org/project6
+ newer_than:
+ jobs:
+ - project6-newerthan
+
+- project:
+ name: org/project7
+ older_than:
+ jobs:
+ - project7-olderthan
+
+- project:
+ name: org/project8
+ require_open:
+ jobs:
+ - project8-requireopen
+
+- project:
+ name: org/project9
+ require_current:
+ jobs:
+ - project9-requirecurrent
diff --git a/tests/fixtures/layouts/unmanaged-project.yaml b/tests/fixtures/layouts/unmanaged-project.yaml
new file mode 100644
index 0000000..d72c26e
--- /dev/null
+++ b/tests/fixtures/layouts/unmanaged-project.yaml
@@ -0,0 +1,25 @@
+- pipeline:
+ name: check
+ manager: independent
+ require:
+ gerrit:
+ open: True
+ current-patchset: True
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+ start:
+ gerrit:
+ verified: 0
+
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - noop
diff --git a/tests/fixtures/test_id_rsa b/tests/fixtures/test_id_rsa
new file mode 100644
index 0000000..a793bd0
--- /dev/null
+++ b/tests/fixtures/test_id_rsa
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICWwIBAAKBgQCX10EQhi7hEMk1h7/fQaEj9H2DxWR0s3RXD5UI7j1Bn21tBUus
+Y0tPC5wXES4VfilXg+EuOKsE6z8x8txP1wd1+d6Hq3SWXnOcqxxv2ueAy6Gc31E7
+a2IVDYvqVsAOtxsWddvMGTj98/lexQBX6Bh+wmuba/43lq5UPepwvfgNOQIDAQAB
+AoGADMCHNlwOk9hVDanY82cPoXVnFSn+xc5MdwNYAOgBPQGmrwFC2bd9G6Zd9ZH7
+zNJLpo3s23Tm6ALZy9gZqJrmhWDZBOqeYtmkd0yUf5bCbUzNre8+gHJY8k9PAxVM
+dPr2bq8G4PyN3yC2euTht35KLjb7hD8WiF3exgI/d8oBvgECQQDFKuWmkLtkSkGo
+1KRbeBfRePbfzhGJ1yHRyO72Z1+hVXuRmtcjTfPhMikgx9dxWbpqr/RPgs7D7N8D
+JpFlsiR/AkEAxSX4LOwovklPzCZ8FyfHhkydNgDyBw8y2Xe1OO0LBN51batf9rcl
+rJBYFvulrD+seYNRCWBFpEi4KKZh4YESRwJAKmz+mYbPK9dmpYOMEjqXNXXH+YSH
+9ZcbKd8IvHCl/Ts9qakd3fTqI2z9uJYH39Yk7MwL0Agfob0Yh78GzlE01QJACheu
+g8Y3M76XCjFyKtFLgpGLfsc/nKLnjIB3U4m3BbHJuyqJyByKHjJpgAuz6IR99N6H
+GH7IMefTHame2yd7YwJAUIGRD+iOO0RJvtEHUbsz6IxrQdubNOvzm/78eyBTcbsa
+8996D18fJF6Q0/Gg0Cm65PNOpIthP3qxFkuuduUEUg==
+-----END RSA PRIVATE KEY-----
diff --git a/tests/fixtures/test_id_rsa.pub b/tests/fixtures/test_id_rsa.pub
new file mode 100644
index 0000000..bffc726
--- /dev/null
+++ b/tests/fixtures/test_id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCX10EQhi7hEMk1h7/fQaEj9H2DxWR0s3RXD5UI7j1Bn21tBUusY0tPC5wXES4VfilXg+EuOKsE6z8x8txP1wd1+d6Hq3SWXnOcqxxv2ueAy6Gc31E7a2IVDYvqVsAOtxsWddvMGTj98/lexQBX6Bh+wmuba/43lq5UPepwvfgNOQ== Private Key For Zuul Tests DO NOT USE
diff --git a/tests/fixtures/zuul-connections-merger.conf b/tests/fixtures/zuul-connections-merger.conf
new file mode 100644
index 0000000..7a1bc42
--- /dev/null
+++ b/tests/fixtures/zuul-connections-merger.conf
@@ -0,0 +1,35 @@
+[gearman]
+server=127.0.0.1
+
+[zuul]
+job_name_in_report=true
+status_url=http://zuul.example.com/status
+
+[merger]
+git_dir=/tmp/zuul-test/git
+git_user_email=zuul@example.com
+git_user_name=zuul
+zuul_url=http://zuul.example.com/p
+
+[executor]
+git_dir=/tmp/zuul-test/executor-git
+
+[connection github]
+driver=github
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=fake_id_rsa1
+
+[connection resultsdb]
+driver=sql
+dburi=$MYSQL_FIXTURE_DBURI$
+
+[connection smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
diff --git a/tests/fixtures/zuul-github-driver.conf b/tests/fixtures/zuul-github-driver.conf
index ab34619..dfa813d 100644
--- a/tests/fixtures/zuul-github-driver.conf
+++ b/tests/fixtures/zuul-github-driver.conf
@@ -3,7 +3,7 @@
[zuul]
job_name_in_report=true
-status_url=http://zuul.example.com/status
+status_url=http://zuul.example.com/status/#{change.number},{change.patchset}
[merger]
git_dir=/tmp/zuul-test/git
diff --git a/tests/fixtures/zuul-push-reqs.conf b/tests/fixtures/zuul-push-reqs.conf
new file mode 100644
index 0000000..661ac79
--- /dev/null
+++ b/tests/fixtures/zuul-push-reqs.conf
@@ -0,0 +1,23 @@
+[gearman]
+server=127.0.0.1
+
+[zuul]
+job_name_in_report=true
+status_url=http://zuul.example.com/status
+
+[merger]
+git_dir=/tmp/zuul-test/git
+git_user_email=zuul@example.com
+git_user_name=zuul
+zuul_url=http://zuul.example.com/p
+
+[executor]
+git_dir=/tmp/zuul-test/executor-git
+
+[connection github]
+driver=github
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
diff --git a/tests/unit/test_bubblewrap.py b/tests/unit/test_bubblewrap.py
new file mode 100644
index 0000000..b274944
--- /dev/null
+++ b/tests/unit/test_bubblewrap.py
@@ -0,0 +1,54 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import logging
+import subprocess
+import tempfile
+import testtools
+
+from zuul.driver import bubblewrap
+from zuul.executor.server import SshAgent
+
+
+class TestBubblewrap(testtools.TestCase):
+ def setUp(self):
+ super(TestBubblewrap, self).setUp()
+ self.log_fixture = self.useFixture(
+ fixtures.FakeLogger(level=logging.DEBUG))
+ self.useFixture(fixtures.NestedTempfile())
+
+ def test_bubblewrap_wraps(self):
+ bwrap = bubblewrap.BubblewrapDriver()
+ work_dir = tempfile.mkdtemp()
+ ansible_dir = tempfile.mkdtemp()
+ ssh_agent = SshAgent()
+ self.addCleanup(ssh_agent.stop)
+ ssh_agent.start()
+ po = bwrap.getPopen(work_dir=work_dir,
+ ansible_dir=ansible_dir,
+ ssh_auth_sock=ssh_agent.env['SSH_AUTH_SOCK'])
+ self.assertTrue(po.passwd_r > 2)
+ self.assertTrue(po.group_r > 2)
+ self.assertTrue(work_dir in po.command)
+ self.assertTrue(ansible_dir in po.command)
+ # Now run /usr/bin/id to verify passwd/group entries made it in
+ true_proc = po(['/usr/bin/id'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (output, errs) = true_proc.communicate()
+ # Make sure it printed things on stdout
+ self.assertTrue(len(output.strip()))
+ # And that it did not print things on stderr
+ self.assertEqual(0, len(errs.strip()))
+ # Make sure the _r's are closed
+ self.assertIsNone(po.passwd_r)
+ self.assertIsNone(po.group_r)
diff --git a/tests/unit/test_cloner.py b/tests/unit/test_cloner.py
deleted file mode 100644
index e65904b..0000000
--- a/tests/unit/test_cloner.py
+++ /dev/null
@@ -1,752 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-# Copyright 2014 Wikimedia Foundation Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import os
-import shutil
-import time
-
-import git
-
-import zuul.lib.cloner
-
-from tests.base import ZuulTestCase
-
-
-class TestCloner(ZuulTestCase):
-
- log = logging.getLogger("zuul.test.cloner")
- workspace_root = None
-
- def setUp(self):
- self.skip("Disabled for early v3 development")
-
- super(TestCloner, self).setUp()
- self.workspace_root = os.path.join(self.test_root, 'workspace')
-
- self.updateConfigLayout(
- 'tests/fixtures/layout-cloner.yaml')
- self.sched.reconfigure(self.config)
- self.registerJobs()
-
- def getWorkspaceRepos(self, projects):
- repos = {}
- for project in projects:
- repos[project] = git.Repo(
- os.path.join(self.workspace_root, project))
- return repos
-
- def getUpstreamRepos(self, projects):
- repos = {}
- for project in projects:
- repos[project] = git.Repo(
- os.path.join(self.upstream_root, project))
- return repos
-
- def test_cache_dir(self):
- projects = ['org/project1', 'org/project2']
- cache_root = os.path.join(self.test_root, "cache")
- for project in projects:
- upstream_repo_path = os.path.join(self.upstream_root, project)
- cache_repo_path = os.path.join(cache_root, project)
- git.Repo.clone_from(upstream_repo_path, cache_repo_path)
-
- self.worker.hold_jobs_in_build = True
- A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
- A.addApproval('CRVW', 2)
- self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
-
- self.waitUntilSettled()
-
- self.assertEquals(1, len(self.builds), "One build is running")
-
- B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
- B.setMerged()
-
- upstream = self.getUpstreamRepos(projects)
- states = [{
- 'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit('master')),
- }]
-
- for number, build in enumerate(self.builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters['ZUUL_BRANCH'],
- zuul_ref=build.parameters['ZUUL_REF'],
- zuul_url=self.src_root,
- cache_dir=cache_root,
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, number))
-
- work = self.getWorkspaceRepos(projects)
- # project1 is the zuul_project so the origin should be set to the
- # zuul_url since that is the most up to date.
- cache_repo_path = os.path.join(cache_root, 'org/project1')
- self.assertNotEqual(
- work['org/project1'].remotes.origin.url,
- cache_repo_path,
- 'workspace repo origin should not be the cache'
- )
- zuul_url_repo_path = os.path.join(self.git_root, 'org/project1')
- self.assertEqual(
- work['org/project1'].remotes.origin.url,
- zuul_url_repo_path,
- 'workspace repo origin should be the zuul url'
- )
-
- # project2 is not the zuul_project so the origin should be set
- # to upstream since that is the best we can do
- cache_repo_path = os.path.join(cache_root, 'org/project2')
- self.assertNotEqual(
- work['org/project2'].remotes.origin.url,
- cache_repo_path,
- 'workspace repo origin should not be the cache'
- )
- upstream_repo_path = os.path.join(self.upstream_root, 'org/project2')
- self.assertEqual(
- work['org/project2'].remotes.origin.url,
- upstream_repo_path,
- 'workspace repo origin should be the upstream url'
- )
-
- self.worker.hold_jobs_in_build = False
- self.worker.release()
- self.waitUntilSettled()
-
- def test_one_branch(self):
- self.worker.hold_jobs_in_build = True
-
- projects = ['org/project1', 'org/project2']
- A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
- B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
- A.addApproval('CRVW', 2)
- B.addApproval('CRVW', 2)
- self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
-
- self.waitUntilSettled()
-
- self.assertEquals(2, len(self.builds), "Two builds are running")
-
- upstream = self.getUpstreamRepos(projects)
- states = [
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
- },
- ]
-
- for number, build in enumerate(self.builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters['ZUUL_BRANCH'],
- zuul_ref=build.parameters['ZUUL_REF'],
- zuul_url=self.src_root,
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, number))
-
- shutil.rmtree(self.workspace_root)
-
- self.worker.hold_jobs_in_build = False
- self.worker.release()
- self.waitUntilSettled()
-
- def test_multi_branch(self):
- self.worker.hold_jobs_in_build = True
- projects = ['org/project1', 'org/project2',
- 'org/project3', 'org/project4']
-
- self.create_branch('org/project2', 'stable/havana')
- self.create_branch('org/project4', 'stable/havana')
- A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
- B = self.fake_gerrit.addFakeChange('org/project2', 'stable/havana',
- 'B')
- C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
- A.addApproval('CRVW', 2)
- B.addApproval('CRVW', 2)
- C.addApproval('CRVW', 2)
- self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
-
- self.waitUntilSettled()
-
- self.assertEquals(3, len(self.builds), "Three builds are running")
-
- upstream = self.getUpstreamRepos(projects)
- states = [
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit('master')),
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].
- commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].
- commit('stable/havana')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit('master')),
- 'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
- 'org/project4': str(upstream['org/project4'].
- commit('master')),
- },
- ]
-
- for number, build in enumerate(self.builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters['ZUUL_BRANCH'],
- zuul_ref=build.parameters['ZUUL_REF'],
- zuul_url=self.src_root,
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, number))
- shutil.rmtree(self.workspace_root)
-
- self.worker.hold_jobs_in_build = False
- self.worker.release()
- self.waitUntilSettled()
-
- def test_upgrade(self):
- # Simulates an upgrade test
- self.worker.hold_jobs_in_build = True
- projects = ['org/project1', 'org/project2', 'org/project3',
- 'org/project4', 'org/project5', 'org/project6']
-
- self.create_branch('org/project2', 'stable/havana')
- self.create_branch('org/project3', 'stable/havana')
- self.create_branch('org/project4', 'stable/havana')
- self.create_branch('org/project5', 'stable/havana')
- A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
- B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
- C = self.fake_gerrit.addFakeChange('org/project3', 'stable/havana',
- 'C')
- D = self.fake_gerrit.addFakeChange('org/project3', 'master', 'D')
- E = self.fake_gerrit.addFakeChange('org/project4', 'stable/havana',
- 'E')
- A.addApproval('CRVW', 2)
- B.addApproval('CRVW', 2)
- C.addApproval('CRVW', 2)
- D.addApproval('CRVW', 2)
- E.addApproval('CRVW', 2)
- self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(E.addApproval('APRV', 1))
-
- self.waitUntilSettled()
-
- self.assertEquals(5, len(self.builds), "Five builds are running")
-
- # Check the old side of the upgrade first
- upstream = self.getUpstreamRepos(projects)
- states = [
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit(
- 'stable/havana')),
- 'org/project3': str(upstream['org/project3'].commit(
- 'stable/havana')),
- 'org/project4': str(upstream['org/project4'].commit(
- 'stable/havana')),
- 'org/project5': str(upstream['org/project5'].commit(
- 'stable/havana')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit(
- 'stable/havana')),
- 'org/project3': str(upstream['org/project3'].commit(
- 'stable/havana')),
- 'org/project4': str(upstream['org/project4'].commit(
- 'stable/havana')),
- 'org/project5': str(upstream['org/project5'].commit(
- 'stable/havana')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit(
- 'stable/havana')),
- 'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
- 'org/project4': str(upstream['org/project4'].commit(
- 'stable/havana')),
-
- 'org/project5': str(upstream['org/project5'].commit(
- 'stable/havana')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit(
- 'stable/havana')),
- 'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
- 'org/project4': str(upstream['org/project4'].commit(
- 'stable/havana')),
- 'org/project5': str(upstream['org/project5'].commit(
- 'stable/havana')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit(
- 'stable/havana')),
- 'org/project3': self.builds[2].parameters['ZUUL_COMMIT'],
- 'org/project4': self.builds[4].parameters['ZUUL_COMMIT'],
- 'org/project5': str(upstream['org/project5'].commit(
- 'stable/havana')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- ]
-
- for number, build in enumerate(self.builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters['ZUUL_BRANCH'],
- zuul_ref=build.parameters['ZUUL_REF'],
- zuul_url=self.src_root,
- branch='stable/havana', # Old branch for upgrade
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct on old side of upgrade' %
- (project, number))
- shutil.rmtree(self.workspace_root)
-
- # Check the new side of the upgrade
- states = [
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit('master')),
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project3': self.builds[3].parameters['ZUUL_COMMIT'],
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project3': self.builds[3].parameters['ZUUL_COMMIT'],
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- ]
-
- for number, build in enumerate(self.builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters['ZUUL_BRANCH'],
- zuul_ref=build.parameters['ZUUL_REF'],
- zuul_url=self.src_root,
- branch='master', # New branch for upgrade
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct on old side of upgrade' %
- (project, number))
- shutil.rmtree(self.workspace_root)
-
- self.worker.hold_jobs_in_build = False
- self.worker.release()
- self.waitUntilSettled()
-
- def test_project_override(self):
- self.worker.hold_jobs_in_build = True
- projects = ['org/project1', 'org/project2', 'org/project3',
- 'org/project4', 'org/project5', 'org/project6']
-
- self.create_branch('org/project3', 'stable/havana')
- self.create_branch('org/project4', 'stable/havana')
- self.create_branch('org/project6', 'stable/havana')
- A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
- B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
- C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
- D = self.fake_gerrit.addFakeChange('org/project3', 'stable/havana',
- 'D')
- A.addApproval('CRVW', 2)
- B.addApproval('CRVW', 2)
- C.addApproval('CRVW', 2)
- D.addApproval('CRVW', 2)
- self.fake_gerrit.addEvent(A.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(B.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(C.addApproval('APRV', 1))
- self.fake_gerrit.addEvent(D.addApproval('APRV', 1))
-
- self.waitUntilSettled()
-
- self.assertEquals(4, len(self.builds), "Four builds are running")
-
- upstream = self.getUpstreamRepos(projects)
- states = [
- {'org/project1': self.builds[0].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit('master')),
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project2': str(upstream['org/project2'].commit('master')),
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[2].parameters['ZUUL_COMMIT'],
- 'org/project3': str(upstream['org/project3'].commit('master')),
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit('master')),
- },
- {'org/project1': self.builds[1].parameters['ZUUL_COMMIT'],
- 'org/project2': self.builds[2].parameters['ZUUL_COMMIT'],
- 'org/project3': self.builds[3].parameters['ZUUL_COMMIT'],
- 'org/project4': str(upstream['org/project4'].commit('master')),
- 'org/project5': str(upstream['org/project5'].commit('master')),
- 'org/project6': str(upstream['org/project6'].commit(
- 'stable/havana')),
- },
- ]
-
- for number, build in enumerate(self.builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters['ZUUL_BRANCH'],
- zuul_ref=build.parameters['ZUUL_REF'],
- zuul_url=self.src_root,
- project_branches={'org/project4': 'master'},
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, number))
- shutil.rmtree(self.workspace_root)
-
- self.worker.hold_jobs_in_build = False
- self.worker.release()
- self.waitUntilSettled()
-
- def test_periodic(self):
- self.worker.hold_jobs_in_build = True
- self.create_branch('org/project', 'stable/havana')
- self.updateConfigLayout(
- 'tests/fixtures/layout-timer.yaml')
- self.sched.reconfigure(self.config)
- self.registerJobs()
-
- # The pipeline triggers every second, so we should have seen
- # several by now.
- time.sleep(5)
- self.waitUntilSettled()
-
- builds = self.builds[:]
-
- self.worker.hold_jobs_in_build = False
- # Stop queuing timer triggered jobs so that the assertions
- # below don't race against more jobs being queued.
- self.updateConfigLayout(
- 'tests/fixtures/layout-no-timer.yaml')
- self.sched.reconfigure(self.config)
- self.registerJobs()
- self.worker.release()
- self.waitUntilSettled()
-
- projects = ['org/project']
-
- self.assertEquals(2, len(builds), "Two builds are running")
-
- upstream = self.getUpstreamRepos(projects)
- states = [
- {'org/project':
- str(upstream['org/project'].commit('stable/havana')),
- },
- {'org/project':
- str(upstream['org/project'].commit('stable/havana')),
- },
- ]
-
- for number, build in enumerate(builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters.get('ZUUL_BRANCH', None),
- zuul_ref=build.parameters.get('ZUUL_REF', None),
- zuul_url=self.src_root,
- branch='stable/havana',
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, number))
-
- shutil.rmtree(self.workspace_root)
-
- self.worker.hold_jobs_in_build = False
- self.worker.release()
- self.waitUntilSettled()
-
- def test_periodic_update(self):
- # Test that the merger correctly updates its local repository
- # before running a periodic job.
-
- # Prime the merger with the current state
- A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
- self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
- self.waitUntilSettled()
-
- # Merge a different change
- B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
- B.setMerged()
-
- # Start a periodic job
- self.worker.hold_jobs_in_build = True
- self.executor.negative_function_cache_ttl = 0
- self.config.set('zuul', 'layout_config',
- 'tests/fixtures/layout-timer.yaml')
- self.sched.reconfigure(self.config)
- self.registerJobs()
-
- # The pipeline triggers every second, so we should have seen
- # several by now.
- time.sleep(5)
- self.waitUntilSettled()
-
- builds = self.builds[:]
-
- self.worker.hold_jobs_in_build = False
- # Stop queuing timer triggered jobs so that the assertions
- # below don't race against more jobs being queued.
- self.config.set('zuul', 'layout_config',
- 'tests/fixtures/layout-no-timer.yaml')
- self.sched.reconfigure(self.config)
- self.registerJobs()
- self.worker.release()
- self.waitUntilSettled()
-
- projects = ['org/project']
-
- self.assertEquals(2, len(builds), "Two builds are running")
-
- upstream = self.getUpstreamRepos(projects)
- self.assertEqual(upstream['org/project'].commit('master').hexsha,
- B.patchsets[0]['revision'])
- states = [
- {'org/project':
- str(upstream['org/project'].commit('master')),
- },
- {'org/project':
- str(upstream['org/project'].commit('master')),
- },
- ]
-
- for number, build in enumerate(builds):
- self.log.debug("Build parameters: %s", build.parameters)
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters.get('ZUUL_BRANCH', None),
- zuul_ref=build.parameters.get('ZUUL_REF', None),
- zuul_url=self.git_root,
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
- state = states[number]
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, number))
-
- shutil.rmtree(self.workspace_root)
-
- self.worker.hold_jobs_in_build = False
- self.worker.release()
- self.waitUntilSettled()
-
- def test_post_checkout(self):
- self.worker.hold_jobs_in_build = True
- project = "org/project1"
-
- A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
- event = A.getRefUpdatedEvent()
- A.setMerged()
- self.fake_gerrit.addEvent(event)
- self.waitUntilSettled()
-
- build = self.builds[0]
- state = {'org/project1': build.parameters['ZUUL_COMMIT']}
-
- build.release()
- self.waitUntilSettled()
-
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=[project],
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters.get('ZUUL_BRANCH', None),
- zuul_ref=build.parameters.get('ZUUL_REF', None),
- zuul_newrev=build.parameters.get('ZUUL_NEWREV', None),
- zuul_url=self.git_root,
- )
- cloner.execute()
- work = self.getWorkspaceRepos([project])
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, 0))
- shutil.rmtree(self.workspace_root)
-
- def test_post_and_master_checkout(self):
- self.worker.hold_jobs_in_build = True
- projects = ["org/project1", "org/project2"]
-
- A = self.fake_gerrit.addFakeChange(projects[0], 'master', 'A')
- event = A.getRefUpdatedEvent()
- A.setMerged()
- self.fake_gerrit.addEvent(event)
- self.waitUntilSettled()
-
- build = self.builds[0]
- upstream = self.getUpstreamRepos(projects)
- state = {'org/project1':
- build.parameters['ZUUL_COMMIT'],
- 'org/project2':
- str(upstream['org/project2'].commit('master')),
- }
-
- build.release()
- self.waitUntilSettled()
-
- cloner = zuul.lib.cloner.Cloner(
- git_base_url=self.upstream_root,
- projects=projects,
- workspace=self.workspace_root,
- zuul_project=build.parameters.get('ZUUL_PROJECT', None),
- zuul_branch=build.parameters.get('ZUUL_BRANCH', None),
- zuul_ref=build.parameters.get('ZUUL_REF', None),
- zuul_newrev=build.parameters.get('ZUUL_NEWREV', None),
- zuul_url=self.git_root,
- )
- cloner.execute()
- work = self.getWorkspaceRepos(projects)
-
- for project in projects:
- self.assertEquals(state[project],
- str(work[project].commit('HEAD')),
- 'Project %s commit for build %s should '
- 'be correct' % (project, 0))
- shutil.rmtree(self.workspace_root)
diff --git a/tests/unit/test_cloner_cmd.py b/tests/unit/test_cloner_cmd.py
index 2d8747f..84bd243 100644
--- a/tests/unit/test_cloner_cmd.py
+++ b/tests/unit/test_cloner_cmd.py
@@ -26,7 +26,7 @@
def test_default_cache_dir_empty(self):
self.app.parse_arguments(['base', 'repo'])
- self.assertEqual(None, self.app.args.cache_dir)
+ self.assertIsNone(self.app.args.cache_dir)
def test_default_cache_dir_environ(self):
try:
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index db32938..142a248 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -265,3 +265,63 @@
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
+
+ def test_multiple_project_separate_gerrits_common_pipeline(self):
+ self.executor_server.hold_jobs_in_build = True
+
+ A = self.fake_another_gerrit.addFakeChange(
+ 'org/project2', 'master', 'A')
+ self.fake_another_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+
+ self.waitUntilSettled()
+
+ self.assertBuilds([dict(name='project-test2',
+ changes='1,1',
+ project='org/project2',
+ pipeline='common_check')])
+
+        # NOTE(jamielennox): the tests back both connections with the
+        # same git repository on the file system. If we just create
+        # another fake change, fake_review_gerrit will try to create
+        # another 1,1 change and git will fail to create the ref.
+        # Arbitrarily bump the change number to get around the
+        # problem.
+ self.fake_review_gerrit.change_number = 50
+
+ B = self.fake_review_gerrit.addFakeChange(
+ 'org/project2', 'master', 'B')
+ self.fake_review_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+
+ self.waitUntilSettled()
+
+ self.assertBuilds([
+ dict(name='project-test2',
+ changes='1,1',
+ project='org/project2',
+ pipeline='common_check'),
+ dict(name='project-test1',
+ changes='51,1',
+ project='org/project2',
+ pipeline='common_check'),
+ ])
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+
+class TestConnectionsMerger(ZuulTestCase):
+ config_file = 'zuul-connections-merger.conf'
+ tenant_config_file = 'config/single-tenant/main.yaml'
+
+ def configure_connections(self):
+ super(TestConnectionsMerger, self).configure_connections(True)
+
+ def test_connections_merger(self):
+ "Test merger only configures source connections"
+
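+        # Only the source connections (gerrit, github) should be
+        # registered; reporter-only (smtp, sql) and trigger-only
+        # (timer, zuul) connections are not needed by the merger.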
+ self.assertIn("gerrit", self.connections.connections)
+ self.assertIn("github", self.connections.connections)
+ self.assertNotIn("smtp", self.connections.connections)
+ self.assertNotIn("sql", self.connections.connections)
+ self.assertNotIn("timer", self.connections.connections)
+ self.assertNotIn("zuul", self.connections.connections)
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
new file mode 100644
index 0000000..100e4ec
--- /dev/null
+++ b/tests/unit/test_executor.py
@@ -0,0 +1,349 @@
+#!/usr/bin/env python
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright 2014 Wikimedia Foundation Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import time
+
+from tests.base import ZuulTestCase, simple_layout
+
+
+class TestExecutorRepos(ZuulTestCase):
+ tenant_config_file = 'config/single-tenant/main.yaml'
+
+ log = logging.getLogger("zuul.test.executor")
+
+ def assertRepoState(self, repo, state, project, build, number):
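+        # Check a workspace repo against an expected state dict with
+        # optional keys: 'branch' (checked-out branch), 'commit'
+        # (HEAD sha), and 'present'/'absent' (changes whose commits
+        # must or must not appear in the history).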
+ if 'branch' in state:
+ self.assertFalse(repo.head.is_detached,
+ 'Project %s commit for build %s #%s should '
+ 'not have a detached HEAD' % (
+ project, build, number))
+ self.assertEquals(repo.active_branch.name,
+ state['branch'],
+ 'Project %s commit for build %s #%s should '
+ 'be on the correct branch' % (
+ project, build, number))
+ if 'commit' in state:
+ self.assertEquals(state['commit'],
+ str(repo.commit('HEAD')),
+ 'Project %s commit for build %s #%s should '
+ 'be correct' % (
+ project, build, number))
+ ref = repo.commit('HEAD')
+ repo_messages = set(
+ [c.message.strip() for c in repo.iter_commits(ref)])
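+        # Fake changes commit with messages of the form
+        # '<subject>-1', so membership in this set tells us whether a
+        # change's commit is in the checked-out history.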
+ if 'present' in state:
+ for change in state['present']:
+ msg = '%s-1' % change.subject
+                self.assertIn(msg, repo_messages,
+                              'Project %s for build %s #%s should '
+                              'have change %s' % (
+                                  project, build, number, change.subject))
+ if 'absent' in state:
+ for change in state['absent']:
+ msg = '%s-1' % change.subject
+                self.assertNotIn(msg, repo_messages,
+                                 'Project %s for build %s #%s should '
+                                 'not have change %s' % (
+                                     project, build, number, change.subject))
+
+ @simple_layout('layouts/repo-checkout-two-project.yaml')
+ def test_one_branch(self):
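+        # Hold the builds so each workspace can be inspected while
+        # the jobs are still running.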
+ self.executor_server.hold_jobs_in_build = True
+
+ p1 = 'review.example.com/org/project1'
+ p2 = 'review.example.com/org/project2'
+ projects = [p1, p2]
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+ A.addApproval('code-review', 2)
+ B.addApproval('code-review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEquals(2, len(self.builds), "Two builds are running")
+
+ upstream = self.getUpstreamRepos(projects)
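+        # Expected workspace states: the first build (for change A)
+        # must not see B; the second build, enqueued behind it in the
+        # dependent pipeline, also sees B in project2.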
+ states = [
+ {p1: dict(present=[A], absent=[B], branch='master'),
+ p2: dict(commit=str(upstream[p2].commit('master')),
+ branch='master'),
+ },
+ {p1: dict(present=[A], absent=[B], branch='master'),
+ p2: dict(present=[B], absent=[A], branch='master'),
+ },
+ ]
+
+ for number, build in enumerate(self.builds):
+ self.log.debug("Build parameters: %s", build.parameters)
+ work = build.getWorkspaceRepos(projects)
+ state = states[number]
+
+ for project in projects:
+ self.assertRepoState(work[project], state[project],
+ project, build, number)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ @simple_layout('layouts/repo-checkout-four-project.yaml')
+ def test_multi_branch(self):
+ self.executor_server.hold_jobs_in_build = True
+
+ p1 = 'review.example.com/org/project1'
+ p2 = 'review.example.com/org/project2'
+ p3 = 'review.example.com/org/project3'
+ p4 = 'review.example.com/org/project4'
+ projects = [p1, p2, p3, p4]
+
+ self.create_branch('org/project2', 'stable/havana')
+ self.create_branch('org/project4', 'stable/havana')
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'stable/havana',
+ 'B')
+ C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
+ A.addApproval('code-review', 2)
+ B.addApproval('code-review', 2)
+ C.addApproval('code-review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEquals(3, len(self.builds), "Three builds are running")
+
+ upstream = self.getUpstreamRepos(projects)
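+        # Projects without a change in the queue are checked out on
+        # the item's branch when they have it (e.g. project4 for B's
+        # stable/havana change) and on master otherwise.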
+ states = [
+ {p1: dict(present=[A], absent=[B, C], branch='master'),
+ p2: dict(commit=str(upstream[p2].commit('master')),
+ branch='master'),
+ p3: dict(commit=str(upstream[p3].commit('master')),
+ branch='master'),
+ p4: dict(commit=str(upstream[p4].commit('master')),
+ branch='master'),
+ },
+ {p1: dict(present=[A], absent=[B, C], branch='master'),
+ p2: dict(present=[B], absent=[A, C], branch='stable/havana'),
+ p3: dict(commit=str(upstream[p3].commit('master')),
+ branch='master'),
+ p4: dict(commit=str(upstream[p4].commit('stable/havana')),
+ branch='stable/havana'),
+ },
+ {p1: dict(present=[A], absent=[B, C], branch='master'),
+ p2: dict(commit=str(upstream[p2].commit('master')),
+ branch='master'),
+ p3: dict(present=[C], absent=[A, B], branch='master'),
+ p4: dict(commit=str(upstream[p4].commit('master')),
+ branch='master'),
+ },
+ ]
+
+ for number, build in enumerate(self.builds):
+ self.log.debug("Build parameters: %s", build.parameters)
+ work = build.getWorkspaceRepos(projects)
+ state = states[number]
+
+ for project in projects:
+ self.assertRepoState(work[project], state[project],
+ project, build, number)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ @simple_layout('layouts/repo-checkout-six-project.yaml')
+ def test_project_override(self):
+ self.executor_server.hold_jobs_in_build = True
+
+ p1 = 'review.example.com/org/project1'
+ p2 = 'review.example.com/org/project2'
+ p3 = 'review.example.com/org/project3'
+ p4 = 'review.example.com/org/project4'
+ p5 = 'review.example.com/org/project5'
+ p6 = 'review.example.com/org/project6'
+ projects = [p1, p2, p3, p4, p5, p6]
+
+ self.create_branch('org/project3', 'stable/havana')
+ self.create_branch('org/project4', 'stable/havana')
+ self.create_branch('org/project6', 'stable/havana')
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+ C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
+ D = self.fake_gerrit.addFakeChange('org/project3', 'stable/havana',
+ 'D')
+ A.addApproval('code-review', 2)
+ B.addApproval('code-review', 2)
+ C.addApproval('code-review', 2)
+ D.addApproval('code-review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(C.addApproval('approved', 1))
+ self.fake_gerrit.addEvent(D.addApproval('approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEquals(4, len(self.builds), "Four builds are running")
+
+ upstream = self.getUpstreamRepos(projects)
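+        # For D's build on stable/havana, project6 follows the item's
+        # branch as usual, but project4 stays on master even though
+        # it has a stable/havana branch -- the checkout override this
+        # layout exercises.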
+ states = [
+ {p1: dict(present=[A], absent=[B, C, D], branch='master'),
+ p2: dict(commit=str(upstream[p2].commit('master')),
+ branch='master'),
+ p3: dict(commit=str(upstream[p3].commit('master')),
+ branch='master'),
+ p4: dict(commit=str(upstream[p4].commit('master')),
+ branch='master'),
+ p5: dict(commit=str(upstream[p5].commit('master')),
+ branch='master'),
+ p6: dict(commit=str(upstream[p6].commit('master')),
+ branch='master'),
+ },
+ {p1: dict(present=[A, B], absent=[C, D], branch='master'),
+ p2: dict(commit=str(upstream[p2].commit('master')),
+ branch='master'),
+ p3: dict(commit=str(upstream[p3].commit('master')),
+ branch='master'),
+ p4: dict(commit=str(upstream[p4].commit('master')),
+ branch='master'),
+ p5: dict(commit=str(upstream[p5].commit('master')),
+ branch='master'),
+ p6: dict(commit=str(upstream[p6].commit('master')),
+ branch='master'),
+ },
+ {p1: dict(present=[A, B], absent=[C, D], branch='master'),
+ p2: dict(present=[C], absent=[A, B, D], branch='master'),
+ p3: dict(commit=str(upstream[p3].commit('master')),
+ branch='master'),
+ p4: dict(commit=str(upstream[p4].commit('master')),
+ branch='master'),
+ p5: dict(commit=str(upstream[p5].commit('master')),
+ branch='master'),
+ p6: dict(commit=str(upstream[p6].commit('master')),
+ branch='master'),
+ },
+ {p1: dict(present=[A, B], absent=[C, D], branch='master'),
+ p2: dict(present=[C], absent=[A, B, D], branch='master'),
+ p3: dict(present=[D], absent=[A, B, C],
+ branch='stable/havana'),
+ p4: dict(commit=str(upstream[p4].commit('master')),
+ branch='master'),
+ p5: dict(commit=str(upstream[p5].commit('master')),
+ branch='master'),
+ p6: dict(commit=str(upstream[p6].commit('stable/havana')),
+ branch='stable/havana'),
+ },
+ ]
+
+ for number, build in enumerate(self.builds):
+ self.log.debug("Build parameters: %s", build.parameters)
+ work = build.getWorkspaceRepos(projects)
+ state = states[number]
+
+ for project in projects:
+ self.assertRepoState(work[project], state[project],
+ project, build, number)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ def test_periodic(self):
+        # This test cannot use simple_layout because it must start
+        # with a configuration which does not include a
+        # timer-triggered job, so that we have an opportunity to set
+        # the hold flag before the first job.
+ self.executor_server.hold_jobs_in_build = True
+ # Start timer trigger - also org/project
+ self.commitConfigUpdate('common-config',
+ 'layouts/repo-checkout-timer.yaml')
+ self.sched.reconfigure(self.config)
+
+ p1 = 'review.example.com/org/project1'
+ projects = [p1]
+ self.create_branch('org/project1', 'stable/havana')
+
+ # The pipeline triggers every second, so we should have seen
+ # several by now.
+ time.sleep(5)
+ self.waitUntilSettled()
+
+ # Stop queuing timer triggered jobs so that the assertions
+ # below don't race against more jobs being queued.
+ self.commitConfigUpdate('common-config',
+ 'layouts/repo-checkout-no-timer.yaml')
+ self.sched.reconfigure(self.config)
+
+ self.assertEquals(1, len(self.builds), "One build is running")
+
+ upstream = self.getUpstreamRepos(projects)
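+        # The timer-triggered build should have project1 checked out
+        # on stable/havana rather than master.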
+ states = [
+ {p1: dict(commit=str(upstream[p1].commit('stable/havana')),
+ branch='stable/havana'),
+ },
+ ]
+
+ for number, build in enumerate(self.builds):
+ self.log.debug("Build parameters: %s", build.parameters)
+ work = build.getWorkspaceRepos(projects)
+ state = states[number]
+
+ for project in projects:
+ self.assertRepoState(work[project], state[project],
+ project, build, number)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ @simple_layout('layouts/repo-checkout-post.yaml')
+ def test_post_and_master_checkout(self):
+ self.executor_server.hold_jobs_in_build = True
+ p1 = "review.example.com/org/project1"
+ p2 = "review.example.com/org/project2"
+ projects = [p1, p2]
+
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ event = A.getRefUpdatedEvent()
+ A.setMerged()
+ self.fake_gerrit.addEvent(event)
+ self.waitUntilSettled()
+
+ upstream = self.getUpstreamRepos(projects)
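+        # In this post pipeline run, project1's master checkout
+        # includes the merged change A, while project2 is an
+        # untouched master checkout.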
+ states = [
+ {p1: dict(commit=str(upstream[p1].commit('master')),
+ present=[A], branch='master'),
+ p2: dict(commit=str(upstream[p2].commit('master')),
+ absent=[A], branch='master'),
+ },
+ ]
+
+ for number, build in enumerate(self.builds):
+ self.log.debug("Build parameters: %s", build.parameters)
+ work = build.getWorkspaceRepos(projects)
+ state = states[number]
+
+ for project in projects:
+ self.assertRepoState(work[project], state[project],
+ project, build, number)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index f287790..6cc010e 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -260,7 +260,7 @@
check_status = A.statuses[A.head_sha][0]
check_url = ('http://zuul.example.com/status/#%s,%s' %
(A.number, A.head_sha))
- self.assertEqual('check', check_status['context'])
+ self.assertEqual('tenant-one/check', check_status['context'])
self.assertEqual('Standard check', check_status['description'])
self.assertEqual('pending', check_status['state'])
self.assertEqual(check_url, check_status['url'])
@@ -274,7 +274,7 @@
check_status = A.statuses[A.head_sha][0]
check_url = ('http://zuul.example.com/status/#%s,%s' %
(A.number, A.head_sha))
- self.assertEqual('check', check_status['context'])
+ self.assertEqual('tenant-one/check', check_status['context'])
self.assertEqual('success', check_status['state'])
self.assertEqual(check_url, check_status['url'])
self.assertEqual(1, len(A.comments))
@@ -297,9 +297,12 @@
# pipeline reports success status
self.assertEqual(3, len(A.statuses[A.head_sha]))
report_status = A.statuses[A.head_sha][0]
- self.assertEqual('reporting', report_status['context'])
+ self.assertEqual('tenant-one/reporting', report_status['context'])
self.assertEqual('success', report_status['state'])
self.assertEqual(2, len(A.comments))
+ report_url = ('http://logs.example.com/reporting/%s/%s/%s/' %
+ (A.project, A.number, A.head_sha))
+ self.assertEqual(report_url, report_status['url'])
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_report_pull_merge(self):
@@ -335,6 +338,8 @@
self.fake_github.emitEvent(D.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertFalse(D.is_merged)
+ self.assertEqual(len(D.comments), 1)
+ self.assertEqual(D.comments[0], 'Merge failed')
@simple_layout('layouts/dependent-github.yaml', driver='github')
def test_parallel_changes(self):
diff --git a/tests/unit/test_github_requirements.py b/tests/unit/test_github_requirements.py
index bb9993e..5dd6e80 100644
--- a/tests/unit/test_github_requirements.py
+++ b/tests/unit/test_github_requirements.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from tests.base import ZuulTestCase, simple_layout
@@ -43,3 +45,284 @@
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project1-pipeline')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_trigger_require_status(self):
+ "Test trigger requirement: status"
+ A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('trigger me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No status from zuul so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # An error status should not cause it to be enqueued
+ A.setStatus(A.head_sha, 'error', 'null', 'null', 'check')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A success status goes in
+ A.setStatus(A.head_sha, 'success', 'null', 'null', 'check')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project1-pipeline')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_trigger_on_status(self):
+ "Test trigger on: status"
+ A = self.fake_github.openFakePullRequest('org/project2', 'master', 'A')
+
+ # An error status should not cause it to be enqueued
+ A.setStatus(A.head_sha, 'error', 'null', 'null', 'check')
+ self.fake_github.emitEvent(A.getCommitStatusEvent('check',
+ state='error'))
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A success status from unknown user should not cause it to be
+ # enqueued
+ A.setStatus(A.head_sha, 'success', 'null', 'null', 'check', user='foo')
+ self.fake_github.emitEvent(A.getCommitStatusEvent('check',
+ state='success',
+ user='foo'))
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A success status from zuul goes in
+ A.setStatus(A.head_sha, 'success', 'null', 'null', 'check')
+ self.fake_github.emitEvent(A.getCommitStatusEvent('check'))
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project2-trigger')
+
+ # An error status for a different context should not cause it to be
+ # enqueued
+ A.setStatus(A.head_sha, 'error', 'null', 'null', 'gate')
+ self.fake_github.emitEvent(A.getCommitStatusEvent('gate',
+ state='error'))
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_pipeline_require_review_username(self):
+ "Test pipeline requirement: review username"
+
+ A = self.fake_github.openFakePullRequest('org/project3', 'master', 'A')
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No approval from derp so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # Add an approved review from derp
+ A.addReview('derp', 'APPROVED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project3-reviewusername')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_pipeline_require_review_state(self):
+ "Test pipeline requirement: review state"
+
+ A = self.fake_github.openFakePullRequest('org/project4', 'master', 'A')
+ # Add derp to writers
+ A.writers.append('derp')
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No positive review from derp so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # A negative review from derp should not cause it to be enqueued
+ A.addReview('derp', 'CHANGES_REQUESTED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+        # A positive review from 'nobody' (not a writer) should not
+        # cause it to be enqueued
+ A.addReview('nobody', 'APPROVED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A positive review from derp should cause it to be enqueued
+ A.addReview('derp', 'APPROVED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project4-reviewreq')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_pipeline_require_review_user_state(self):
+ "Test pipeline requirement: review state from user"
+
+ A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
+ # Add derp and herp to writers
+ A.writers.extend(('derp', 'herp'))
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No positive review from derp so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # A negative review from derp should not cause it to be enqueued
+ A.addReview('derp', 'CHANGES_REQUESTED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+        # A positive review from 'nobody' (not a writer) should not
+        # cause it to be enqueued
+ A.addReview('nobody', 'APPROVED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A positive review from herp (a writer) should not cause it to be
+ # enqueued
+ A.addReview('herp', 'APPROVED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A positive review from derp should cause it to be enqueued
+ A.addReview('derp', 'APPROVED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project5-reviewuserstate')
+
+# TODO: Implement reject on approval username/state
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_pipeline_require_review_latest_user_state(self):
+        "Test pipeline requirement: latest review state from user"
+
+ A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
+ # Add derp and herp to writers
+ A.writers.extend(('derp', 'herp'))
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No positive review from derp so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+        # Repeated negative reviews from derp should not cause it to
+        # be enqueued
+ for i in range(1, 4):
+ submitted_at = time.time() - 72 * 60 * 60
+ A.addReview('derp', 'CHANGES_REQUESTED',
+ submitted_at)
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A positive review from derp should cause it to be enqueued
+ A.addReview('derp', 'APPROVED')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project5-reviewuserstate')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_require_review_newer_than(self):
+
+ A = self.fake_github.openFakePullRequest('org/project6', 'master', 'A')
+ # Add derp and herp to writers
+ A.writers.extend(('derp', 'herp'))
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No positive review from derp so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # Add a too-old positive review, should not be enqueued
+ submitted_at = time.time() - 72 * 60 * 60
+ A.addReview('derp', 'APPROVED',
+ submitted_at)
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # Add a recent positive review
+ submitted_at = time.time() - 12 * 60 * 60
+ A.addReview('derp', 'APPROVED', submitted_at)
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project6-newerthan')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_require_review_older_than(self):
+
+ A = self.fake_github.openFakePullRequest('org/project7', 'master', 'A')
+ # Add derp and herp to writers
+ A.writers.extend(('derp', 'herp'))
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # No positive review from derp so should not be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # Add a too-new positive, should not be enqueued
+ submitted_at = time.time() - 12 * 60 * 60
+ A.addReview('derp', 'APPROVED',
+ submitted_at)
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # Add an old enough positive, should enqueue
+ submitted_at = time.time() - 72 * 60 * 60
+ A.addReview('herp', 'APPROVED', submitted_at)
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
+ self.assertEqual(self.history[0].name, 'project7-olderthan')
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_require_open(self):
+
+ A = self.fake_github.openFakePullRequest('org/project8', 'master', 'A')
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent('test me')
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+
+ # PR is open, we should have enqueued
+ self.assertEqual(len(self.history), 1)
+
+ # close the PR and try again
+ A.state = 'closed'
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ # PR is closed, should not trigger
+ self.assertEqual(len(self.history), 1)
+
+ @simple_layout('layouts/requirements-github.yaml', driver='github')
+ def test_require_current(self):
+
+ A = self.fake_github.openFakePullRequest('org/project9', 'master', 'A')
+ # A sync event that we will keep submitting to trigger
+ sync = A.getPullRequestSynchronizeEvent()
+ self.fake_github.emitEvent(sync)
+ self.waitUntilSettled()
+
+        # PR head is current, so the event should enqueue
+ self.assertEqual(len(self.history), 1)
+
+        # Add a commit to the PR, then re-issue the original sync event
+ A.addCommit()
+ self.fake_github.emitEvent(sync)
+ self.waitUntilSettled()
+ # Event hash is not current, should not trigger
+ self.assertEqual(len(self.history), 1)
diff --git a/tests/unit/test_inventory.py b/tests/unit/test_inventory.py
new file mode 100644
index 0000000..2835d30
--- /dev/null
+++ b/tests/unit/test_inventory.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import yaml
+
+from tests.base import ZuulTestCase
+
+
+class TestInventory(ZuulTestCase):
+
+ tenant_config_file = 'config/inventory/main.yaml'
+
+ def setUp(self):
+ super(TestInventory, self).setUp()
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ def _get_build_inventory(self, name):
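+        # Load the Ansible inventory the executor wrote into this
+        # build's job directory.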
+ build = self.getBuildByName(name)
+ inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')
+        with open(inv_path, 'r') as inv_f:
+            return yaml.safe_load(inv_f)
+
+ def test_single_inventory(self):
+
+ inventory = self._get_build_inventory('single-inventory')
+
+ all_nodes = ('ubuntu-xenial',)
+ self.assertIn('all', inventory)
+ self.assertIn('hosts', inventory['all'])
+ self.assertIn('vars', inventory['all'])
+ for node_name in all_nodes:
+ self.assertIn(node_name, inventory['all']['hosts'])
+ self.assertIn('zuul', inventory['all']['vars'])
+ z_vars = inventory['all']['vars']['zuul']
+ self.assertIn('executor', z_vars)
+ self.assertIn('src_root', z_vars['executor'])
+ self.assertIn('job', z_vars)
+ self.assertEqual(z_vars['job'], 'single-inventory')
+
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ def test_group_inventory(self):
+
+ inventory = self._get_build_inventory('group-inventory')
+
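+        # Nodes must appear both under 'all' and under their
+        # configured groups (here every node is a ceph monitor).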
+ all_nodes = ('controller', 'compute1', 'compute2')
+ self.assertIn('all', inventory)
+ self.assertIn('hosts', inventory['all'])
+ self.assertIn('vars', inventory['all'])
+ for group_name in ('ceph-osd', 'ceph-monitor'):
+ self.assertIn(group_name, inventory)
+ for node_name in all_nodes:
+ self.assertIn(node_name, inventory['all']['hosts'])
+ self.assertIn(node_name,
+ inventory['ceph-monitor']['hosts'])
+ self.assertIn('zuul', inventory['all']['vars'])
+ z_vars = inventory['all']['vars']['zuul']
+ self.assertIn('executor', z_vars)
+ self.assertIn('src_root', z_vars['executor'])
+ self.assertIn('job', z_vars)
+ self.assertEqual(z_vars['job'], 'group-inventory')
+
+ self.executor_server.release()
+ self.waitUntilSettled()
diff --git a/tests/unit/test_log_streamer.py b/tests/unit/test_log_streamer.py
new file mode 100644
index 0000000..3ea5a8e
--- /dev/null
+++ b/tests/unit/test_log_streamer.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import socket
+import tempfile
+
+import zuul.lib.log_streamer
+import tests.base
+
+
+class TestLogStreamer(tests.base.BaseTestCase):
+
+    log = logging.getLogger("zuul.test.log_streamer")
+
+ def setUp(self):
+ super(TestLogStreamer, self).setUp()
+ self.host = '0.0.0.0'
+
+ def startStreamer(self, port, root=None):
+ if not root:
+ root = tempfile.gettempdir()
+ return zuul.lib.log_streamer.LogStreamer(None, self.host, port, root)
+
+ def test_start_stop(self):
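+        # connect_ex() returns 0 on success, so the port must accept
+        # connections while the streamer is running and refuse them
+        # once it has been stopped.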
+ port = 7900
+ streamer = self.startStreamer(port)
+ self.addCleanup(streamer.stop)
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.addCleanup(s.close)
+ self.assertEqual(0, s.connect_ex((self.host, port)))
+ s.close()
+
+ streamer.stop()
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.addCleanup(s.close)
+ self.assertNotEqual(0, s.connect_ex((self.host, port)))
+ s.close()
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 5f968b4..e7e53c4 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -106,7 +106,7 @@
base.auth = model.AuthContext()
py27 = model.Job('py27')
- self.assertEqual(None, py27.timeout)
+ self.assertIsNone(py27.timeout)
py27.inheritFrom(base)
self.assertEqual(30, py27.timeout)
self.assertEqual(['base-pre'],
@@ -115,7 +115,7 @@
[x.path for x in py27.run])
self.assertEqual(['base-post'],
[x.path for x in py27.post_run])
- self.assertEqual(None, py27.auth)
+ self.assertIsNone(py27.auth)
def test_job_variants(self):
# This simulates freezing a job.
@@ -433,11 +433,11 @@
})
layout.addJob(in_repo_job_with_inherit_false)
- self.assertEqual(None, in_repo_job_without_inherit.auth)
+ self.assertIsNone(in_repo_job_without_inherit.auth)
self.assertEqual(1, len(in_repo_job_with_inherit.auth.secrets))
self.assertEqual(in_repo_job_with_inherit.auth.secrets[0].name,
'pypi-credentials')
- self.assertEqual(None, in_repo_job_with_inherit_false.auth)
+ self.assertIsNone(in_repo_job_with_inherit_false.auth)
def test_job_inheritance_job_tree(self):
tenant = model.Tenant('tenant')
diff --git a/tests/unit/test_multi_driver.py b/tests/unit/test_multi_driver.py
index a1107de..864bd31 100644
--- a/tests/unit/test_multi_driver.py
+++ b/tests/unit/test_multi_driver.py
@@ -28,10 +28,10 @@
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
B = self.fake_github.openFakePullRequest('org/project1', 'master', 'B')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
-
self.waitUntilSettled()
self.assertEqual(2, len(self.builds))
diff --git a/tests/unit/test_push_reqs.py b/tests/unit/test_push_reqs.py
new file mode 100644
index 0000000..657d9b8
--- /dev/null
+++ b/tests/unit/test_push_reqs.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2017 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tests.base import ZuulTestCase
+
+
+class TestPushRequirements(ZuulTestCase):
+ config_file = 'zuul-push-reqs.conf'
+ tenant_config_file = 'config/push-reqs/main.yaml'
+
+ def setup_config(self):
+ super(TestPushRequirements, self).setup_config()
+
+ def test_push_requirements(self):
+ self.executor_server.hold_jobs_in_build = True
+
+        # Create a github change and emit a push event
+ A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
+ old_sha = A.head_sha
+ self.fake_github.emitEvent(A.getPushEvent(old_sha))
+
+ self.waitUntilSettled()
+
+ # All but one pipeline should be skipped
+ self.assertEqual(1, len(self.builds))
+ self.assertEqual('pushhub', self.builds[0].pipeline)
+ self.assertEqual('org/project1', self.builds[0].project)
+
+ # Make a gerrit change, and emit a ref-updated event
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+ self.fake_gerrit.addEvent(B.getRefUpdatedEvent())
+
+ self.waitUntilSettled()
+
+ # All but one pipeline should be skipped, increasing builds by 1
+ self.assertEqual(2, len(self.builds))
+ self.assertEqual('pushgerrit', self.builds[1].pipeline)
+ self.assertEqual('org/project2', self.builds[1].project)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index d416369..0ac42c1 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -21,9 +21,8 @@
import os
import re
import shutil
-import sys
import time
-from unittest import (skip, skipIf)
+from unittest import skip
import git
from six.moves import urllib
@@ -64,7 +63,11 @@
self.assertIsNone(self.getJobFromHistory('project-test2').node)
# TODOv3(jeblair): we may want to report stats by tenant (also?).
- self.assertReportedStat('gerrit.event.comment-added', value='1|c')
+ # Per-driver
+ self.assertReportedStat('zuul.event.gerrit.comment-added', value='1|c')
+ # Per-driver per-connection
+ self.assertReportedStat('zuul.event.gerrit.gerrit.comment-added',
+ value='1|c')
self.assertReportedStat('zuul.pipeline.gate.current_changes',
value='1|g')
self.assertReportedStat('zuul.pipeline.gate.job.project-merge.SUCCESS',
@@ -510,7 +513,6 @@
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_failed_change_at_head_with_queue(self):
"Test that if a change at the head fails, queued jobs are canceled"
@@ -937,7 +939,6 @@
a = source.getChange(event, refresh=True)
self.assertTrue(source.canMerge(a, mgr.getSubmitAllowNeeds()))
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_project_merge_conflict(self):
"Test that gate merge conflicts are handled properly"
@@ -959,7 +960,6 @@
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
- self.assertEqual(B.reported, 1)
self.assertEqual(C.reported, 1)
self.gearman_server.release('project-merge')
@@ -977,7 +977,7 @@
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
- self.assertEqual(B.reported, 2)
+ self.assertIn('Merge Failed', B.messages[-1])
self.assertEqual(C.reported, 2)
self.assertHistory([
@@ -989,13 +989,13 @@
dict(name='project-test2', result='SUCCESS', changes='1,1 3,1'),
], ordered=False)
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_delayed_merge_conflict(self):
"Test that delayed check merge conflicts are handled properly"
# Hold jobs in the gearman queue so that we can test whether
- # the executor returns a merge failure after the scheduler has
- # successfully merged.
+        # the executor successfully merges a change based on an old
+ # repo state (frozen by the scheduler) which would otherwise
+ # conflict.
self.gearman_server.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project',
'master', 'A',
@@ -1068,9 +1068,12 @@
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
- dict(name='project-merge', result='MERGER_FAILURE', changes='2,1'),
- dict(name='project-merge', result='MERGER_FAILURE',
- changes='2,1 3,1'),
+ dict(name='project-merge', result='SUCCESS', changes='2,1'),
+ dict(name='project-test1', result='SUCCESS', changes='2,1'),
+ dict(name='project-test2', result='SUCCESS', changes='2,1'),
+ dict(name='project-merge', result='SUCCESS', changes='2,1 3,1'),
+ dict(name='project-test1', result='SUCCESS', changes='2,1 3,1'),
+ dict(name='project-test2', result='SUCCESS', changes='2,1 3,1'),
], ordered=False)
def test_post(self):
@@ -1182,7 +1185,8 @@
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'NEW')
- self.assertEqual(C.reported, 2)
+ self.assertIn('This change depends on a change that failed to merge.',
+ C.messages[-1])
self.assertEqual(len(self.history), 1)
def test_failing_dependent_changes(self):
@@ -1927,7 +1931,6 @@
self.assertEqual(A.reported, 2)
@simple_layout('layouts/no-jobs-project.yaml')
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_no_job_project(self):
"Test that reports with no jobs don't get sent"
A = self.fake_gerrit.addFakeChange('org/no-jobs-project',
@@ -2059,7 +2062,6 @@
self.assertReportedStat('test-timing', '3|ms')
self.assertReportedStat('test-gauge', '12|g')
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_stuck_job_cleanup(self):
"Test that pending jobs are cleaned up if removed from layout"
@@ -2187,7 +2189,6 @@
self.assertEqual(q1.name, 'integrated')
self.assertEqual(q2.name, 'integrated')
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_queue_precedence(self):
"Test that queue precedence works"
@@ -2332,7 +2333,6 @@
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.data['status'], 'NEW')
- self.assertEqual(B.reported, 1)
self.assertEqual(len(self.history), 0)
# Add the "project-test3" job.
@@ -2348,7 +2348,7 @@
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'NEW')
- self.assertEqual(B.reported, 2)
+ self.assertIn('Merge Failed', B.messages[-1])
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
@@ -3400,6 +3400,16 @@
self.assertFalse(self.smtp_messages[1]['body'].startswith(failure_msg))
self.assertTrue(self.smtp_messages[1]['body'].endswith(footer_msg))
+ @simple_layout('layouts/unmanaged-project.yaml')
+ def test_unmanaged_project_start_message(self):
+ "Test start reporting is not done for unmanaged projects."
+ self.init_repo("org/project", tag='init')
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(0, len(A.messages))
+
@skip("Disabled for early v3 development")
def test_merge_failure_reporters(self):
"""Check that the config is set up correctly"""
@@ -3539,9 +3549,9 @@
self.assertEqual([], running_item['failing_reasons'])
self.assertEqual([], running_item['items_behind'])
self.assertEqual('https://hostname/1', running_item['url'])
- self.assertEqual(None, running_item['item_ahead'])
+ self.assertIsNone(running_item['item_ahead'])
self.assertEqual('org/project', running_item['project'])
- self.assertEqual(None, running_item['remaining_time'])
+ self.assertIsNone(running_item['remaining_time'])
self.assertEqual(True, running_item['active'])
self.assertEqual('1,1', running_item['id'])
@@ -3556,7 +3566,7 @@
self.assertEqual(7, len(job['worker']))
self.assertEqual(False, job['canceled'])
self.assertEqual(True, job['voting'])
- self.assertEqual(None, job['result'])
+ self.assertIsNone(job['result'])
self.assertEqual('gate', job['pipeline'])
break
@@ -3874,7 +3884,6 @@
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 0)
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
@@ -4025,11 +4034,9 @@
self.assertEqual(self.history[0].changes, '2,1 1,1')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_crd_check_reconfiguration(self):
self._test_crd_check_reconfiguration('org/project1', 'org/project2')
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_crd_undefined_project(self):
"""Test that undefined projects in dependencies are handled for
independent pipelines"""
@@ -4039,7 +4046,6 @@
self._test_crd_check_reconfiguration('org/project1', 'org/unknown')
@simple_layout('layouts/ignore-dependencies.yaml')
- @skipIf(sys.version_info.major > 2, 'Fails on py3')
def test_crd_check_ignore_dependencies(self):
"Test cross-repo dependencies can be ignored"
diff --git a/tests/unit/test_ssh_agent.py b/tests/unit/test_ssh_agent.py
new file mode 100644
index 0000000..c9c1ebd
--- /dev/null
+++ b/tests/unit/test_ssh_agent.py
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import subprocess
+
+from tests.base import ZuulTestCase
+from zuul.executor.server import SshAgent
+
+
+class TestSshAgent(ZuulTestCase):
+ tenant_config_file = 'config/single-tenant/main.yaml'
+
+ def test_ssh_agent(self):
+ # Need a private key to add
+ env_copy = dict(os.environ)
+        # DISPLAY and SSH_ASKPASS can cause ssh-add to prompt
+        # interactively, surprising test runners
+ if 'DISPLAY' in env_copy:
+ del env_copy['DISPLAY']
+ if 'SSH_ASKPASS' in env_copy:
+ del env_copy['SSH_ASKPASS']
+
+ agent = SshAgent()
+ agent.start()
+ env_copy.update(agent.env)
+
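+        # Compare only the key type and base64 material; splitting on
+        # '== ' drops the trailing comment field of the public key.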
+ pub_key_file = '{}.pub'.format(self.private_key_file)
+ pub_key = None
+ with open(pub_key_file) as pub_key_f:
+ pub_key = pub_key_f.read().split('== ')[0]
+
+ agent.add(self.private_key_file)
+ keys = agent.list()
+ self.assertEqual(1, len(keys))
+ self.assertEqual(keys[0].split('== ')[0], pub_key)
+ agent.remove(self.private_key_file)
+ keys = agent.list()
+ self.assertEqual([], keys)
+ agent.stop()
+ # Agent is now dead and thus this should fail
+ with open('/dev/null') as devnull:
+ self.assertRaises(subprocess.CalledProcessError,
+ subprocess.check_call,
+ ['ssh-add', self.private_key_file],
+ env=env_copy,
+ stderr=devnull)
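
The test above drives SshAgent end to end. The implementation itself is not
part of this diff; roughly, it is a thin wrapper over the ssh-agent and
ssh-add CLIs. A minimal sketch of the same shape, assuming only the standard
library (MiniSshAgent and its parsing details are illustrative, not the real
zuul.executor.server.SshAgent):

    import os
    import re
    import subprocess


    class MiniSshAgent(object):
        """Illustrative stand-in for zuul.executor.server.SshAgent."""

        def __init__(self):
            self.env = {}

        def start(self):
            # 'ssh-agent -s' prints sh-style "VAR=value; export VAR;" lines.
            output = subprocess.check_output(
                ['ssh-agent', '-s']).decode('utf-8')
            for name, value in re.findall(r'(SSH_[A-Z_]+)=([^;]+);', output):
                self.env[name] = value

        def _run(self, *cmd):
            return subprocess.check_output(
                cmd, env=dict(os.environ, **self.env)).decode('utf-8')

        def add(self, key_path):
            self._run('ssh-add', key_path)

        def remove(self, key_path):
            self._run('ssh-add', '-d', key_path)

        def list(self):
            # 'ssh-add -L' prints one public key per line; note it exits
            # non-zero when the agent holds no keys, which the real
            # implementation must handle in order to return [].
            return self._run('ssh-add', '-L').splitlines()

        def stop(self):
            subprocess.check_call(['ssh-agent', '-k'],
                                  env=dict(os.environ, **self.env))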
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 2168a7f..18a49db 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -262,9 +262,9 @@
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
- self.assertEqual(A.reported, 2,
- "A should report start and failure")
- self.assertIn('syntax error', A.messages[1],
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('syntax error', A.messages[0],
"A should have a syntax error reported")
def test_trusted_syntax_error(self):
@@ -283,9 +283,9 @@
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
- self.assertEqual(A.reported, 2,
- "A should report start and failure")
- self.assertIn('syntax error', A.messages[1],
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('syntax error', A.messages[0],
"A should have a syntax error reported")
def test_untrusted_yaml_error(self):
@@ -303,9 +303,9 @@
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
- self.assertEqual(A.reported, 2,
- "A should report start and failure")
- self.assertIn('syntax error', A.messages[1],
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('syntax error', A.messages[0],
"A should have a syntax error reported")
def test_untrusted_shadow_error(self):
@@ -323,9 +323,9 @@
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
- self.assertEqual(A.reported, 2,
- "A should report start and failure")
- self.assertIn('not permitted to shadow', A.messages[1],
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertIn('not permitted to shadow', A.messages[0],
"A should have a syntax error reported")
@@ -344,6 +344,8 @@
self.assertEqual(build.result, 'FAILURE')
build = self.getJobFromHistory('check-vars')
self.assertEqual(build.result, 'SUCCESS')
+ build = self.getJobFromHistory('hello-world')
+ self.assertEqual(build.result, 'SUCCESS')
build = self.getJobFromHistory('python27')
self.assertEqual(build.result, 'SUCCESS')
flag_path = os.path.join(self.test_root, build.uuid + '.flag')
diff --git a/tools/trigger-job.py b/tools/trigger-job.py
index 7123afc..dd69f1b 100755
--- a/tools/trigger-job.py
+++ b/tools/trigger-job.py
@@ -73,5 +73,6 @@
while not job.complete:
time.sleep(1)
+
if __name__ == '__main__':
main()
diff --git a/tools/update-storyboard.py b/tools/update-storyboard.py
index 12e6916..51434c9 100644
--- a/tools/update-storyboard.py
+++ b/tools/update-storyboard.py
@@ -96,5 +96,6 @@
if ok_lanes and not task_found:
add_task(sync, task, lanes[ok_lanes[0]])
+
if __name__ == '__main__':
main()
diff --git a/tox.ini b/tox.ini
index 6a50c6d..9b97eca 100644
--- a/tox.ini
+++ b/tox.ini
@@ -51,6 +51,6 @@
[flake8]
# These are ignored intentionally in openstack-infra projects;
# please don't submit patches that solely correct them or enable them.
-ignore = E305,E125,E129,E402,H,W503
+ignore = E125,E129,E402,H,W503
show-source = True
exclude = .venv,.tox,dist,doc,build,*.egg
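
The whitespace-only additions to tools/trigger-job.py and
tools/update-storyboard.py above are what allow E305 to come off the ignore
list: E305 requires two blank lines between the end of a function or class
and any module-level code that follows. For example:

    def main():
        pass


    if __name__ == '__main__':  # two blank lines above satisfy flake8 E305
        main()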
diff --git a/zuul/ansible/action/copy.py b/zuul/ansible/action/copy.py
index bb54430..d870c24 100644
--- a/zuul/ansible/action/copy.py
+++ b/zuul/ansible/action/copy.py
@@ -25,6 +25,6 @@
source = self._task.args.get('src', None)
remote_src = self._task.args.get('remote_src', False)
- if not remote_src and not paths._is_safe_path(source):
+ if not remote_src and source and not paths._is_safe_path(source):
return paths._fail_dict(source)
return super(ActionModule, self).run(tmp, task_vars)
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index e6b3461..904316c 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -24,14 +24,14 @@
def linesplit(socket):
- buff = socket.recv(4096)
+ buff = socket.recv(4096).decode("utf-8")
buffering = True
while buffering:
if "\n" in buff:
(line, buff) = buff.split("\n", 1)
yield line + "\n"
else:
- more = socket.recv(4096)
+ more = socket.recv(4096).decode("utf-8")
if not more:
buffering = False
else:
@@ -40,6 +40,32 @@
yield buff
+def zuul_filter_result(result):
+ """Remove keys from shell/command output.
+
+ Zuul streams stdout into the log above, so including stdout and stderr
+ in the result dict that ansible displays in the logs is duplicate
+ noise. We would like to keep stdout in the result dict so that other
+ callback plugins like ARA could also have access to it, but for now we
+ drop them here.
+
+ Remove changed so that we don't show a bunch of "changed" titles
+ on successful shell tasks, since that doesn't make sense from a Zuul
+ POV. The super class treats a missing "changed" key as False.
+
+ Remove cmd because the scripts whose content people want to see are
+ generally run with -x, so the commands are already echoed into the log.
+ It's possible we may want to revisit this to be smarter about when we
+ remove it - like, only remove it if it has an embedded newline - so
+ that for normal 'simple' uses of cmd it'll still echo what the command
+ was.
+ """
+
+ for key in ('changed', 'cmd',
+ 'stderr', 'stderr_lines',
+ 'stdout', 'stdout_lines'):
+ result.pop(key, None)
+ return result
+
+
class CallbackModule(default.CallbackModule):
'''
@@ -103,3 +129,37 @@
target=self._read_log, args=(host, ip))
p.daemon = True
p.start()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if result._task.action in ('command', 'shell'):
+ zuul_filter_result(result._result)
+ super(CallbackModule, self).v2_runner_on_failed(
+ result, ignore_errors=ignore_errors)
+
+ def v2_runner_on_ok(self, result):
+ if result._task.action in ('command', 'shell'):
+ zuul_filter_result(result._result)
+ else:
+ return super(CallbackModule, self).v2_runner_on_ok(result)
+
+ if self._play.strategy == 'free':
+ return super(CallbackModule, self).v2_runner_on_ok(result)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+
+ if delegated_vars:
+ msg = "ok: [{host} -> {delegated_host} %s]".format(
+ host=result._host.get_name(),
+ delegated_host=delegated_vars['ansible_host'])
+ else:
+ msg = "ok: [{host}]".format(host=result._host.get_name())
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ msg += " Runtime: {delta} Start: {start} End: {end}".format(
+ **result._result)
+
+ self._handle_warnings(result._result)
+
+ self._display.display(msg)
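
To make the filtering concrete, here is what zuul_filter_result leaves
behind for a typical command task (the result dict below is made up for
illustration):

    result = {
        'changed': True,
        'cmd': 'tox -e py35',
        'rc': 0,
        'stdout': '...', 'stdout_lines': ['...'],
        'stderr': '', 'stderr_lines': [],
        'delta': '0:00:01.234567', 'start': '...', 'end': '...',
    }
    zuul_filter_result(result)
    # Only the keys the display path still needs survive:
    # {'rc': 0, 'delta': '0:00:01.234567', 'start': '...', 'end': '...'}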
diff --git a/zuul/cmd/executor.py b/zuul/cmd/executor.py
index 1893f5a..931639f 100755
--- a/zuul/cmd/executor.py
+++ b/zuul/cmd/executor.py
@@ -24,9 +24,11 @@
import logging
import os
+import pwd
import socket
import sys
import signal
+import tempfile
import zuul.cmd
import zuul.executor.server
@@ -37,6 +39,9 @@
# Similar situation with gear and statsd.
+DEFAULT_FINGER_PORT = 79
+
+
class Executor(zuul.cmd.ZuulApp):
def parse_arguments(self):
@@ -72,15 +77,67 @@
self.executor.stop()
self.executor.join()
+ def start_log_streamer(self):
+ pipe_read, pipe_write = os.pipe()
+ child_pid = os.fork()
+ if child_pid == 0:
+ os.close(pipe_write)
+ import zuul.lib.log_streamer
+
+ self.log.info("Starting log streamer")
+ streamer = zuul.lib.log_streamer.LogStreamer(
+ self.user, '0.0.0.0', self.finger_port, self.jobroot_dir)
+
+ # Keep running until the parent dies:
+ pipe_read = os.fdopen(pipe_read)
+ pipe_read.read()
+ self.log.info("Stopping log streamer")
+ streamer.stop()
+ os._exit(0)
+ else:
+ os.close(pipe_read)
+ self.log_streamer_pid = child_pid
+
+ def change_privs(self):
+ '''
+ Drop our privileges to the zuul user.
+ '''
+ if os.getuid() != 0:
+ return
+ pw = pwd.getpwnam(self.user)
+ os.setgroups([])
+ os.setgid(pw.pw_gid)
+ os.setuid(pw.pw_uid)
+ os.umask(0o022)
+
def main(self, daemon=True):
# See comment at top of file about zuul imports
- self.setup_logging('executor', 'log_config')
+ if self.config.has_option('executor', 'user'):
+ self.user = self.config.get('executor', 'user')
+ else:
+ self.user = 'zuul'
+ if self.config.has_option('zuul', 'jobroot_dir'):
+ self.jobroot_dir = os.path.expanduser(
+ self.config.get('zuul', 'jobroot_dir'))
+ else:
+ self.jobroot_dir = tempfile.gettempdir()
+
+ self.setup_logging('executor', 'log_config')
self.log = logging.getLogger("zuul.Executor")
+ if self.config.has_option('executor', 'finger_port'):
+ self.finger_port = int(self.config.get('executor', 'finger_port'))
+ else:
+ self.finger_port = DEFAULT_FINGER_PORT
+
+ self.start_log_streamer()
+ self.change_privs()
+
ExecutorServer = zuul.executor.server.ExecutorServer
self.executor = ExecutorServer(self.config, self.connections,
+ jobdir_root=self.jobroot_dir,
keep_jobdir=self.args.keep_jobdir)
self.executor.start()
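
start_log_streamer leans on a classic parent-death trick: the child holds
the read end of a pipe whose only write end belongs to the parent, so a
blocking read returns EOF exactly when the parent exits, cleanly or not.
The pattern in isolation:

    import os

    pipe_read, pipe_write = os.pipe()
    if os.fork() == 0:
        # Child: drop our copy of the write end, then block. The kernel
        # closes the parent's copy when the parent dies, ending the read.
        os.close(pipe_write)
        os.fdopen(pipe_read).read()  # returns '' on parent exit
        # ... stop child services here ...
        os._exit(0)
    else:
        # Parent: drop the read end and continue normally.
        os.close(pipe_read)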
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 1374e9b..42a9b01 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -47,6 +47,16 @@
pass
+class NodeFromGroupNotFoundError(Exception):
+ def __init__(self, nodeset, node, group):
+ message = textwrap.dedent("""\
+ In nodeset {nodeset} the group {group} contains a
+ node named {node} which is not defined in the nodeset.""")
+ message = textwrap.fill(message.format(nodeset=nodeset,
+ node=node, group=group))
+ super(NodeFromGroupNotFoundError, self).__init__(message)
+
+
class ProjectNotFoundError(Exception):
def __init__(self, project):
message = textwrap.dedent("""\
@@ -169,8 +179,13 @@
vs.Required('image'): str,
}
+ group = {vs.Required('name'): str,
+ vs.Required('nodes'): [str]
+ }
+
nodeset = {vs.Required('name'): str,
vs.Required('nodes'): [node],
+ 'groups': [group],
'_source_context': model.SourceContext,
'_start_mark': yaml.Mark,
}
@@ -182,9 +197,18 @@
with configuration_exceptions('nodeset', conf):
NodeSetParser.getSchema()(conf)
ns = model.NodeSet(conf['name'])
+ node_names = []
for conf_node in as_list(conf['nodes']):
node = model.Node(conf_node['name'], conf_node['image'])
ns.addNode(node)
+ node_names.append(conf_node['name'])
+ for conf_group in as_list(conf.get('groups', [])):
+ for node_name in conf_group['nodes']:
+ if node_name not in node_names:
+ raise NodeFromGroupNotFoundError(conf['name'], node_name,
+ conf_group['name'])
+ group = model.Group(conf_group['name'], conf_group['nodes'])
+ ns.addGroup(group)
return ns
@@ -229,6 +253,9 @@
role = vs.Any(zuul_role, galaxy_role)
+ job_project = {vs.Required('name'): str,
+ 'override-branch': str}
+
job = {vs.Required('name'): str,
'parent': str,
'failure-message': str,
@@ -252,10 +279,11 @@
'_source_context': model.SourceContext,
'_start_mark': yaml.Mark,
'roles': to_list(role),
- 'repos': to_list(str),
+ 'required-projects': to_list(vs.Any(job_project, str)),
'vars': dict,
'dependencies': to_list(str),
'allowed-projects': to_list(str),
+ 'override-branch': str,
}
return vs.Schema(job)
@@ -271,6 +299,7 @@
'success-message',
'failure-url',
'success-url',
+ 'override-branch',
]
@staticmethod
@@ -364,10 +393,23 @@
ns.addNode(node)
job.nodeset = ns
- if 'repos' in conf:
- # Accumulate repos in a set so that job inheritance
- # is additive.
- job.repos = job.repos.union(set(conf.get('repos', [])))
+ if 'required-projects' in conf:
+ new_projects = {}
+ projects = as_list(conf.get('required-projects', []))
+ for project in projects:
+ if isinstance(project, dict):
+ project_name = project['name']
+ project_override_branch = project.get('override-branch')
+ else:
+ project_name = project
+ project_override_branch = None
+ (trusted, project) = tenant.getProject(project_name)
+ if project is None:
+ raise Exception("Unknown project %s" % (project_name,))
+ job_project = model.JobProject(project_name,
+ project_override_branch)
+ new_projects[project_name] = job_project
+ job.updateProjects(new_projects)
tags = conf.get('tags')
if tags:
@@ -519,6 +561,7 @@
'templates': [str],
'merge-mode': vs.Any('merge', 'merge-resolve',
'cherry-pick'),
+ 'default-branch': str,
'_source_context': model.SourceContext,
'_start_mark': yaml.Mark,
}
@@ -554,15 +597,20 @@
configs.extend([layout.project_templates[name]
for name in conf_templates])
configs.append(project_template)
+ # Set the following values to the first one that we find and
+ # ignore subsequent settings.
mode = conf.get('merge-mode')
if mode and project_config.merge_mode is None:
- # Set the merge mode to the first one that we find and
- # ignore subsequent settings.
project_config.merge_mode = model.MERGER_MAP[mode]
+ default_branch = conf.get('default-branch')
+ if default_branch and project_config.default_branch is None:
+ project_config.default_branch = default_branch
if project_config.merge_mode is None:
# If merge mode was not specified in any project stanza,
# set it to the default.
project_config.merge_mode = model.MERGER_MAP['merge-resolve']
+ if project_config.default_branch is None:
+ project_config.default_branch = 'master'
for pipeline in layout.pipelines.values():
project_pipeline = model.ProjectPipelineConfig()
queue_name = None
@@ -855,7 +903,7 @@
key_dir = os.path.dirname(project.private_key_file)
if not os.path.isdir(key_dir):
- os.makedirs(key_dir)
+ os.makedirs(key_dir, 0o700)
TenantParser.log.info(
"Generating RSA keypair for project %s" % (project.name,)
@@ -872,6 +920,9 @@
with open(project.private_key_file, 'wb') as f:
f.write(pem_private_key)
+ # Ensure private key is read/write for zuul user only.
+ os.chmod(project.private_key_file, 0o600)
+
@staticmethod
def _loadKeys(project):
# Check the key files specified are there
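
For reference, the new group validation in NodeSetParser amounts to the
following check over the parsed configuration (a sketch using plain dicts
in place of the parsed YAML objects):

    conf = {
        'name': 'multinode',
        'nodes': [{'name': 'controller', 'image': 'ubuntu-xenial'},
                  {'name': 'compute', 'image': 'ubuntu-xenial'}],
        'groups': [{'name': 'ceph', 'nodes': ['controller', 'compute']},
                   {'name': 'broken', 'nodes': ['not-a-node']}],
    }

    node_names = [n['name'] for n in conf['nodes']]
    for group in conf.get('groups', []):
        for node_name in group['nodes']:
            if node_name not in node_names:
                # The parser raises NodeFromGroupNotFoundError here; the
                # 'broken' group above would trip it.
                raise ValueError('group %s references undefined node %s'
                                 % (group['name'], node_name))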
diff --git a/zuul/connection/__init__.py b/zuul/connection/__init__.py
index 49624d7..90ab39c 100644
--- a/zuul/connection/__init__.py
+++ b/zuul/connection/__init__.py
@@ -14,6 +14,7 @@
import abc
+import extras
import six
@@ -43,6 +44,26 @@
self.driver = driver
self.connection_name = connection_name
self.connection_config = connection_config
+ self.statsd = extras.try_import('statsd.statsd')
+
+ def logEvent(self, event):
+ self.log.debug(
+ 'Scheduling {driver} event from {connection}: {event}'.format(
+ driver=self.driver.name,
+ connection=self.connection_name,
+ event=event.type))
+ try:
+ if self.statsd:
+ self.statsd.incr(
+ 'zuul.event.{driver}.{event}'.format(
+ driver=self.driver.name, event=event.type))
+ self.statsd.incr(
+ 'zuul.event.{driver}.{connection}.{event}'.format(
+ driver=self.driver.name,
+ connection=self.connection_name,
+ event=event.type))
+ except:
+ self.log.exception("Exception reporting event stats")
def onLoad(self):
pass
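
The two counters emitted by logEvent differ only in whether the connection
name is included, so per-driver and per-connection event rates can be
graphed separately. For a Gerrit patchset-created event arriving on a
connection also named 'gerrit' (names illustrative):

    driver, connection, event = 'gerrit', 'gerrit', 'patchset-created'
    'zuul.event.{driver}.{event}'.format(driver=driver, event=event)
    # -> 'zuul.event.gerrit.patchset-created'
    'zuul.event.{driver}.{connection}.{event}'.format(
        driver=driver, connection=connection, event=event)
    # -> 'zuul.event.gerrit.gerrit.patchset-created'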
diff --git a/zuul/driver/__init__.py b/zuul/driver/__init__.py
index 671996a..0c3105d 100644
--- a/zuul/driver/__init__.py
+++ b/zuul/driver/__init__.py
@@ -254,3 +254,27 @@
"""
pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class WrapperInterface(object):
+ """The wrapper interface to be implmeneted by a driver.
+
+ A driver which wraps execution of commands executed by Zuul should
+ implement this interface.
+
+ """
+
+ @abc.abstractmethod
+ def getPopen(self, **kwargs):
+ """Create and return a subprocess.Popen factory wrapped however the
+ driver sees fit.
+
+ This method is required by the interface.
+
+ :arg dict kwargs: key/values for use by driver as needed
+
+ :returns: a callable that takes the same args as subprocess.Popen
+ :rtype: Callable
+ """
+ pass
diff --git a/zuul/driver/bubblewrap/__init__.py b/zuul/driver/bubblewrap/__init__.py
new file mode 100644
index 0000000..c93e912
--- /dev/null
+++ b/zuul/driver/bubblewrap/__init__.py
@@ -0,0 +1,173 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright 2013 OpenStack Foundation
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import grp
+import logging
+import os
+import pwd
+import subprocess
+import sys
+
+from six.moves import shlex_quote
+
+from zuul.driver import (Driver, WrapperInterface)
+
+
+class WrappedPopen(object):
+ def __init__(self, command, passwd_r, group_r):
+ self.command = command
+ self.passwd_r = passwd_r
+ self.group_r = group_r
+
+ def __call__(self, args, *sub_args, **kwargs):
+ try:
+ args = self.command + args
+ if kwargs.get('close_fds') or sys.version_info.major >= 3:
+ # The default in py3 is close_fds=True, so we need to pass
+ # our open fds in. However, this can only work right in
+ # py3.2 or later due to the lack of 'pass_fds' in prior
+ # versions. So until we are py3 only we can only bwrap
+ # things that are close_fds=False
+ pass_fds = list(kwargs.get('pass_fds', []))
+ for fd in (self.passwd_r, self.group_r):
+ if fd not in pass_fds:
+ pass_fds.append(fd)
+ kwargs['pass_fds'] = pass_fds
+ proc = subprocess.Popen(args, *sub_args, **kwargs)
+ finally:
+ self.__del__()
+ return proc
+
+ def __del__(self):
+ if self.passwd_r:
+ try:
+ os.close(self.passwd_r)
+ except OSError:
+ pass
+ self.passwd_r = None
+ if self.group_r:
+ try:
+ os.close(self.group_r)
+ except OSError:
+ pass
+ self.group_r = None
+
+
+class BubblewrapDriver(Driver, WrapperInterface):
+ name = 'bubblewrap'
+ log = logging.getLogger("zuul.BubblewrapDriver")
+
+ bwrap_command = [
+ 'bwrap',
+ '--dir', '/tmp',
+ '--tmpfs', '/tmp',
+ '--dir', '/var',
+ '--dir', '/var/tmp',
+ '--dir', '/run/user/{uid}',
+ '--ro-bind', '/usr', '/usr',
+ '--ro-bind', '/lib', '/lib',
+ '--ro-bind', '/lib64', '/lib64',
+ '--ro-bind', '/bin', '/bin',
+ '--ro-bind', '/sbin', '/sbin',
+ '--ro-bind', '/etc/resolv.conf', '/etc/resolv.conf',
+ '--ro-bind', '{ansible_dir}', '{ansible_dir}',
+ '--ro-bind', '{ssh_auth_sock}', '{ssh_auth_sock}',
+ '--dir', '{work_dir}',
+ '--bind', '{work_dir}', '{work_dir}',
+ '--dev', '/dev',
+ '--dir', '{user_home}',
+ '--chdir', '/',
+ '--unshare-all',
+ '--share-net',
+ '--uid', '{uid}',
+ '--gid', '{gid}',
+ '--file', '{uid_fd}', '/etc/passwd',
+ '--file', '{gid_fd}', '/etc/group',
+ ]
+
+ def reconfigure(self, tenant):
+ pass
+
+ def stop(self):
+ pass
+
+ def getPopen(self, **kwargs):
+ # Set zuul_dir if it was not passed in
+ if 'zuul_dir' in kwargs:
+ zuul_dir = kwargs['zuul_dir']
+ else:
+ zuul_python_dir = os.path.dirname(sys.executable)
+ # We want the dir directly above bin to get the whole venv
+ zuul_dir = os.path.normpath(os.path.join(zuul_python_dir, '..'))
+
+ bwrap_command = list(self.bwrap_command)
+ if not zuul_dir.startswith('/usr'):
+ bwrap_command.extend(['--ro-bind', zuul_dir, zuul_dir])
+
+ # Need users and groups
+ uid = os.getuid()
+ passwd = pwd.getpwuid(uid)
+ passwd_bytes = b':'.join(
+ ['{}'.format(x).encode('utf8') for x in passwd])
+ (passwd_r, passwd_w) = os.pipe()
+ os.write(passwd_w, passwd_bytes)
+ os.close(passwd_w)
+
+ gid = os.getgid()
+ group = grp.getgrgid(gid)
+ group_bytes = b':'.join(
+ ['{}'.format(x).encode('utf8') for x in group])
+ group_r, group_w = os.pipe()
+ os.write(group_w, group_bytes)
+ os.close(group_w)
+
+ kwargs = dict(kwargs) # Don't update passed in dict
+ kwargs['uid'] = uid
+ kwargs['gid'] = gid
+ kwargs['uid_fd'] = passwd_r
+ kwargs['gid_fd'] = group_r
+ kwargs['user_home'] = passwd.pw_dir
+ command = [x.format(**kwargs) for x in bwrap_command]
+
+ self.log.debug("Bubblewrap command: %s",
+ " ".join(shlex_quote(c) for c in command))
+
+ wrapped_popen = WrappedPopen(command, passwd_r, group_r)
+
+ return wrapped_popen
+
+
+def main(args=None):
+ driver = BubblewrapDriver()
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('work_dir')
+ parser.add_argument('ansible_dir')
+ parser.add_argument('run_args', nargs='+')
+ cli_args = parser.parse_args()
+
+ ssh_auth_sock = os.environ.get('SSH_AUTH_SOCK')
+
+ popen = driver.getPopen(work_dir=cli_args.work_dir,
+ ansible_dir=cli_args.ansible_dir,
+ ssh_auth_sock=ssh_auth_sock)
+ x = popen(cli_args.run_args)
+ x.wait()
+
+
+if __name__ == '__main__':
+ main()
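
As the __main__ block implies, getPopen returns a factory, not a process:
callers supply their own work_dir, ansible_dir and SSH_AUTH_SOCK for the
command template, then call the result exactly like subprocess.Popen. A
usage sketch (all paths illustrative):

    driver = BubblewrapDriver()
    popen = driver.getPopen(work_dir='/var/lib/zuul/builds/abc123',
                            ansible_dir='/usr/lib/zuul/ansible',
                            ssh_auth_sock='/tmp/ssh-agent.sock')
    # WrappedPopen prepends the bwrap command and wires the passwd/group
    # pipe fds through pass_fds before delegating to subprocess.Popen.
    proc = popen(['ansible-playbook', 'playbook.yaml'], close_fds=False)
    proc.wait()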
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index c3c8584..a5e1f22 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -142,6 +142,7 @@
self.connection._getChange(event.change_number,
event.patch_number,
refresh=True)
+ self.connection.logEvent(event)
self.connection.sched.addEvent(event)
def run(self):
@@ -624,7 +625,7 @@
if val is True:
cmd += ' --%s' % key
else:
- cmd += ' --%s %s' % (key, val)
+ cmd += ' --label %s=%s' % (key, val)
cmd += ' %s' % change
out, err = self._ssh(cmd)
return err
@@ -729,13 +730,13 @@
if stdin_data:
stdin.write(stdin_data)
- out = stdout.read()
+ out = stdout.read().decode('utf-8')
self.log.debug("SSH received stdout:\n%s" % out)
ret = stdout.channel.recv_exit_status()
self.log.debug("SSH exit status: %s" % ret)
- err = stderr.read()
+ err = stderr.read().decode('utf-8')
self.log.debug("SSH received stderr:\n%s" % err)
if ret:
raise Exception("Gerrit error executing %s" % command)
diff --git a/zuul/driver/gerrit/gerritmodel.py b/zuul/driver/gerrit/gerritmodel.py
index 009a723..818d260 100644
--- a/zuul/driver/gerrit/gerritmodel.py
+++ b/zuul/driver/gerrit/gerritmodel.py
@@ -115,6 +115,10 @@
return True
def matchesApprovals(self, change):
+ if self.required_approvals or self.reject_approvals:
+ if not hasattr(change, 'number'):
+ # Not a change, no reviews
+ return False
if (self.required_approvals and not change.approvals
or self.reject_approvals and not change.approvals):
# A change with no approvals can not match
@@ -291,10 +295,10 @@
class GerritRefFilter(RefFilter, GerritApprovalFilter):
- def __init__(self, open=None, current_patchset=None,
+ def __init__(self, connection_name, open=None, current_patchset=None,
statuses=[], required_approvals=[],
reject_approvals=[]):
- RefFilter.__init__(self)
+ RefFilter.__init__(self, connection_name)
GerritApprovalFilter.__init__(self,
required_approvals=required_approvals,
@@ -307,6 +311,7 @@
def __repr__(self):
ret = '<GerritRefFilter'
+ ret += ' connection_name: %s' % self.connection_name
if self.open is not None:
ret += ' open: %s' % self.open
if self.current_patchset is not None:
@@ -325,11 +330,21 @@
def matches(self, change):
if self.open is not None:
- if self.open != change.open:
+ # if a "change" has no number, it's not a change, but a push
+ # and cannot possibly pass this test.
+ if hasattr(change, 'number'):
+ if self.open != change.open:
+ return False
+ else:
return False
if self.current_patchset is not None:
- if self.current_patchset != change.is_current_patchset:
+ # if a "change" has no number, it's not a change, but a push
+ # and cannot possibly pass this test.
+ if hasattr(change, 'number'):
+ if self.current_patchset != change.is_current_patchset:
+ return False
+ else:
return False
if self.statuses:
diff --git a/zuul/driver/gerrit/gerritreporter.py b/zuul/driver/gerrit/gerritreporter.py
index a855db3..90c95e3 100644
--- a/zuul/driver/gerrit/gerritreporter.py
+++ b/zuul/driver/gerrit/gerritreporter.py
@@ -15,7 +15,7 @@
import logging
import voluptuous as v
-
+from zuul.driver.gerrit.gerritsource import GerritSource
from zuul.reporter import BaseReporter
@@ -25,14 +25,25 @@
name = 'gerrit'
log = logging.getLogger("zuul.GerritReporter")
- def report(self, source, pipeline, item):
+ def report(self, item):
"""Send a message to gerrit."""
- message = self._formatItemReport(pipeline, item)
+
+ # If the source is not a GerritSource we cannot report anything here.
+ if not isinstance(item.change.project.source, GerritSource):
+ return
+
+ # To support several Gerrit connections we must also filter by the
+ # canonical hostname.
+ if item.change.project.source.connection.canonical_hostname != \
+ self.connection.canonical_hostname:
+ return
+
+ message = self._formatItemReport(item)
self.log.debug("Report change %s, params %s, message: %s" %
(item.change, self.config, message))
changeid = '%s,%s' % (item.change.number, item.change.patchset)
- item.change._ref_sha = source.getRefSha(
+ item.change._ref_sha = item.change.project.source.getRefSha(
item.change.project, 'refs/heads/' + item.change.branch)
return self.connection.review(item.change.project.name, changeid,
diff --git a/zuul/driver/gerrit/gerritsource.py b/zuul/driver/gerrit/gerritsource.py
index 6cb0c39..4571cc1 100644
--- a/zuul/driver/gerrit/gerritsource.py
+++ b/zuul/driver/gerrit/gerritsource.py
@@ -65,6 +65,7 @@
def getRequireFilters(self, config):
f = GerritRefFilter(
+ connection_name=self.connection.connection_name,
open=config.get('open'),
current_patchset=config.get('current-patchset'),
statuses=to_list(config.get('status')),
@@ -74,6 +75,7 @@
def getRejectFilters(self, config):
f = GerritRefFilter(
+ connection_name=self.connection.connection_name,
reject_approvals=to_list(config.get('approval')),
)
return [f]
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 2513569..6a3c09e 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -13,11 +13,17 @@
# under the License.
import collections
+import datetime
import logging
import hmac
import hashlib
import time
+import cachecontrol
+from cachecontrol.cache import DictCache
+import iso8601
+import jwt
+import requests
import webob
import webob.dec
import voluptuous as v
@@ -29,6 +35,25 @@
from zuul.exceptions import MergeFailure
from zuul.driver.github.githubmodel import PullRequest, GithubTriggerEvent
+ACCESS_TOKEN_URL = 'https://api.github.com/installations/%s/access_tokens'
+PREVIEW_JSON_ACCEPT = 'application/vnd.github.machine-man-preview+json'
+
+
+class UTC(datetime.tzinfo):
+ """UTC"""
+
+ def utcoffset(self, dt):
+ return datetime.timedelta(0)
+
+ def tzname(self, dt):
+ return "UTC"
+
+ def dst(self, dt):
+ return datetime.timedelta(0)
+
+
+utc = UTC()
+
class GithubWebhookListener():
@@ -66,17 +91,38 @@
raise webob.exc.HTTPBadRequest(message)
try:
- event = method(request)
+ json_body = request.json_body
+ except:
+ message = 'Exception deserializing JSON body'
+ self.log.exception(message)
+ raise webob.exc.HTTPBadRequest(message)
+
+ # If there's any installation mapping information in the body then
+ # update the project mapping before any requests are made.
+ installation_id = json_body.get('installation', {}).get('id')
+ project_name = json_body.get('repository', {}).get('full_name')
+
+ if installation_id and project_name:
+ old_id = self.connection.installation_map.get(project_name)
+
+ if old_id and old_id != installation_id:
+ msg = "Unexpected installation_id change for %s. %d -> %d."
+ self.log.warning(msg, project_name, old_id, installation_id)
+
+ self.connection.installation_map[project_name] = installation_id
+
+ try:
+ event = method(json_body)
except:
self.log.exception('Exception when handling event:')
+ event = None
if event:
event.project_hostname = self.connection.canonical_hostname
- self.log.debug('Scheduling github event: {0}'.format(event.type))
+ self.connection.logEvent(event)
self.connection.sched.addEvent(event)
- def _event_push(self, request):
- body = request.json_body
+ def _event_push(self, body):
base_repo = body.get('repository')
event = GithubTriggerEvent()
@@ -96,8 +142,7 @@
return event
- def _event_pull_request(self, request):
- body = request.json_body
+ def _event_pull_request(self, body):
action = body.get('action')
pr_body = body.get('pull_request')
@@ -124,9 +169,8 @@
return event
- def _event_issue_comment(self, request):
+ def _event_issue_comment(self, body):
"""Handles pull request comments"""
- body = request.json_body
action = body.get('action')
if action != 'created':
return
@@ -144,9 +188,8 @@
event.action = 'comment'
return event
- def _event_pull_request_review(self, request):
+ def _event_pull_request_review(self, body):
"""Handles pull request reviews"""
- body = request.json_body
pr_body = body.get('pull_request')
if pr_body is None:
return
@@ -162,6 +205,25 @@
event.action = body.get('action')
return event
+ def _event_status(self, body):
+ action = body.get('action')
+ if action == 'pending':
+ return
+ pr_body = self.connection.getPullBySha(body['sha'])
+ if pr_body is None:
+ return
+
+ event = self._pull_request_to_event(pr_body)
+ event.account = self._get_sender(body)
+ event.type = 'pull_request'
+ event.action = 'status'
+ # Github API is silly. The webhook blob sets author data in
+ # 'sender', but the API call to get a status puts it in 'creator'.
+ # Duplicate the data so our code can look in one place.
+ body['creator'] = body['sender']
+ event.status = "%s:%s:%s" % _status_as_tuple(body)
+ return event
+
def _issue_to_pull_request(self, body):
number = body.get('issue').get('number')
project_name = body.get('repository').get('full_name')
@@ -257,20 +319,32 @@
driver_name = 'github'
log = logging.getLogger("connection.github")
payload_path = 'payload'
- git_user = 'git'
def __init__(self, driver, connection_name, connection_config):
super(GithubConnection, self).__init__(
driver, connection_name, connection_config)
- self.github = None
self._change_cache = {}
self.projects = {}
- self._git_ssh = bool(self.connection_config.get('sshkey', None))
+ self.git_ssh_key = self.connection_config.get('sshkey')
self.git_host = self.connection_config.get('git_host', 'github.com')
self.canonical_hostname = self.connection_config.get(
'canonical_hostname', self.git_host)
self.source = driver.getSource(self)
+ self._github = None
+ self.app_id = None
+ self.app_key = None
+
+ self.installation_map = {}
+ self.installation_token_cache = {}
+
+ # NOTE(jamielennox): Better here would be to cache to memcache or file
+ # or something external - but zuul already sucks at restarting so in
+ # memory probably doesn't make this much worse.
+ self.cache_adapter = cachecontrol.CacheControlAdapter(
+ DictCache(),
+ cache_etags=True)
+
def onLoad(self):
webhook_listener = GithubWebhookListener(self)
self.registerHttpHandler(self.payload_path,
@@ -280,20 +354,107 @@
def onStop(self):
self.unregisterHttpHandler(self.payload_path)
- def _authenticateGithubAPI(self):
- token = self.connection_config.get('api_token', None)
- if token is not None:
- if self.git_host != 'github.com':
- url = 'https://%s/' % self.git_host
- self.github = github3.enterprise_login(token=token, url=url)
- else:
- self.github = github3.login(token=token)
- self.log.info("Github API Authentication successful.")
+ def _createGithubClient(self):
+ if self.git_host != 'github.com':
+ url = 'https://%s/' % self.git_host
+ github = github3.GitHubEnterprise(url)
else:
- self.github = None
- self.log.info(
- "No Github credentials found in zuul configuration, cannot "
- "authenticate.")
+ github = github3.GitHub()
+
+ # anything going through requests to http/s goes through cache
+ github.session.mount('http://', self.cache_adapter)
+ github.session.mount('https://', self.cache_adapter)
+ return github
+
+ def _authenticateGithubAPI(self):
+ config = self.connection_config
+
+ api_token = config.get('api_token')
+
+ app_id = config.get('app_id')
+ app_key = None
+ app_key_file = config.get('app_key')
+
+ self._github = self._createGithubClient()
+
+ if api_token:
+ self._github.login(token=api_token)
+
+ if app_key_file:
+ try:
+ with open(app_key_file, 'r') as f:
+ app_key = f.read()
+ except IOError:
+ m = "Failed to open app key file for reading: %s"
+ self.log.error(m, app_key_file)
+
+ if (app_id or app_key) and \
+ not (app_id and app_key):
+ self.log.warning("You must provide an app_id and "
+ "app_key to use installation based "
+ "authentication")
+
+ return
+
+ if app_id:
+ self.app_id = int(app_id)
+ if app_key:
+ self.app_key = app_key
+
+ def _get_installation_key(self, project, user_id=None):
+ installation_id = self.installation_map.get(project)
+
+ if not installation_id:
+ self.log.error("No installation ID available for project %s",
+ project)
+ return ''
+
+ now = datetime.datetime.now(utc)
+ token, expiry = self.installation_token_cache.get(installation_id,
+ (None, None))
+
+ if ((not expiry) or (not token) or (now >= expiry)):
+ expiry = now + datetime.timedelta(minutes=5)
+
+ data = {'iat': now, 'exp': expiry, 'iss': self.app_id}
+ app_token = jwt.encode(data,
+ self.app_key,
+ algorithm='RS256')
+
+ url = ACCESS_TOKEN_URL % installation_id
+ headers = {'Accept': PREVIEW_JSON_ACCEPT,
+ 'Authorization': 'Bearer %s' % app_token}
+ json_data = {'user_id': user_id} if user_id else None
+
+ response = requests.post(url, headers=headers, json=json_data)
+ response.raise_for_status()
+
+ data = response.json()
+
+ expiry = iso8601.parse_date(data['expires_at'])
+ expiry -= datetime.timedelta(minutes=2)
+ token = data['token']
+
+ self.installation_token_cache[installation_id] = (token, expiry)
+
+ return token
+
+ def getGithubClient(self,
+ project=None,
+ user_id=None,
+ use_app=True):
+ # If you're authenticating for a project and you're an integration then
+ # you need to use the installation-specific token. There are some
+ # operations that are not yet supported by integrations, so
+ # use_app lets you fall back to api_key auth.
+ if use_app and project and self.app_id:
+ github = self._createGithubClient()
+ github.login(token=self._get_installation_key(project, user_id))
+ return github
+
+ # if we're using api_key authentication then this is already token
+ # authenticated, if not then anonymous is the best we have.
+ return self._github
def maintainCache(self, relevant):
for key, change in self._change_cache.items():
@@ -316,7 +477,12 @@
change.files = self.getPullFileNames(project, change.number)
change.title = event.title
change.status = self._get_statuses(project, event.patch_number)
+ change.reviews = self.getPullReviews(project, change.number)
change.source_event = event
+ change.open = self.getPullOpen(event.project_name, change.number)
+ change.is_current_patchset = self.getIsCurrent(event.project_name,
+ change.number,
+ event.patch_number)
elif event.ref:
change = Ref(project)
change.ref = event.ref
@@ -329,12 +495,16 @@
return change
def getGitUrl(self, project):
- if self._git_ssh:
- url = 'ssh://%s@%s/%s.git' % \
- (self.git_user, self.git_host, project)
- else:
- url = 'https://%s/%s' % (self.git_host, project)
- return url
+ if self.git_ssh_key:
+ return 'ssh://git@%s/%s.git' % (self.git_host, project)
+
+ if self.app_id:
+ installation_key = self._get_installation_key(project)
+ return 'https://x-access-token:%s@%s/%s' % (installation_key,
+ self.git_host,
+ project)
+
+ return 'https://%s/%s' % (self.git_host, project)
def getGitwebUrl(self, project, sha=None):
url = 'https://%s/%s' % (self.git_host, project)
@@ -349,19 +519,21 @@
self.projects[project.name] = project
def getProjectBranches(self, project):
+ github = self.getGithubClient()
owner, proj = project.name.split('/')
- repository = self.github.repository(owner, proj)
+ repository = github.repository(owner, proj)
branches = [branch.name for branch in repository.branches()]
- log_rate_limit(self.log, self.github)
+ log_rate_limit(self.log, github)
return branches
def getPullUrl(self, project, number):
return '%s/pull/%s' % (self.getGitwebUrl(project), number)
def getPull(self, project_name, number):
+ github = self.getGithubClient(project_name)
owner, proj = project_name.split('/')
- pr = self.github.pull_request(owner, proj, number).as_dict()
- log_rate_limit(self.log, self.github)
+ pr = github.pull_request(owner, proj, number).as_dict()
+ log_rate_limit(self.log, github)
return pr
def canMerge(self, change, allow_needs):
@@ -377,67 +549,184 @@
# For now, just send back a True value.
return True
+ def getPullBySha(self, sha):
+ query = '%s type:pr is:open' % sha
+ pulls = []
+ github = self.getGithubClient()
+ for issue in github.search_issues(query=query):
+ pr_url = issue.issue.pull_request().as_dict().get('url')
+ if not pr_url:
+ continue
+ # the issue provides no good description of the project :\
+ owner, project, _, number = pr_url.split('/')[4:]
+ github = self.getGithubClient("%s/%s" % (owner, project))
+ pr = github.pull_request(owner, project, number)
+ if pr.head.sha != sha:
+ continue
+ if pr.as_dict() in pulls:
+ continue
+ pulls.append(pr.as_dict())
+
+ log_rate_limit(self.log, github)
+ if len(pulls) > 1:
+ raise Exception('Multiple pulls found with head sha %s' % sha)
+
+ if len(pulls) == 0:
+ return None
+ return pulls.pop()
+
def getPullFileNames(self, project, number):
+ github = self.getGithubClient(project)
owner, proj = project.name.split('/')
filenames = [f.filename for f in
- self.github.pull_request(owner, proj, number).files()]
- log_rate_limit(self.log, self.github)
+ github.pull_request(owner, proj, number).files()]
+ log_rate_limit(self.log, github)
return filenames
+ def getPullReviews(self, project, number):
+ owner, proj = project.name.split('/')
+
+ revs = self._getPullReviews(owner, proj, number)
+
+ reviews = {}
+ for rev in revs:
+ user = rev.get('user').get('login')
+ review = {
+ 'by': {
+ 'username': user,
+ 'email': rev.get('user').get('email'),
+ },
+ 'grantedOn': int(time.mktime(self._ghTimestampToDate(
+ rev.get('submitted_at')))),
+ }
+
+ review['type'] = rev.get('state').lower()
+ review['submitted_at'] = rev.get('submitted_at')
+
+ # Get the user's rights. A user always has at least read permission,
+ # since read access is required to leave a review.
+ review['permission'] = 'read'
+ permission = self.getRepoPermission(project.name, user)
+ if permission == 'write':
+ review['permission'] = 'write'
+ if permission == 'admin':
+ review['permission'] = 'admin'
+
+ if user not in reviews:
+ reviews[user] = review
+ else:
+ # If there are multiple reviews per user, keep only the newest.
+ # Note that this breaks the ability to set the 'older-than'
+ # option on a review requirement.
+ if review['grantedOn'] > reviews[user]['grantedOn']:
+ reviews[user] = review
+
+ return reviews.values()
+
+ def _getPullReviews(self, owner, project, number):
+ # make a list out of the reviews so that we complete our
+ # API transaction
+ # reviews are not yet supported by integrations, use api_key:
+ # https://platform.github.community/t/api-endpoint-for-pr-reviews/409
+ github = self.getGithubClient("%s/%s" % (owner, project),
+ use_app=False)
+ reviews = [review.as_dict() for review in
+ github.pull_request(owner, project, number).reviews()]
+
+ log_rate_limit(self.log, github)
+ return reviews
+
def getUser(self, login):
- return GithubUser(self.github, login)
+ return GithubUser(self.getGithubClient(), login)
def getUserUri(self, login):
return 'https://%s/%s' % (self.git_host, login)
- def commentPull(self, project, pr_number, message):
+ def getRepoPermission(self, project, login):
+ github = self.getGithubClient(project)
owner, proj = project.split('/')
- repository = self.github.repository(owner, proj)
+ # This gets around a missing API call
+ # need preview header
+ headers = {'Accept': 'application/vnd.github.korra-preview'}
+
+ # Create a repo object
+ repository = github.repository(owner, proj)
+ # Build up a URL
+ url = repository._build_url('collaborators', login, 'permission',
+ base_url=repository._api)
+ # Get the data
+ perms = repository._get(url, headers=headers)
+
+ log_rate_limit(self.log, github)
+
+ # no known user, maybe deleted since review?
+ if perms.status_code == 404:
+ return 'none'
+
+ # get permissions from the data
+ return perms.json()['permission']
+
+ def commentPull(self, project, pr_number, message):
+ github = self.getGithubClient(project)
+ owner, proj = project.split('/')
+ repository = github.repository(owner, proj)
pull_request = repository.issue(pr_number)
pull_request.create_comment(message)
- log_rate_limit(self.log, self.github)
+ log_rate_limit(self.log, github)
def mergePull(self, project, pr_number, commit_message='', sha=None):
+ github = self.getGithubClient(project)
owner, proj = project.split('/')
- pull_request = self.github.pull_request(owner, proj, pr_number)
+ pull_request = github.pull_request(owner, proj, pr_number)
try:
result = pull_request.merge(commit_message=commit_message, sha=sha)
except MethodNotAllowed as e:
raise MergeFailure('Merge was not successful due to mergeability'
' conflict, original error is %s' % e)
- log_rate_limit(self.log, self.github)
+ log_rate_limit(self.log, github)
if not result:
raise Exception('Pull request was not merged')
def getCommitStatuses(self, project, sha):
+ github = self.getGithubClient(project)
owner, proj = project.split('/')
- repository = self.github.repository(owner, proj)
+ repository = github.repository(owner, proj)
commit = repository.commit(sha)
# make a list out of the statuses so that we complete our
# API transaction
statuses = [status.as_dict() for status in commit.statuses()]
- log_rate_limit(self.log, self.github)
+ log_rate_limit(self.log, github)
return statuses
def setCommitStatus(self, project, sha, state, url='', description='',
context=''):
+ github = self.getGithubClient(project)
owner, proj = project.split('/')
- repository = self.github.repository(owner, proj)
+ repository = github.repository(owner, proj)
repository.create_status(sha, state, url, description, context)
- log_rate_limit(self.log, self.github)
+ log_rate_limit(self.log, github)
def labelPull(self, project, pr_number, label):
+ github = self.getGithubClient(project)
owner, proj = project.split('/')
- pull_request = self.github.issue(owner, proj, pr_number)
+ pull_request = github.issue(owner, proj, pr_number)
pull_request.add_labels(label)
- log_rate_limit(self.log, self.github)
+ log_rate_limit(self.log, github)
def unlabelPull(self, project, pr_number, label):
+ github = self.getGithubClient(project)
owner, proj = project.split('/')
- pull_request = self.github.issue(owner, proj, pr_number)
+ pull_request = github.issue(owner, proj, pr_number)
pull_request.remove_label(label)
- log_rate_limit(self.log, self.github)
+ log_rate_limit(self.log, github)
+
+ def getPullOpen(self, project, number):
+ pr = self.getPull(project, number)
+ return pr.get('state') == 'open'
+
+ def getIsCurrent(self, project, number, sha):
+ pr = self.getPull(project, number)
+ return pr.get('head').get('sha') == sha
def _ghTimestampToDate(self, timestamp):
return time.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
@@ -453,20 +742,27 @@
seen = []
statuses = []
for status in self.getCommitStatuses(project.name, sha):
- # creator can be None if the user has been removed.
- creator = status.get('creator')
- if not creator:
- continue
- user = creator.get('login')
- context = status.get('context')
- state = status.get('state')
- if "%s:%s" % (user, context) not in seen:
- statuses.append("%s:%s:%s" % (user, context, state))
- seen.append("%s:%s" % (user, context))
+ stuple = _status_as_tuple(status)
+ if "%s:%s" % (stuple[0], stuple[1]) not in seen:
+ statuses.append("%s:%s:%s" % stuple)
+ seen.append("%s:%s" % (stuple[0], stuple[1]))
return statuses
+def _status_as_tuple(status):
+ """Translate a status into a tuple of user, context, state"""
+
+ creator = status.get('creator')
+ if not creator:
+ user = "Unknown"
+ else:
+ user = creator.get('login')
+ context = status.get('context')
+ state = status.get('state')
+ return (user, context, state)
+
+
def log_rate_limit(log, github):
try:
rate_limit = github.rate_limit()
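
The app-authentication flow in _get_installation_key is the standard
two-step GitHub App dance: a short-lived JWT signed with the app's private
key proves the app's identity, and GitHub exchanges it for a
per-installation token that does the real work. Condensed, using the names
defined above (error handling and caching omitted):

    import datetime
    import jwt
    import requests

    now = datetime.datetime.now(utc)
    claims = {'iat': now,
              'exp': now + datetime.timedelta(minutes=5),
              'iss': app_id}  # app_id from zuul.conf
    app_token = jwt.encode(claims, app_key, algorithm='RS256')

    response = requests.post(
        ACCESS_TOKEN_URL % installation_id,
        headers={'Accept': PREVIEW_JSON_ACCEPT,
                 'Authorization': 'Bearer %s' % app_token})
    response.raise_for_status()
    token = response.json()['token']  # cached until near expires_at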
diff --git a/zuul/driver/github/githubmodel.py b/zuul/driver/github/githubmodel.py
index 22f549f..9516097 100644
--- a/zuul/driver/github/githubmodel.py
+++ b/zuul/driver/github/githubmodel.py
@@ -14,9 +14,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import re
+import time
from zuul.model import Change, TriggerEvent, EventFilter, RefFilter
+from zuul.driver.util import time_to_seconds
EMPTY_GIT_REF = '0' * 40 # git sha of all zeros, used during creates/deletes
@@ -27,6 +30,7 @@
super(PullRequest, self).__init__(project)
self.updated_at = None
self.title = None
+ self.reviews = []
def isUpdateOf(self, other):
if (hasattr(other, 'number') and self.number == other.number and
@@ -55,13 +59,102 @@
return False
-class GithubEventFilter(EventFilter):
+class GithubCommonFilter(object):
+ def __init__(self, required_reviews=[], required_statuses=[]):
+ self._required_reviews = copy.deepcopy(required_reviews)
+ self.required_reviews = self._tidy_reviews(required_reviews)
+ self.required_statuses = required_statuses
+
+ def _tidy_reviews(self, reviews):
+ for r in reviews:
+ for k, v in r.items():
+ if k == 'username':
+ r['username'] = re.compile(v)
+ elif k == 'email':
+ r['email'] = re.compile(v)
+ elif k == 'newer-than':
+ r[k] = time_to_seconds(v)
+ elif k == 'older-than':
+ r[k] = time_to_seconds(v)
+ return reviews
+
+ def _match_review_required_review(self, rreview, review):
+ # Check if the required review and review match
+ now = time.time()
+ by = review.get('by', {})
+ for k, v in rreview.items():
+ if k == 'username':
+ if (not v.search(by.get('username', ''))):
+ return False
+ elif k == 'email':
+ if (not v.search(by.get('email', ''))):
+ return False
+ elif k == 'newer-than':
+ t = now - v
+ if (review['grantedOn'] < t):
+ return False
+ elif k == 'older-than':
+ t = now - v
+ if (review['grantedOn'] >= t):
+ return False
+ elif k == 'type':
+ if review['type'] != v:
+ return False
+ elif k == 'permission':
+ # If permission is read, we've matched. You must have read
+ # to provide a review. Write or admin permission is different.
+ if v != 'read':
+ if review['permission'] != v:
+ return False
+ return True
+
+ def matchesReviews(self, change):
+ if self.required_reviews:
+ if not hasattr(change, 'number'):
+ # not a PR, no reviews
+ return False
+ if not change.reviews:
+ # No reviews means no matching
+ return False
+
+ return self.matchesRequiredReviews(change)
+
+ def matchesRequiredReviews(self, change):
+ for rreview in self.required_reviews:
+ matches_review = False
+ for review in change.reviews:
+ if self._match_review_required_review(rreview, review):
+ # Consider matched if any review matches
+ matches_review = True
+ break
+ if not matches_review:
+ return False
+ return True
+
+ def matchesRequiredStatuses(self, change):
+ # statuses are ORed
+ # A PR head can have multiple statuses on it. If the change
+ # statuses and the filter statuses are a null intersection, there
+ # are no matches and we return false
+ if self.required_statuses:
+ if not hasattr(change, 'number'):
+ # not a PR, no status
+ return False
+ if set(change.status).isdisjoint(set(self.required_statuses)):
+ return False
+ return True
+
+
+class GithubEventFilter(EventFilter, GithubCommonFilter):
def __init__(self, trigger, types=[], branches=[], refs=[],
comments=[], actions=[], labels=[], unlabels=[],
- states=[], ignore_deletes=True):
+ states=[], statuses=[], required_statuses=[],
+ ignore_deletes=True):
EventFilter.__init__(self, trigger)
+ GithubCommonFilter.__init__(self, required_statuses=required_statuses)
+
self._types = types
self._branches = branches
self._refs = refs
@@ -74,6 +167,8 @@
self.labels = labels
self.unlabels = unlabels
self.states = states
+ self.statuses = statuses
+ self.required_statuses = required_statuses
self.ignore_deletes = ignore_deletes
def __repr__(self):
@@ -97,6 +192,10 @@
ret += ' unlabels: %s' % ', '.join(self.unlabels)
if self.states:
ret += ' states: %s' % ', '.join(self.states)
+ if self.statuses:
+ ret += ' statuses: %s' % ', '.join(self.statuses)
+ if self.required_statuses:
+ ret += ' required_statuses: %s' % ', '.join(self.required_statuses)
ret += '>'
return ret
@@ -160,32 +259,69 @@
if self.states and event.state not in self.states:
return False
+ # statuses are ORed
+ if self.statuses and event.status not in self.statuses:
+ return False
+
+ if not self.matchesRequiredStatuses(change):
+ return False
+
return True
-class GithubRefFilter(RefFilter):
- def __init__(self, statuses=[]):
- RefFilter.__init__(self)
+class GithubRefFilter(RefFilter, GithubCommonFilter):
+ def __init__(self, connection_name, statuses=[], required_reviews=[],
+ open=None, current_patchset=None):
+ RefFilter.__init__(self, connection_name)
+ GithubCommonFilter.__init__(self, required_reviews=required_reviews,
+ required_statuses=statuses)
self.statuses = statuses
+ self.open = open
+ self.current_patchset = current_patchset
def __repr__(self):
ret = '<GithubRefFilter'
+ ret += ' connection_name: %s' % self.connection_name
if self.statuses:
ret += ' statuses: %s' % ', '.join(self.statuses)
+ if self.required_reviews:
+ ret += (' required-reviews: %s' %
+ str(self.required_reviews))
+ if self.open:
+ ret += ' open: %s' % self.open
+ if self.current_patchset:
+ ret += ' current-patchset: %s' % self.current_patchset
ret += '>'
return ret
def matches(self, change):
- # statuses are ORed
- # A PR head can have multiple statuses on it. If the change
- # statuses and the filter statuses are a null intersection, there
- # are no matches and we return false
- if self.statuses:
- if set(change.status).isdisjoint(set(self.statuses)):
+ if not self.matchesRequiredStatuses(change):
+ return False
+
+ if self.open is not None:
+ # if a "change" has no number, it's not a change, but a push
+ # and cannot possibly pass this test.
+ if hasattr(change, 'number'):
+ if self.open != change.open:
+ return False
+ else:
return False
+ if self.current_patchset is not None:
+ # if a "change" has no number, it's not a change, but a push
+ # and cannot possibly pass this test.
+ if hasattr(change, 'number'):
+ if self.current_patchset != change.is_current_patchset:
+ return False
+ else:
+ return False
+
+ # required reviews are ANDed
+ if not self.matchesReviews(change):
+ return False
+
return True
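
The review-requirement semantics above: requirements are ANDed, each
requirement is satisfied by any one of the change's reviews (OR across
reviews), and within a single requirement every key must match. A small
self-check against the code as written (the review dict mirrors the shape
produced by getPullReviews):

    from zuul.driver.github.githubmodel import GithubCommonFilter

    f = GithubCommonFilter(required_reviews=[{'type': 'approved',
                                              'permission': 'write'}])
    review = {'type': 'approved', 'permission': 'write',
              'by': {'username': 'bob'}, 'grantedOn': 1490000000}
    # Every key of the required review matches, so it is satisfied.
    assert f._match_review_required_review(f.required_reviews[0], review)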
diff --git a/zuul/driver/github/githubreporter.py b/zuul/driver/github/githubreporter.py
index ffec26a..29edb8a 100644
--- a/zuul/driver/github/githubreporter.py
+++ b/zuul/driver/github/githubreporter.py
@@ -18,6 +18,7 @@
from zuul.reporter import BaseReporter
from zuul.exceptions import MergeFailure
+from zuul.driver.util import scalar_or_list
class GithubReporter(BaseReporter):
@@ -38,22 +39,25 @@
if not isinstance(self._unlabels, list):
self._unlabels = [self._unlabels]
- def report(self, source, pipeline, item):
+ def report(self, item):
"""Comment on PR and set commit status."""
if self._create_comment:
- self.addPullComment(pipeline, item)
+ self.addPullComment(item)
if (self._commit_status is not None and
hasattr(item.change, 'patchset') and
item.change.patchset is not None):
- self.setPullStatus(pipeline, item)
+ self.setPullStatus(item)
if (self._merge and
hasattr(item.change, 'number')):
self.mergePull(item)
+ if not item.change.is_merged:
+ msg = self._formatItemReportMergeFailure(item)
+ self.addPullComment(item, msg)
if self._labels or self._unlabels:
self.setLabels(item)
- def addPullComment(self, pipeline, item):
- message = self._formatItemReport(pipeline, item)
+ def addPullComment(self, item, comment=None):
+ message = comment or self._formatItemReport(item)
project = item.change.project.name
pr_number = item.change.number
self.log.debug(
@@ -61,20 +65,23 @@
(item.change, self.config, message))
self.connection.commentPull(project, pr_number, message)
- def setPullStatus(self, pipeline, item):
+ def setPullStatus(self, item):
project = item.change.project.name
sha = item.change.patchset
- context = pipeline.name
+ context = '%s/%s' % (item.pipeline.layout.tenant.name,
+ item.pipeline.name)
state = self._commit_status
- url = ''
- if self.connection.sched.config.has_option('zuul', 'status_url'):
- base = self.connection.sched.config.get('zuul', 'status_url')
- url = '%s/#%s,%s' % (base,
- item.change.number,
- item.change.patchset)
+
+ url_pattern = self.config.get('status-url')
+ if not url_pattern:
+ sched_config = self.connection.sched.config
+ if sched_config.has_option('zuul', 'status_url'):
+ url_pattern = sched_config.get('zuul', 'status_url')
+ url = item.formatUrlPattern(url_pattern) if url_pattern else ''
+
description = ''
- if pipeline.description:
- description = pipeline.description
+ if item.pipeline.description:
+ description = item.pipeline.description
self.log.debug(
'Reporting change %s, params %s, status:\n'
@@ -92,13 +99,21 @@
self.log.debug('Reporting change %s, params %s, merging via API' %
(item.change, self.config))
message = self._formatMergeMessage(item.change)
- try:
- self.connection.mergePull(project, pr_number, message, sha)
- except MergeFailure:
- time.sleep(2)
- self.log.debug('Trying to merge change %s again...' % item.change)
- self.connection.mergePull(project, pr_number, message, sha)
- item.change.is_merged = True
+
+ for i in [1, 2]:
+ try:
+ self.connection.mergePull(project, pr_number, message, sha)
+ item.change.is_merged = True
+ return
+ except MergeFailure:
+ self.log.exception(
+ 'Merge attempt of change %s %s/2 failed.' %
+ (item.change, i), exc_info=True)
+ if i == 1:
+ time.sleep(2)
+ self.log.warning(
+ 'Merge of change %s failed after 2 attempts, giving up' %
+ item.change)
def setLabels(self, item):
project = item.change.project.name
@@ -143,14 +158,12 @@
def getSchema():
- def toList(x):
- return v.Any([x], x)
-
github_reporter = v.Schema({
'status': v.Any('pending', 'success', 'failure'),
+ 'status-url': str,
'comment': bool,
'merge': bool,
- 'label': toList(str),
- 'unlabel': toList(str)
+ 'label': scalar_or_list(str),
+ 'unlabel': scalar_or_list(str)
})
return github_reporter
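
The rewritten mergePull is a bounded retry: two attempts, a two-second
pause between them, and a warning rather than an exception if both fail.
The same shape, generalized (a sketch, not zuul API):

    import time

    def attempt_twice(action, retryable, delay=2):
        """Run action; on a retryable failure, sleep and try once more."""
        for attempt in (1, 2):
            try:
                return action()
            except retryable:
                if attempt == 1:
                    time.sleep(delay)
        # Unlike the reporter above (which logs a warning), raise here.
        raise RuntimeError('gave up after 2 attempts')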
diff --git a/zuul/driver/github/githubsource.py b/zuul/driver/github/githubsource.py
index c0ae33f..1350b10 100644
--- a/zuul/driver/github/githubsource.py
+++ b/zuul/driver/github/githubsource.py
@@ -14,6 +14,7 @@
import logging
import time
+import voluptuous as v
from zuul.source import BaseSource
from zuul.model import Project
@@ -95,7 +96,11 @@
def getRequireFilters(self, config):
f = GithubRefFilter(
+ connection_name=self.connection.connection_name,
statuses=to_list(config.get('status')),
+ required_reviews=to_list(config.get('review')),
+ open=config.get('open'),
+ current_patchset=config.get('current-patchset'),
)
return [f]
@@ -103,10 +108,23 @@
return []
+review = v.Schema({'username': str,
+ 'email': str,
+ 'older-than': str,
+ 'newer-than': str,
+ 'type': str,
+ 'permission': v.Any('read', 'write', 'admin'),
+ })
+
+
def getRequireSchema():
- require = {'status': scalar_or_list(str)}
+ require = {'status': scalar_or_list(str),
+ 'review': scalar_or_list(review),
+ 'open': bool,
+ 'current-patchset': bool}
return require
def getRejectSchema():
- return {}
+ reject = {'review': scalar_or_list(review)}
+ return reject
diff --git a/zuul/driver/github/githubtrigger.py b/zuul/driver/github/githubtrigger.py
index f0bd2f4..328879d 100644
--- a/zuul/driver/github/githubtrigger.py
+++ b/zuul/driver/github/githubtrigger.py
@@ -16,6 +16,7 @@
import voluptuous as v
from zuul.trigger import BaseTrigger
from zuul.driver.github.githubmodel import GithubEventFilter
+from zuul.driver.util import scalar_or_list, to_list
class GithubTrigger(BaseTrigger):
@@ -23,25 +24,20 @@
log = logging.getLogger("zuul.trigger.GithubTrigger")
def getEventFilters(self, trigger_config):
- def toList(item):
- if not item:
- return []
- if isinstance(item, list):
- return item
- return [item]
-
efilters = []
- for trigger in toList(trigger_config):
+ for trigger in to_list(trigger_config):
f = GithubEventFilter(
trigger=self,
- types=toList(trigger['event']),
- actions=toList(trigger.get('action')),
- branches=toList(trigger.get('branch')),
- refs=toList(trigger.get('ref')),
- comments=toList(trigger.get('comment')),
- labels=toList(trigger.get('label')),
- unlabels=toList(trigger.get('unlabel')),
- states=toList(trigger.get('state'))
+ types=to_list(trigger['event']),
+ actions=to_list(trigger.get('action')),
+ branches=to_list(trigger.get('branch')),
+ refs=to_list(trigger.get('ref')),
+ comments=to_list(trigger.get('comment')),
+ labels=to_list(trigger.get('label')),
+ unlabels=to_list(trigger.get('unlabel')),
+ states=to_list(trigger.get('state')),
+ statuses=to_list(trigger.get('status')),
+ required_statuses=to_list(trigger.get('require-status'))
)
efilters.append(f)
@@ -52,21 +48,20 @@
def getSchema():
- def toList(x):
- return v.Any([x], x)
-
github_trigger = {
v.Required('event'):
- toList(v.Any('pull_request',
- 'pull_request_review',
- 'push')),
- 'action': toList(str),
- 'branch': toList(str),
- 'ref': toList(str),
- 'comment': toList(str),
- 'label': toList(str),
- 'unlabel': toList(str),
- 'state': toList(str),
+ scalar_or_list(v.Any('pull_request',
+ 'pull_request_review',
+ 'push')),
+ 'action': scalar_or_list(str),
+ 'branch': scalar_or_list(str),
+ 'ref': scalar_or_list(str),
+ 'comment': scalar_or_list(str),
+ 'label': scalar_or_list(str),
+ 'unlabel': scalar_or_list(str),
+ 'state': scalar_or_list(str),
+ 'require-status': scalar_or_list(str),
+ 'status': scalar_or_list(str)
}
return github_trigger
diff --git a/zuul/driver/nullwrap/__init__.py b/zuul/driver/nullwrap/__init__.py
new file mode 100644
index 0000000..ebcd1da
--- /dev/null
+++ b/zuul/driver/nullwrap/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright 2013 OpenStack Foundation
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import subprocess
+
+from zuul.driver import (Driver, WrapperInterface)
+
+
+class NullwrapDriver(Driver, WrapperInterface):
+ name = 'nullwrap'
+ log = logging.getLogger("zuul.NullwrapDriver")
+
+ def getPopen(self, **kwargs):
+ return subprocess.Popen
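The nullwrap driver hands back the ``subprocess.Popen`` class unchanged, so callers treat the returned factory exactly like ``subprocess.Popen`` itself. A hedged usage sketch (the command is illustrative)::

    import subprocess

    popen = NullwrapDriver().getPopen()   # returns subprocess.Popen itself
    proc = popen(['echo', 'hello'], stdout=subprocess.PIPE)
    print(proc.communicate()[0].decode('utf-8'))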
diff --git a/zuul/driver/smtp/smtpreporter.py b/zuul/driver/smtp/smtpreporter.py
index dd618ef..1f232e9 100644
--- a/zuul/driver/smtp/smtpreporter.py
+++ b/zuul/driver/smtp/smtpreporter.py
@@ -24,9 +24,9 @@
name = 'smtp'
log = logging.getLogger("zuul.SMTPReporter")
- def report(self, source, pipeline, item):
+ def report(self, item):
"""Send the compiled report message via smtp."""
- message = self._formatItemReport(pipeline, item)
+ message = self._formatItemReport(item)
self.log.debug("Report change %s, params %s, message: %s" %
(item.change, self.config, message))
diff --git a/zuul/driver/sql/alembic_reporter/env.py b/zuul/driver/sql/alembic_reporter/env.py
index 56a5b7e..4542a22 100644
--- a/zuul/driver/sql/alembic_reporter/env.py
+++ b/zuul/driver/sql/alembic_reporter/env.py
@@ -64,6 +64,7 @@
with context.begin_transaction():
context.run_migrations()
+
if context.is_offline_mode():
run_migrations_offline()
else:
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index 4b1b1a2..e478d33 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -43,6 +43,8 @@
self.engine = sa.create_engine(self.dburi)
self._migrate()
- self._setup_tables()
+ self.zuul_buildset_table, self.zuul_build_table \
+ = self._setup_tables()
self.tables_established = True
except sa.exc.NoSuchModuleError:
self.log.exception(
@@ -68,10 +70,11 @@
alembic.command.upgrade(config, 'head')
- def _setup_tables(self):
+ @staticmethod
+ def _setup_tables():
metadata = sa.MetaData()
- self.zuul_buildset_table = sa.Table(
+ zuul_buildset_table = sa.Table(
BUILDSET_TABLE, metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('zuul_ref', sa.String(255)),
@@ -84,7 +87,7 @@
sa.Column('message', sa.TEXT()),
)
- self.zuul_build_table = sa.Table(
+ zuul_build_table = sa.Table(
BUILD_TABLE, metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('buildset_id', sa.Integer,
@@ -99,6 +102,8 @@
sa.Column('node_name', sa.String(255)),
)
+ return zuul_buildset_table, zuul_build_table
+
def getSchema():
sql_connection = v.Any(str, v.Schema(dict))
diff --git a/zuul/driver/sql/sqlreporter.py b/zuul/driver/sql/sqlreporter.py
index d6e547d..5f93ce8 100644
--- a/zuul/driver/sql/sqlreporter.py
+++ b/zuul/driver/sql/sqlreporter.py
@@ -31,7 +31,7 @@
# TODO(jeblair): document this is stored as NULL if unspecified
self.result_score = config.get('score', None)
- def report(self, source, pipeline, item):
+ def report(self, item):
"""Create an entry into a database."""
if not self.connection.tables_established:
@@ -39,16 +39,19 @@
return
with self.connection.engine.begin() as conn:
+ change = getattr(item.change, 'number', '')
+ patchset = getattr(item.change, 'patchset', '')
+ refspec = getattr(item.change, 'refspec', item.change.newrev)
buildset_ins = self.connection.zuul_buildset_table.insert().values(
zuul_ref=item.current_build_set.ref,
pipeline=item.pipeline.name,
project=item.change.project.name,
- change=item.change.number,
- patchset=item.change.patchset,
- ref=item.change.refspec,
+ change=change,
+ patchset=patchset,
+ ref=refspec,
score=self.result_score,
message=self._formatItemReport(
- pipeline, item, with_jobs=False),
+ item, with_jobs=False),
)
buildset_ins_result = conn.execute(buildset_ins)
build_inserts = []
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index e1eed2d..cf8d973 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -169,7 +169,8 @@
self.log.debug("Function %s is not registered" % name)
return False
- def execute(self, job, item, pipeline, dependent_items=[]):
+ def execute(self, job, item, pipeline, dependent_items=[],
+ merger_items=[]):
tenant = pipeline.layout.tenant
uuid = str(uuid4().hex)
self.log.info(
@@ -179,8 +180,11 @@
item.current_build_set.getJobNodeSet(job.name),
item.change,
[x.change for x in dependent_items]))
+
dependent_items = dependent_items[:]
dependent_items.reverse()
+ all_items = dependent_items + [item]
+
# TODOv3(jeblair): This ansible vars data structure will
# replace the environment variables below.
project = dict(
@@ -210,7 +214,7 @@
changes_str = '^'.join(
['%s:%s:%s' % (i.change.project.name, i.change.branch,
i.change.refspec)
- for i in dependent_items + [item]])
+ for i in all_items])
params['ZUUL_BRANCH'] = item.change.branch
params['ZUUL_CHANGES'] = changes_str
params['ZUUL_REF'] = ('refs/zuul/%s/%s' %
@@ -220,7 +224,7 @@
zuul_changes = ' '.join(['%s,%s' % (i.change.number,
i.change.patchset)
- for i in dependent_items + [item]])
+ for i in all_items])
params['ZUUL_CHANGE_IDS'] = zuul_changes
params['ZUUL_CHANGE'] = str(item.change.number)
params['ZUUL_PATCHSET'] = str(item.change.patchset)
@@ -253,13 +257,16 @@
# ZUUL_OLDREV
# ZUUL_NEWREV
- all_items = dependent_items + [item]
- merger_items = [i.makeMergerItem() for i in all_items]
-
params['job'] = job.name
params['timeout'] = job.timeout
params['items'] = merger_items
params['projects'] = []
+ if hasattr(item.change, 'branch'):
+ params['branch'] = item.change.branch
+ else:
+ params['branch'] = None
+ params['override_branch'] = job.override_branch
+ params['repo_state'] = item.current_build_set.repo_state
if job.name != 'noop':
params['playbooks'] = [x.toDict() for x in job.run]
@@ -267,8 +274,9 @@
params['post_playbooks'] = [x.toDict() for x in job.post_run]
params['roles'] = [x.toDict() for x in job.roles]
+ nodeset = item.current_build_set.getJobNodeSet(job.name)
nodes = []
- for node in item.current_build_set.getJobNodeSet(job.name).getNodes():
+ for node in nodeset.getNodes():
nodes.append(dict(name=node.name, image=node.image,
az=node.az,
host_keys=node.host_keys,
@@ -278,27 +286,43 @@
public_ipv6=node.public_ipv6,
public_ipv4=node.public_ipv4))
params['nodes'] = nodes
+ params['groups'] = [group.toDict() for group in nodeset.getGroups()]
params['vars'] = copy.deepcopy(job.variables)
if job.auth:
for secret in job.auth.secrets:
params['vars'][secret.name] = copy.deepcopy(secret.secret_data)
params['vars']['zuul'] = zuul_params
projects = set()
- if job.repos:
- for repo in job.repos:
- (trusted, project) = tenant.getProject(repo)
- connection = project.source.connection
+
+ def make_project_dict(project, override_branch=None):
+ project_config = item.current_build_set.layout.project_configs.get(
+ project.canonical_name, None)
+ if project_config:
+ project_default_branch = project_config.default_branch
+ else:
+ project_default_branch = 'master'
+ connection = project.source.connection
+ return dict(connection=connection.connection_name,
+ name=project.name,
+ canonical_name=project.canonical_name,
+ override_branch=override_branch,
+ default_branch=project_default_branch)
+
+ if job.required_projects:
+ for job_project in job.required_projects.values():
+ (trusted, project) = tenant.getProject(
+ job_project.project_name)
+ if project is None:
+ raise Exception("Unknown project %s" %
+ (job_project.project_name,))
params['projects'].append(
- dict(connection=connection.connection_name,
- name=project.name))
+ make_project_dict(project,
+ job_project.override_branch))
projects.add(project)
for item in all_items:
if item.change.project not in projects:
project = item.change.project
- connection = item.change.project.source.connection
- params['projects'].append(
- dict(connection=connection.connection_name,
- name=project.name))
+ params['projects'].append(make_project_dict(project))
projects.add(project)
build = Build(job, uuid)
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 99d2a9c..8d2d577 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -27,14 +27,10 @@
from zuul.lib.yamlutil import yaml
import gear
-import git
from six.moves import shlex_quote
import zuul.merger.merger
-import zuul.ansible.action
-import zuul.ansible.callback
-import zuul.ansible.library
-import zuul.ansible.lookup
+import zuul.ansible
from zuul.lib import commandsocket
COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
@@ -79,8 +75,88 @@
self.path = None
+class SshAgent(object):
+ log = logging.getLogger("zuul.ExecutorServer")
+
+ def __init__(self):
+ self.env = {}
+ self.ssh_agent = None
+
+ def start(self):
+ if self.ssh_agent:
+ return
+ with open('/dev/null', 'r+') as devnull:
+ ssh_agent = subprocess.Popen(['ssh-agent'], close_fds=True,
+ stdout=subprocess.PIPE,
+ stderr=devnull,
+ stdin=devnull)
+ (output, _) = ssh_agent.communicate()
+ output = output.decode('utf8')
+ for line in output.split("\n"):
+ if '=' in line:
+ line = line.split(";", 1)[0]
+ (key, value) = line.split('=')
+ self.env[key] = value
+ self.log.info('Started SSH Agent, {}'.format(self.env))
+
+ def stop(self):
+ if 'SSH_AGENT_PID' in self.env:
+ try:
+ os.kill(int(self.env['SSH_AGENT_PID']), signal.SIGTERM)
+ except OSError:
+ self.log.exception(
+ 'Problem sending SIGTERM to agent {}'.format(self.env))
+ self.log.info('Sent SIGTERM to SSH Agent, {}'.format(self.env))
+ self.env = {}
+
+ def add(self, key_path):
+ env = os.environ.copy()
+ env.update(self.env)
+ key_path = os.path.expanduser(key_path)
+ self.log.debug('Adding SSH Key {}'.format(key_path))
+ output = ''
+ try:
+ output = subprocess.check_output(['ssh-add', key_path], env=env,
+ stderr=subprocess.PIPE)
+ except subprocess.CalledProcessError:
+ self.log.error('ssh-add failed: {}'.format(output))
+ raise
+ self.log.info('Added SSH Key {}'.format(key_path))
+
+ def remove(self, key_path):
+ env = os.environ.copy()
+ env.update(self.env)
+ key_path = os.path.expanduser(key_path)
+ self.log.debug('Removing SSH Key {}'.format(key_path))
+ subprocess.check_output(['ssh-add', '-d', key_path], env=env,
+ stderr=subprocess.PIPE)
+ self.log.info('Removed SSH Key {}'.format(key_path))
+
+ def list(self):
+ if 'SSH_AUTH_SOCK' not in self.env:
+ return None
+ env = os.environ.copy()
+ env.update(self.env)
+ result = []
+ for line in subprocess.Popen(['ssh-add', '-L'], env=env,
+ stdout=subprocess.PIPE).stdout:
+ line = line.decode('utf8')
+ if line.strip() == 'The agent has no identities.':
+ break
+ result.append(line.strip())
+ return result
+
+
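A usage sketch for ``SshAgent``, assuming a conventional key path; the agent's environment must be merged into any child process that should see the loaded key::

    import os
    import subprocess

    agent = SshAgent()
    agent.start()
    try:
        agent.add('~/.ssh/id_rsa')    # key path is an assumption
        env = os.environ.copy()
        env.update(agent.env)         # exports SSH_AUTH_SOCK / SSH_AGENT_PID
        subprocess.check_call(['ssh-add', '-l'], env=env)
    finally:
        agent.stop()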
class JobDir(object):
- def __init__(self, root=None, keep=False):
+ def __init__(self, root, keep, build_uuid):
+ '''
+ :param str root: Root directory for the individual job directories.
+ Can be None to use the default system temp root directory.
+ :param bool keep: If True, do not delete the job directory.
+ :param str build_uuid: The unique build UUID. This is used as the
+ temp job directory name, which helps the log streaming daemon
+ find job logs.
+ '''
# root
# ansible
# trusted.cfg
@@ -89,7 +165,12 @@
# src
# logs
self.keep = keep
- self.root = tempfile.mkdtemp(dir=root)
+ if root:
+ tmpdir = root
+ else:
+ tmpdir = tempfile.gettempdir()
+ self.root = os.path.join(tmpdir, build_uuid)
+ os.mkdir(self.root, 0o700)
# Work
self.work_root = os.path.join(self.root, 'work')
os.makedirs(self.work_root)
@@ -101,8 +182,7 @@
self.ansible_root = os.path.join(self.root, 'ansible')
os.makedirs(self.ansible_root)
self.known_hosts = os.path.join(self.ansible_root, 'known_hosts')
- self.inventory = os.path.join(self.ansible_root, 'inventory')
- self.vars = os.path.join(self.ansible_root, 'vars.yaml')
+ self.inventory = os.path.join(self.ansible_root, 'inventory.yaml')
self.playbooks = [] # The list of candidate playbooks
self.playbook = None # A pointer to the candidate we have chosen
self.pre_playbooks = []
@@ -168,7 +248,7 @@
self.event = threading.Event()
def __eq__(self, other):
- if (other.connection_name == self.connection_name and
+ if (other and other.connection_name == self.connection_name and
other.project_name == self.project_name):
return True
return False
@@ -222,6 +302,8 @@
def _copy_ansible_files(python_module, target_dir):
library_path = os.path.dirname(os.path.abspath(python_module.__file__))
for fn in os.listdir(library_path):
+ if fn == "__pycache__":
+ continue
full_path = os.path.join(library_path, fn)
if os.path.isdir(full_path):
shutil.copytree(full_path, os.path.join(target_dir, fn))
@@ -229,6 +311,45 @@
shutil.copy(os.path.join(library_path, fn), target_dir)
+def make_inventory_dict(nodes, groups, all_vars):
+
+ hosts = {}
+ for node in nodes:
+ hosts[node['name']] = node['host_vars']
+
+ inventory = {
+ 'all': {
+ 'hosts': hosts,
+ 'vars': all_vars,
+ }
+ }
+
+ for group in groups:
+ group_hosts = {}
+ for node_name in group['nodes']:
+ # group_hosts maps each node name to None because we don't have
+ # any per-group variables. If we did, the value would instead be
+ # a dict of those per-group variables.
+ group_hosts[node_name] = None
+ inventory[group['name']] = {'hosts': group_hosts}
+
+ return inventory
+
+
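For illustration, a single node in a single group (all names invented) produces the following structure::

    nodes = [{'name': 'node1', 'host_vars': {'ansible_host': '10.0.0.5'}}]
    groups = [{'name': 'ceph-osd', 'nodes': ['node1']}]

    make_inventory_dict(nodes, groups, {'zuul': {}})
    # => {'all': {'hosts': {'node1': {'ansible_host': '10.0.0.5'}},
    #             'vars': {'zuul': {}}},
    #     'ceph-osd': {'hosts': {'node1': None}}}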
+class ExecutorMergeWorker(gear.TextWorker):
+ def __init__(self, executor_server, *args, **kw):
+ self.zuul_executor_server = executor_server
+ super(ExecutorMergeWorker, self).__init__(*args, **kw)
+
+ def handleNoop(self, packet):
+ # Wait until the update queue is empty before responding
+ while self.zuul_executor_server.update_queue.qsize():
+ time.sleep(1)
+
+ with self.zuul_executor_server.merger_lock:
+ super(ExecutorMergeWorker, self).handleNoop(packet)
+
+
class ExecutorServer(object):
log = logging.getLogger("zuul.ExecutorServer")
@@ -241,6 +362,7 @@
# perhaps hostname+pid.
self.hostname = socket.gethostname()
self.zuul_url = config.get('merger', 'zuul_url')
+ self.merger_lock = threading.Lock()
self.command_map = dict(
stop=self.stop,
pause=self.pause,
@@ -255,6 +377,12 @@
else:
self.merge_root = '/var/lib/zuul/executor-git'
+ if self.config.has_option('executor', 'default_username'):
+ self.default_username = self.config.get('executor',
+ 'default_username')
+ else:
+ self.default_username = 'zuul'
+
if self.config.has_option('merger', 'git_user_email'):
self.merge_email = self.config.get('merger', 'git_user_email')
else:
@@ -265,6 +393,13 @@
else:
self.merge_name = None
+ if self.config.has_option('executor', 'untrusted_wrapper'):
+ untrusted_wrapper_name = self.config.get(
+ 'executor', 'untrusted_wrapper').strip()
+ else:
+ untrusted_wrapper_name = 'bubblewrap'
+ self.untrusted_wrapper = connections.drivers[untrusted_wrapper_name]
+
self.connections = connections
# This merger and its git repos are used to maintain
# up-to-date copies of all the repos that are used by jobs, as
@@ -281,31 +416,38 @@
path = os.path.join(state_dir, 'executor.socket')
self.command_socket = commandsocket.CommandSocket(path)
ansible_dir = os.path.join(state_dir, 'ansible')
- self.library_dir = os.path.join(ansible_dir, 'library')
- if not os.path.exists(self.library_dir):
- os.makedirs(self.library_dir)
- self.action_dir = os.path.join(ansible_dir, 'action')
- if not os.path.exists(self.action_dir):
- os.makedirs(self.action_dir)
+ self.ansible_dir = ansible_dir
- self.callback_dir = os.path.join(ansible_dir, 'callback')
- if not os.path.exists(self.callback_dir):
- os.makedirs(self.callback_dir)
+ zuul_dir = os.path.join(ansible_dir, 'zuul')
+ plugin_dir = os.path.join(zuul_dir, 'ansible')
- self.lookup_dir = os.path.join(ansible_dir, 'lookup')
- if not os.path.exists(self.lookup_dir):
- os.makedirs(self.lookup_dir)
+ if not os.path.exists(plugin_dir):
+ os.makedirs(plugin_dir)
- _copy_ansible_files(zuul.ansible.library, self.library_dir)
- _copy_ansible_files(zuul.ansible.action, self.action_dir)
- _copy_ansible_files(zuul.ansible.callback, self.callback_dir)
- _copy_ansible_files(zuul.ansible.lookup, self.lookup_dir)
+ self.library_dir = os.path.join(plugin_dir, 'library')
+ self.action_dir = os.path.join(plugin_dir, 'action')
+ self.callback_dir = os.path.join(plugin_dir, 'callback')
+ self.lookup_dir = os.path.join(plugin_dir, 'lookup')
+
+ _copy_ansible_files(zuul.ansible, plugin_dir)
+
+ # We're copying zuul.ansible.* into a directory we are going
+ # to add to pythonpath, so our plugins can "import
+ # zuul.ansible". But we're not installing all of zuul, so
+ # create a __init__.py file for the stub "zuul" module.
+ with open(os.path.join(zuul_dir, '__init__.py'), 'w'):
+ pass
self.job_workers = {}
- def _getMerger(self, root):
+ def _getMerger(self, root, logger=None):
+ if root != self.merge_root:
+ cache_root = self.merge_root
+ else:
+ cache_root = None
return zuul.merger.merger.Merger(root, self.connections,
- self.merge_email, self.merge_name)
+ self.merge_email, self.merge_name,
+ cache_root, logger)
def start(self):
self._running = True
@@ -315,10 +457,13 @@
port = self.config.get('gearman', 'port')
else:
port = 4730
- self.worker = gear.TextWorker('Zuul Executor Server')
- self.worker.addServer(server, port)
+ self.merger_worker = ExecutorMergeWorker(self, 'Zuul Executor Merger')
+ self.merger_worker.addServer(server, port)
+ self.executor_worker = gear.TextWorker('Zuul Executor Server')
+ self.executor_worker.addServer(server, port)
self.log.debug("Waiting for server")
- self.worker.waitForServer()
+ self.merger_worker.waitForServer()
+ self.executor_worker.waitForServer()
self.log.debug("Registering")
self.register()
@@ -332,15 +477,19 @@
self.update_thread = threading.Thread(target=self._updateLoop)
self.update_thread.daemon = True
self.update_thread.start()
- self.thread = threading.Thread(target=self.run)
- self.thread.daemon = True
- self.thread.start()
+ self.merger_thread = threading.Thread(target=self.run_merger)
+ self.merger_thread.daemon = True
+ self.merger_thread.start()
+ self.executor_thread = threading.Thread(target=self.run_executor)
+ self.executor_thread.daemon = True
+ self.executor_thread.start()
def register(self):
- self.worker.registerFunction("executor:execute")
- self.worker.registerFunction("executor:stop:%s" % self.hostname)
- self.worker.registerFunction("merger:merge")
- self.worker.registerFunction("merger:cat")
+ self.executor_worker.registerFunction("executor:execute")
+ self.executor_worker.registerFunction("executor:stop:%s" %
+ self.hostname)
+ self.merger_worker.registerFunction("merger:merge")
+ self.merger_worker.registerFunction("merger:cat")
def stop(self):
self.log.debug("Stopping")
@@ -355,7 +504,8 @@
except Exception:
self.log.exception("Exception sending stop command "
"to worker:")
- self.worker.shutdown()
+ self.merger_worker.shutdown()
+ self.executor_worker.shutdown()
self.log.debug("Stopped")
def pause(self):
@@ -380,7 +530,8 @@
def join(self):
self.update_thread.join()
- self.thread.join()
+ self.merger_thread.join()
+ self.executor_thread.join()
def runCommand(self):
while self._command_running:
@@ -404,11 +555,12 @@
if task is None:
# We are asked to stop
return
- self.log.info("Updating repo %s/%s" % (
- task.connection_name, task.project_name))
- self.merger.updateRepo(task.connection_name, task.project_name)
- self.log.debug("Finished updating repo %s/%s" %
- (task.connection_name, task.project_name))
+ with self.merger_lock:
+ self.log.info("Updating repo %s/%s" % (
+ task.connection_name, task.project_name))
+ self.merger.updateRepo(task.connection_name, task.project_name)
+ self.log.debug("Finished updating repo %s/%s" %
+ (task.connection_name, task.project_name))
task.setComplete()
def update(self, connection_name, project_name):
@@ -417,11 +569,35 @@
task = self.update_queue.put(task)
return task
- def run(self):
+ def run_merger(self):
+ self.log.debug("Starting merger listener")
+ while self._running:
+ try:
+ job = self.merger_worker.getJob()
+ try:
+ if job.name == 'merger:cat':
+ self.log.debug("Got cat job: %s" % job.unique)
+ self.cat(job)
+ elif job.name == 'merger:merge':
+ self.log.debug("Got merge job: %s" % job.unique)
+ self.merge(job)
+ else:
+ self.log.error("Unable to handle job %s" % job.name)
+ job.sendWorkFail()
+ except Exception:
+ self.log.exception("Exception while running job")
+ job.sendWorkException(
+ traceback.format_exc().encode('utf8'))
+ except gear.InterruptedError:
+ pass
+ except Exception:
+ self.log.exception("Exception while getting job")
+
+ def run_executor(self):
self.log.debug("Starting executor listener")
while self._running:
try:
- job = self.worker.getJob()
+ job = self.executor_worker.getJob()
try:
if job.name == 'executor:execute':
self.log.debug("Got execute job: %s" % job.unique)
@@ -429,12 +605,6 @@
elif job.name.startswith('executor:stop'):
self.log.debug("Got stop job: %s" % job.unique)
self.stopJob(job)
- elif job.name == 'merger:cat':
- self.log.debug("Got cat job: %s" % job.unique)
- self.cat(job)
- elif job.name == 'merger:merge':
- self.log.debug("Got merge job: %s" % job.unique)
- self.merge(job)
else:
self.log.error("Unable to handle job %s" % job.name)
job.sendWorkFail()
@@ -475,8 +645,9 @@
args = json.loads(job.arguments)
task = self.update(args['connection'], args['project'])
task.wait()
- files = self.merger.getFiles(args['connection'], args['project'],
- args['branch'], args['files'])
+ with self.merger_lock:
+ files = self.merger.getFiles(args['connection'], args['project'],
+ args['branch'], args['files'])
result = dict(updated=True,
files=files,
zuul_url=self.zuul_url)
@@ -484,28 +655,35 @@
def merge(self, job):
args = json.loads(job.arguments)
- ret = self.merger.mergeChanges(args['items'], args.get('files'))
+ with self.merger_lock:
+ ret = self.merger.mergeChanges(args['items'], args.get('files'),
+ args.get('repo_state'))
result = dict(merged=(ret is not None),
zuul_url=self.zuul_url)
- if args.get('files'):
- if ret:
- result['commit'], result['files'] = ret
- else:
- result['commit'], result['files'] = (None, None)
+ if ret is None:
+ result['commit'] = result['files'] = result['repo_state'] = None
else:
- result['commit'] = ret
+ (result['commit'], result['files'], result['repo_state'],
+ recent) = ret
job.sendWorkComplete(json.dumps(result))
-class AnsibleJob(object):
- log = logging.getLogger("zuul.AnsibleJob")
+class AnsibleJobLogAdapter(logging.LoggerAdapter):
+ def process(self, msg, kwargs):
+ msg, kwargs = super(AnsibleJobLogAdapter, self).process(msg, kwargs)
+ msg = '[build: %s] %s' % (kwargs['extra']['job'], msg)
+ return msg, kwargs
+
+class AnsibleJob(object):
RESULT_NORMAL = 1
RESULT_TIMED_OUT = 2
RESULT_UNREACHABLE = 3
RESULT_ABORTED = 4
def __init__(self, executor_server, job):
+ logger = logging.getLogger("zuul.AnsibleJob")
+ self.log = AnsibleJobLogAdapter(logger, {'job': job.unique})
self.executor_server = executor_server
self.job = job
self.jobdir = None
@@ -513,6 +691,8 @@
self.proc_lock = threading.Lock()
self.running = False
self.aborted = False
+ self.thread = None
+ self.ssh_agent = None
if self.executor_server.config.has_option(
'executor', 'private_key_file'):
@@ -520,8 +700,11 @@
'executor', 'private_key_file')
else:
self.private_key_file = '~/.ssh/id_rsa'
+ self.ssh_agent = SshAgent()
def run(self):
+ self.ssh_agent.start()
+ self.ssh_agent.add(self.private_key_file)
self.running = True
self.thread = threading.Thread(target=self.execute)
self.thread.start()
@@ -529,12 +712,14 @@
def stop(self):
self.aborted = True
self.abortRunningProc()
- self.thread.join()
+ if self.thread:
+ self.thread.join()
def execute(self):
try:
- self.jobdir = JobDir(root=self.executor_server.jobdir_root,
- keep=self.executor_server.keep_jobdir)
+ self.jobdir = JobDir(self.executor_server.jobdir_root,
+ self.executor_server.keep_jobdir,
+ str(self.job.unique))
self._execute()
except Exception:
self.log.exception("Exception while executing job")
@@ -549,6 +734,11 @@
self.executor_server.finishJob(self.job.unique)
except Exception:
self.log.exception("Error finalizing job thread:")
+ if self.ssh_agent:
+ try:
+ self.ssh_agent.stop()
+ except Exception:
+ self.log.exception("Error stopping SSH agent:")
def _execute(self):
self.log.debug("Job %s: beginning" % (self.job.unique,))
@@ -567,39 +757,37 @@
task.wait()
self.log.debug("Job %s: git updates complete" % (self.job.unique,))
- repos = []
+ merger = self.executor_server._getMerger(self.jobdir.src_root,
+ self.log)
+ repos = {}
for project in args['projects']:
self.log.debug("Cloning %s/%s" % (project['connection'],
project['name'],))
- source = self.executor_server.connections.getSource(
- project['connection'])
- project_object = source.getProject(project['name'])
- url = source.getGitUrl(project_object)
- repo = git.Repo.clone_from(
- os.path.join(self.executor_server.merge_root,
- source.canonical_hostname,
- project['name']),
- os.path.join(self.jobdir.src_root,
- source.canonical_hostname,
- project['name']))
-
- repo.remotes.origin.config_writer.set('url', url)
- repos.append(repo)
+ repo = merger.getRepo(project['connection'],
+ project['name'])
+ repos[project['canonical_name']] = repo
merge_items = [i for i in args['items'] if i.get('refspec')]
if merge_items:
- commit = self.doMergeChanges(merge_items)
- if not commit:
+ if not self.doMergeChanges(merger, merge_items,
+ args['repo_state']):
# There was a merge conflict and we have already sent
# a work complete result, don't run any jobs
return
- else:
- commit = args['items'][-1]['newrev'] # noqa
+
+ for project in args['projects']:
+ repo = repos[project['canonical_name']]
+ self.checkoutBranch(repo,
+ project['name'],
+ args['branch'],
+ args['override_branch'],
+ project['override_branch'],
+ project['default_branch'])
# Delete the origin remote from each repo we set up since
# it will not be valid within the jobs.
- for repo in repos:
- repo.delete_remote(repo.remotes.origin)
+ for repo in repos.values():
+ repo.deleteRemote('origin')
# is the playbook in a repo that we have already prepared?
trusted, untrusted = self.preparePlaybookRepos(args)
@@ -637,14 +825,42 @@
result = dict(result=result)
self.job.sendWorkComplete(json.dumps(result))
- def doMergeChanges(self, items):
- # Get a merger in order to update the repos involved in this job.
- merger = self.executor_server._getMerger(self.jobdir.src_root)
- commit = merger.mergeChanges(items) # noqa
- if not commit: # merge conflict
+ def doMergeChanges(self, merger, items, repo_state):
+ ret = merger.mergeChanges(items, repo_state=repo_state)
+ if not ret: # merge conflict
result = dict(result='MERGER_FAILURE')
self.job.sendWorkComplete(json.dumps(result))
- return commit
+ return False
+ recent = ret[3]
+ for key, commit in recent.items():
+ (connection, project, branch) = key
+ repo = merger.getRepo(connection, project)
+ repo.setRef('refs/heads/' + branch, commit)
+ return True
+
+ def checkoutBranch(self, repo, project_name, zuul_branch,
+ job_branch, project_override_branch,
+ project_default_branch):
+ branches = repo.getBranches()
+ if project_override_branch in branches:
+ self.log.info("Checking out %s project override branch %s",
+ project_name, project_override_branch)
+ repo.checkoutLocalBranch(project_override_branch)
+ elif job_branch in branches:
+ self.log.info("Checking out %s job branch %s",
+ project_name, job_branch)
+ repo.checkoutLocalBranch(job_branch)
+ elif zuul_branch and zuul_branch in branches:
+ self.log.info("Checking out %s zuul branch %s",
+ project_name, zuul_branch)
+ repo.checkoutLocalBranch(zuul_branch)
+ elif project_default_branch in branches:
+ self.log.info("Checking out %s project default branch %s",
+ project_name, project_default_branch)
+ repo.checkoutLocalBranch(project_default_branch)
+ else:
+ raise Exception("Project %s does not have the default branch %s" %
+ (project_name, project_default_branch))
def runPlaybooks(self, args):
result = None
@@ -697,6 +913,7 @@
ip = node.get('interface_ip')
host_vars = dict(
ansible_host=ip,
+ ansible_user=self.executor_server.default_username,
nodepool_az=node.get('az'),
nodepool_provider=node.get('provider'),
nodepool_region=node.get('region'))
@@ -801,7 +1018,8 @@
# the stack of changes we are testing, so check out the branch
# tip into a dedicated space.
- merger = self.executor_server._getMerger(jobdir_playbook.root)
+ merger = self.executor_server._getMerger(jobdir_playbook.root,
+ self.log)
merger.checkoutBranch(playbook['connection'], project.name,
playbook['branch'])
@@ -885,7 +1103,8 @@
# in the dependency chain for the change (in which case,
# there is no existing untrusted checkout of it). Check
# out the branch tip into a dedicated space.
- merger = self.executor_server._getMerger(trusted_root)
+ merger = self.executor_server._getMerger(trusted_root,
+ self.log)
merger.checkoutBranch(role['connection'], project.name,
'master')
orig_repo_path = os.path.join(trusted_root,
@@ -933,28 +1152,24 @@
self.jobdir.trusted_roles_path.append(trusted_role_path)
def prepareAnsibleFiles(self, args):
- keys = []
- with open(self.jobdir.inventory, 'w') as inventory:
- for item in self.getHostList(args):
- inventory.write(item['name'])
- for k, v in item['host_vars'].items():
- inventory.write(' %s="%s"' % (k, v))
- inventory.write('\n')
- for key in item['host_keys']:
- keys.append(key)
+ all_vars = dict(args['vars'])
+ all_vars['zuul']['executor'] = dict(
+ hostname=self.executor_server.hostname,
+ src_root=self.jobdir.src_root,
+ log_root=self.jobdir.log_root)
+
+ nodes = self.getHostList(args)
+ inventory = make_inventory_dict(nodes, args['groups'], all_vars)
+
+ with open(self.jobdir.inventory, 'w') as inventory_yaml:
+ inventory_yaml.write(
+ yaml.safe_dump(inventory, default_flow_style=False))
with open(self.jobdir.known_hosts, 'w') as known_hosts:
- for key in keys:
- known_hosts.write('%s\n' % key)
+ for node in nodes:
+ for key in node['host_keys']:
+ known_hosts.write('%s\n' % key)
- with open(self.jobdir.vars, 'w') as vars_yaml:
- zuul_vars = dict(args['vars'])
- zuul_vars['zuul']['executor'] = dict(
- hostname=self.executor_server.hostname,
- src_root=self.jobdir.src_root,
- log_root=self.jobdir.log_root)
- vars_yaml.write(
- yaml.safe_dump(zuul_vars, default_flow_style=False))
self.writeAnsibleConfig(self.jobdir.untrusted_config)
self.writeAnsibleConfig(self.jobdir.trusted_config, trusted=True)
@@ -1032,12 +1247,26 @@
def runAnsible(self, cmd, timeout, trusted=False):
env_copy = os.environ.copy()
+ env_copy.update(self.ssh_agent.env)
env_copy['LOGNAME'] = 'zuul'
+ pythonpath = env_copy.get('PYTHONPATH')
+ if pythonpath:
+ pythonpath = [pythonpath]
+ else:
+ pythonpath = []
+ pythonpath = [self.executor_server.ansible_dir] + pythonpath
+ env_copy['PYTHONPATH'] = os.path.pathsep.join(pythonpath)
if trusted:
config_file = self.jobdir.trusted_config
+ popen = subprocess.Popen
else:
config_file = self.jobdir.untrusted_config
+ driver = self.executor_server.untrusted_wrapper
+ popen = driver.getPopen(
+ work_dir=self.jobdir.root,
+ ansible_dir=self.executor_server.ansible_dir,
+ ssh_auth_sock=env_copy.get('SSH_AUTH_SOCK'))
env_copy['ANSIBLE_CONFIG'] = config_file
@@ -1046,7 +1275,7 @@
return (self.RESULT_ABORTED, None)
self.log.debug("Ansible command: ANSIBLE_CONFIG=%s %s",
config_file, " ".join(shlex_quote(c) for c in cmd))
- self.proc = subprocess.Popen(
+ self.proc = popen(
cmd,
cwd=self.jobdir.work_root,
stdout=subprocess.PIPE,
@@ -1094,12 +1323,10 @@
else:
verbose = '-v'
- cmd = ['ansible-playbook', playbook.path]
+ cmd = ['ansible-playbook', verbose, playbook.path]
if success is not None:
cmd.extend(['-e', 'success=%s' % str(bool(success))])
- cmd.extend(['-e@%s' % self.jobdir.vars, verbose])
-
return self.runAnsible(
cmd=cmd, timeout=timeout, trusted=playbook.trusted)
diff --git a/zuul/lib/connections.py b/zuul/lib/connections.py
index 720299a..79d78f4 100644
--- a/zuul/lib/connections.py
+++ b/zuul/lib/connections.py
@@ -22,6 +22,8 @@
import zuul.driver.smtp
import zuul.driver.timer
import zuul.driver.sql
+import zuul.driver.bubblewrap
+import zuul.driver.nullwrap
from zuul.connection import BaseConnection
from zuul.driver import SourceInterface
@@ -46,6 +48,8 @@
self.registerDriver(zuul.driver.smtp.SMTPDriver())
self.registerDriver(zuul.driver.timer.TimerDriver())
self.registerDriver(zuul.driver.sql.SQLDriver())
+ self.registerDriver(zuul.driver.bubblewrap.BubblewrapDriver())
+ self.registerDriver(zuul.driver.nullwrap.NullwrapDriver())
def registerDriver(self, driver):
if driver.name in self.drivers:
@@ -105,7 +109,7 @@
# The merger and the reporter only needs source driver.
# This makes sure Reporter like the SQLDriver are only created by
# the scheduler process
- if source_only and not issubclass(driver, SourceInterface):
+ if source_only and not isinstance(driver, SourceInterface):
continue
connection = driver.getConnection(con_name, con_config)
@@ -138,10 +142,11 @@
# Create default connections for drivers which need no
# connection information (e.g., 'timer' or 'zuul').
- for driver in self.drivers.values():
- if not hasattr(driver, 'getConnection'):
- connections[driver.name] = DefaultConnection(
- driver, driver.name, {})
+ if not source_only:
+ for driver in self.drivers.values():
+ if not hasattr(driver, 'getConnection'):
+ connections[driver.name] = DefaultConnection(
+ driver, driver.name, {})
self.connections = connections
diff --git a/zuul/lib/log_streamer.py b/zuul/lib/log_streamer.py
new file mode 100644
index 0000000..de072b6
--- /dev/null
+++ b/zuul/lib/log_streamer.py
@@ -0,0 +1,236 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 IBM Corp.
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import os.path
+import pwd
+import re
+import select
+import socket
+import threading
+import time
+
+try:
+ import SocketServer as ss # python 2.x
+except ImportError:
+ import socketserver as ss # python 3
+
+
+class Log(object):
+
+ def __init__(self, path):
+ self.path = path
+ self.file = open(path)
+ self.stat = os.stat(path)
+ self.size = self.stat.st_size
+
+
+class RequestHandler(ss.BaseRequestHandler):
+ '''
+ Class to handle a single log streaming request.
+
+ The log streaming code was blatantly stolen from zuul_console.py. Only
+ the (class/method/attribute) names were changed to protect the innocent.
+ '''
+
+ MAX_REQUEST_LEN = 1024
+ REQUEST_TIMEOUT = 10
+
+ def get_command(self):
+ poll = select.poll()
+ bitmask = (select.POLLIN | select.POLLERR |
+ select.POLLHUP | select.POLLNVAL)
+ poll.register(self.request, bitmask)
+ buffer = b''
+ ret = None
+ start = time.time()
+ while True:
+ elapsed = time.time() - start
+ timeout = max(self.REQUEST_TIMEOUT - elapsed, 0)
+ if not timeout:
+ raise Exception("Timeout while waiting for input")
+ for fd, event in poll.poll(timeout):
+ if event & select.POLLIN:
+ buffer += self.request.recv(self.MAX_REQUEST_LEN)
+ else:
+ raise Exception("Received error event")
+ if len(buffer) >= self.MAX_REQUEST_LEN:
+ raise Exception("Request too long")
+ try:
+ ret = buffer.decode('utf-8')
+ x = ret.find('\n')
+ if x > 0:
+ return ret[:x]
+ except UnicodeDecodeError:
+ pass
+
+ def handle(self):
+ build_uuid = self.get_command()
+ build_uuid = build_uuid.rstrip()
+
+ # validate build ID
+ if not re.match("[0-9A-Fa-f]+$", build_uuid):
+ msg = 'Build ID %s is not valid' % build_uuid
+ self.request.sendall(msg.encode("utf-8"))
+ return
+
+ job_dir = os.path.join(self.server.jobdir_root, build_uuid)
+ if not os.path.exists(job_dir):
+ msg = 'Build ID %s not found' % build_uuid
+ self.request.sendall(msg.encode("utf-8"))
+ return
+
+ # check if log file exists
+ log_file = os.path.join(job_dir, 'ansible', 'ansible_log.txt')
+ if not os.path.exists(log_file):
+ msg = 'Log not found for build ID %s' % build_uuid
+ self.request.sendall(msg.encode("utf-8"))
+ return
+
+ self.stream_log(log_file)
+
+ def stream_log(self, log_file):
+ log = None
+ while True:
+ if log is not None:
+ try:
+ log.file.close()
+ except Exception:
+ pass
+ while True:
+ log = self.chunk_log(log_file)
+ if log:
+ break
+ time.sleep(0.5)
+ while True:
+ if self.follow_log(log):
+ break
+ else:
+ return
+
+ def chunk_log(self, log_file):
+ try:
+ log = Log(log_file)
+ except Exception:
+ return
+ while True:
+ chunk = log.file.read(4096)
+ if not chunk:
+ break
+ self.request.send(chunk.encode('utf-8'))
+ return log
+
+ def follow_log(self, log):
+ while True:
+ # As long as we have unread data, keep reading/sending
+ while True:
+ chunk = log.file.read(4096)
+ if chunk:
+ self.request.send(chunk.encode('utf-8'))
+ else:
+ break
+
+ # At this point, we are waiting for more data to be written
+ time.sleep(0.5)
+
+ # Check to see if the remote end has sent any data, if so,
+ # discard
+ r, w, e = select.select([self.request], [], [self.request], 0)
+ if self.request in e:
+ return False
+ if self.request in r:
+ ret = self.request.recv(1024)
+ # Discard anything read, if input is eof, it has
+ # disconnected.
+ if not ret:
+ return False
+
+ # See if the file has been truncated
+ try:
+ st = os.stat(log.path)
+ if (st.st_ino != log.stat.st_ino or
+ st.st_size < log.size):
+ return True
+ except Exception:
+ return True
+ log.size = st.st_size
+
+
+class CustomForkingTCPServer(ss.ForkingTCPServer):
+ '''
+ Custom version that allows us to drop privileges after port binding.
+ '''
+ def __init__(self, *args, **kwargs):
+ self.user = kwargs.pop('user')
+ self.jobdir_root = kwargs.pop('jobdir_root')
+ # For some reason, setting custom attributes does not work if we
+ # call the base class __init__ first. Wha??
+ ss.ForkingTCPServer.__init__(self, *args, **kwargs)
+
+ def change_privs(self):
+ '''
+ Drop our privileges to the zuul user.
+ '''
+ if os.getuid() != 0:
+ return
+ pw = pwd.getpwnam(self.user)
+ os.setgroups([])
+ os.setgid(pw.pw_gid)
+ os.setuid(pw.pw_uid)
+ os.umask(0o022)
+
+ def server_bind(self):
+ self.allow_reuse_address = True
+ ss.ForkingTCPServer.server_bind(self)
+ if self.user:
+ self.change_privs()
+
+ def server_close(self):
+ '''
+ Overridden from base class to shutdown the socket immediately.
+ '''
+ try:
+ self.socket.shutdown(socket.SHUT_RD)
+ self.socket.close()
+ except socket.error as e:
+ # If it's already closed, don't error.
+ if e.errno == socket.EBADF:
+ return
+ raise
+
+
+class LogStreamer(object):
+ '''
+ Class implementing log streaming over the finger daemon port.
+ '''
+
+ def __init__(self, user, host, port, jobdir_root):
+ self.server = CustomForkingTCPServer((host, port),
+ RequestHandler,
+ user=user,
+ jobdir_root=jobdir_root)
+
+ # We start the actual serving within a thread so we can return to
+ # the owner.
+ self.thd = threading.Thread(target=self.server.serve_forever)
+ self.thd.daemon = True
+ self.thd.start()
+
+ def stop(self):
+ if self.thd.isAlive():
+ self.server.shutdown()
+ self.server.server_close()
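The wire protocol here is minimal: a client sends a build UUID terminated by a newline, then reads the streamed log until EOF. A hedged client sketch (host, port, and UUID are all placeholders)::

    import socket

    s = socket.create_connection(('executor.example.org', 79))
    s.sendall(b'0123456789abcdef0123456789abcdef\n')  # fabricated build UUID
    while True:
        chunk = s.recv(4096)
        if not chunk:
            break
        print(chunk.decode('utf-8'), end='')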
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index d13a1b4..3728c73 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -146,20 +146,17 @@
def reportStart(self, item):
if not self.pipeline._disabled:
- source = item.change.project.source
try:
self.log.info("Reporting start, action %s item %s" %
(self.pipeline.start_actions, item))
- ret = self.sendReport(self.pipeline.start_actions,
- source, item)
+ ret = self.sendReport(self.pipeline.start_actions, item)
if ret:
self.log.error("Reporting item start %s received: %s" %
(item, ret))
except:
self.log.exception("Exception while reporting start:")
- def sendReport(self, action_reporters, source, item,
- message=None):
+ def sendReport(self, action_reporters, item, message=None):
"""Sends the built message off to configured reporters.
Takes the action_reporters, item, message and extra options and
@@ -168,7 +165,7 @@
report_errors = []
if len(action_reporters) > 0:
for reporter in action_reporters:
- ret = reporter.report(source, self.pipeline, item)
+ ret = reporter.report(item)
if ret:
report_errors.append(ret)
if len(report_errors) == 0:
@@ -192,17 +189,6 @@
def getFailingDependentItems(self, item):
return None
- def getDependentItems(self, item):
- orig_item = item
- items = []
- while item.item_ahead:
- items.append(item.item_ahead)
- item = item.item_ahead
- self.log.info("Change %s depends on changes %s" %
- (orig_item.change,
- [x.change for x in items]))
- return items
-
def getItemForChange(self, change):
for item in self.pipeline.getAllItems():
if item.change.equals(change):
@@ -296,6 +282,10 @@
if not ignore_requirements:
for f in self.changeish_filters:
+ if f.connection_name != change.project.connection_name:
+ self.log.debug("Filter %s skipped for change %s due "
+ "to mismatched connections" % (f, change))
+ continue
if not f.matches(change):
self.log.debug("Change %s does not match pipeline "
"requirement %s" % (change, f))
@@ -326,9 +316,7 @@
item.enqueue_time = enqueue_time
item.live = live
self.reportStats(item)
- if not quiet:
- if len(self.pipeline.start_actions) > 0:
- self.reportStart(item)
+ item.quiet = quiet
self.enqueueChangesBehind(change, quiet, ignore_requirements,
change_queue)
zuul_driver = self.sched.connections.drivers['zuul']
@@ -364,7 +352,7 @@
def _executeJobs(self, item, jobs):
self.log.debug("Executing jobs for change %s" % item.change)
- dependent_items = self.getDependentItems(item)
+ build_set = item.current_build_set
for job in jobs:
self.log.debug("Found job %s for change %s" % (job, item.change))
try:
@@ -372,7 +360,8 @@
self.sched.nodepool.useNodeSet(nodeset)
build = self.sched.executor.execute(job, item,
self.pipeline,
- dependent_items)
+ build_set.dependent_items,
+ build_set.merger_items)
self.log.debug("Adding build %s of job %s to item %s" %
(build, job, item))
item.addBuild(build)
@@ -502,16 +491,11 @@
self.log.debug("Scheduling merge for item %s (files: %s)" %
(item, files))
- dependent_items = self.getDependentItems(item)
- dependent_items.reverse()
- all_items = dependent_items + [item]
- merger_items = [i.makeMergerItem() for i in all_items]
build_set = item.current_build_set
build_set.merge_state = build_set.PENDING
- self.sched.merger.mergeChanges(merger_items,
- item.current_build_set,
- files,
- self.pipeline.precedence)
+ self.sched.merger.mergeChanges(build_set.merger_items,
+ item.current_build_set, files,
+ precedence=self.pipeline.precedence)
return False
def prepareItem(self, item):
@@ -598,6 +582,14 @@
self.cancelJobs(item)
if actionable:
ready = self.prepareItem(item) and self.prepareJobs(item)
+ # Starting jobs reporting should only be done once if there are
+ # jobs to run for this item.
+ if ready and len(self.pipeline.start_actions) > 0 \
+ and len(item.job_graph.jobs) > 0 \
+ and not item.reported_start \
+ and not item.quiet:
+ self.reportStart(item)
+ item.reported_start = True
if item.current_build_set.unable_to_merge:
failing_reasons.append("it has a merge conflict")
if item.current_build_set.config_error:
@@ -684,6 +676,7 @@
if event.merged:
build_set.commit = event.commit
build_set.files.setFiles(event.files)
+ build_set.repo_state = event.repo_state
elif event.updated:
build_set.commit = item.change.newrev
if not build_set.commit:
@@ -737,9 +730,21 @@
def _reportItem(self, item):
self.log.debug("Reporting change %s" % item.change)
- source = item.change.project.source
ret = True # Means error as returned by trigger.report
- if item.getConfigError():
+
+ # In the case of failure, we may not have completed an initial
+ # merge which would get the layout for this item, so in order
+ # to determine whether this item's project is in this
+ # pipeline, use the dynamic layout if available, otherwise,
+ # fall back to the current static layout as a best
+ # approximation.
+ layout = item.layout or self.pipeline.layout
+
+ if not layout.hasProject(item.change.project):
+ self.log.debug("Project %s not in pipeline %s for change %s" % (
+ item.change.project, self.pipeline, item.change))
+ actions = []
+ elif item.getConfigError():
self.log.debug("Invalid config for change %s" % item.change)
# TODOv3(jeblair): consider a new reporter action for this
actions = self.pipeline.merge_failure_actions
@@ -747,9 +752,12 @@
elif item.didMergerFail():
actions = self.pipeline.merge_failure_actions
item.setReportedResult('MERGER_FAILURE')
+ elif item.wasDequeuedNeedingChange():
+ actions = self.pipeline.failure_actions
+ item.setReportedResult('FAILURE')
elif not item.getJobs():
# We don't send empty reports with +1
- self.log.debug("No jobs for change %s" % item.change)
+ self.log.debug("No jobs for change %s" % (item.change,))
actions = []
elif item.didAllJobsSucceed():
self.log.debug("success %s" % (self.pipeline.success_actions))
@@ -760,7 +768,7 @@
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
self.pipeline._consecutive_failures += 1
- if self.pipeline._disabled:
+ if layout.hasProject(item.change.project) and self.pipeline._disabled:
actions = self.pipeline.disabled_actions
# Check here if we should disable so that we only use the disabled
# reporters /after/ the last disable_at failure is still reported as
@@ -772,7 +780,7 @@
try:
self.log.info("Reporting item %s, actions: %s" %
(item, actions))
- ret = self.sendReport(actions, source, item)
+ ret = self.sendReport(actions, item)
if ret:
self.log.error("Reporting item %s received: %s" %
(item, ret))
diff --git a/zuul/merger/client.py b/zuul/merger/client.py
index e164195..c98f20e 100644
--- a/zuul/merger/client.py
+++ b/zuul/merger/client.py
@@ -107,10 +107,11 @@
timeout=300)
return job
- def mergeChanges(self, items, build_set, files=None,
+ def mergeChanges(self, items, build_set, files=None, repo_state=None,
precedence=zuul.model.PRECEDENCE_NORMAL):
data = dict(items=items,
- files=files)
+ files=files,
+ repo_state=repo_state)
self.submitJob('merger:merge', data, build_set, precedence)
def getFiles(self, connection_name, project_name, branch, files,
@@ -129,6 +130,7 @@
updated = data.get('updated', False)
commit = data.get('commit')
files = data.get('files', {})
+ repo_state = data.get('repo_state', {})
job.files = files
self.log.info("Merge %s complete, merged: %s, updated: %s, "
"commit: %s" %
@@ -136,7 +138,8 @@
job.setComplete()
if job.build_set:
self.sched.onMergeCompleted(job.build_set, zuul_url,
- merged, updated, commit, files)
+ merged, updated, commit, files,
+ repo_state)
# The test suite expects the job to be removed from the
# internal account after the wake flag is set.
self.jobs.remove(job)
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index 714d643..6cfd904 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -14,6 +14,7 @@
# under the License.
import git
+import gitdb
import os
import logging
@@ -41,13 +42,17 @@
class Repo(object):
- log = logging.getLogger("zuul.Repo")
-
- def __init__(self, remote, local, email, username):
+ def __init__(self, remote, local, email, username,
+ cache_path=None, logger=None):
+ if logger is None:
+ self.log = logging.getLogger("zuul.Repo")
+ else:
+ self.log = logger
self.remote_url = remote
self.local_path = local
self.email = email
self.username = username
+ self.cache_path = cache_path
self._initialized = False
try:
self._ensure_cloned()
@@ -59,17 +64,32 @@
if self._initialized and repo_is_cloned:
return
# If the repo does not exist, clone the repo.
+ rewrite_url = False
if not repo_is_cloned:
self.log.debug("Cloning from %s to %s" % (self.remote_url,
self.local_path))
- git.Repo.clone_from(self.remote_url, self.local_path)
+ if self.cache_path:
+ git.Repo.clone_from(self.cache_path, self.local_path)
+ rewrite_url = True
+ else:
+ git.Repo.clone_from(self.remote_url, self.local_path)
repo = git.Repo(self.local_path)
+ # Create local branches corresponding to all the remote branches
+ if not repo_is_cloned:
+ origin = repo.remotes.origin
+ for ref in origin.refs:
+ if ref.remote_head == 'HEAD':
+ continue
+ repo.create_head(ref.remote_head, ref, force=True)
with repo.config_writer() as config_writer:
if self.email:
config_writer.set_value('user', 'email', self.email)
if self.username:
config_writer.set_value('user', 'name', self.username)
config_writer.write()
+ if rewrite_url:
+ with repo.remotes.origin.config_writer as config_writer:
+ config_writer.set('url', self.remote_url)
self._initialized = True
def isInitialized(self):
@@ -117,6 +137,10 @@
origin = repo.remotes.origin
return branch in origin.refs
+ def getBranches(self):
+ repo = self.createRepoObject()
+ return [x.name for x in repo.heads]
+
def getCommitFromRef(self, refname):
repo = self.createRepoObject()
if refname not in repo.refs:
@@ -124,6 +148,31 @@
ref = repo.refs[refname]
return ref.commit
+ def getRefs(self):
+ repo = self.createRepoObject()
+ return repo.refs
+
+ def setRef(self, path, hexsha, repo=None):
+ if repo is None:
+ repo = self.createRepoObject()
+ binsha = gitdb.util.to_bin_sha(hexsha)
+ obj = git.objects.Object.new_from_sha(repo, binsha)
+ self.log.debug("Create reference %s", path)
+ git.refs.Reference.create(repo, path, obj, force=True)
+
+ def setRefs(self, refs):
+ repo = self.createRepoObject()
+ current_refs = {}
+ for ref in repo.refs:
+ current_refs[ref.path] = ref
+ unseen = set(current_refs.keys())
+ for path, hexsha in refs.items():
+ self.setRef(path, hexsha, repo)
+ unseen.discard(path)
+ for path in unseen:
+ self.log.debug("Delete reference %s", path)
+ git.refs.SymbolicReference.delete(repo, path)
+
def checkout(self, ref):
repo = self.createRepoObject()
self.log.debug("Checking out %s" % ref)
@@ -131,6 +180,13 @@
reset_repo_to_head(repo)
return repo.head.commit
+ def checkoutLocalBranch(self, branch):
+ repo = self.createRepoObject()
+ # Perform a hard reset before checking out so that we clean up
+ # anything that might be left over from a merge.
+ reset_repo_to_head(repo)
+ repo.heads[branch].checkout()
+
def cherryPick(self, ref):
repo = self.createRepoObject()
self.log.debug("Cherry-picking %s" % ref)
@@ -204,11 +260,19 @@
ret[fn] = None
return ret
+ def deleteRemote(self, remote):
+ repo = self.createRepoObject()
+ repo.delete_remote(repo.remotes[remote])
+
class Merger(object):
- log = logging.getLogger("zuul.Merger")
-
- def __init__(self, working_root, connections, email, username):
+ def __init__(self, working_root, connections, email, username,
+ cache_root=None, logger=None):
+ self.logger = logger
+ if logger is None:
+ self.log = logging.getLogger("zuul.Merger")
+ else:
+ self.log = logger
self.repos = {}
self.working_root = working_root
if not os.path.exists(working_root):
@@ -216,6 +280,7 @@
self.connections = connections
self.email = email
self.username = username
+ self.cache_root = cache_root
def _get_ssh_cmd(self, connection_name):
sshkey = self.connections.connections.get(connection_name).\
@@ -238,7 +303,13 @@
key = '/'.join([hostname, project_name])
try:
path = os.path.join(self.working_root, hostname, project_name)
- repo = Repo(url, path, self.email, self.username)
+ if self.cache_root:
+ cache_path = os.path.join(self.cache_root, hostname,
+ project_name)
+ else:
+ cache_path = None
+ repo = Repo(url, path, self.email, self.username, cache_path,
+ self.logger)
self.repos[key] = repo
except Exception:
@@ -275,15 +346,37 @@
connection_name, project_name)
def checkoutBranch(self, connection_name, project_name, branch):
+ self.log.info("Checking out %s/%s branch %s",
+ connection_name, project_name, branch)
repo = self.getRepo(connection_name, project_name)
- if repo.hasBranch(branch):
- self.log.info("Checking out branch %s of %s/%s" %
- (branch, connection_name, project_name))
- head = repo.getBranchHead(branch)
- repo.checkout(head)
- else:
- raise Exception("Project %s/%s does not have branch %s" %
- (connection_name, project_name, branch))
+ repo.checkoutLocalBranch(branch)
+
+ def _saveRepoState(self, connection_name, project_name, repo,
+ repo_state, recent):
+ projects = repo_state.setdefault(connection_name, {})
+ project = projects.setdefault(project_name, {})
+ for ref in repo.getRefs():
+ if ref.path.startswith('refs/zuul/'):
+ continue
+ if ref.path.startswith('refs/remotes/'):
+ continue
+ if ref.path.startswith('refs/heads/'):
+ branch = ref.path[len('refs/heads/'):]
+ key = (connection_name, project_name, branch)
+ if key not in recent:
+ recent[key] = ref.object
+ project[ref.path] = ref.object.hexsha
+
+ def _restoreRepoState(self, connection_name, project_name, repo,
+ repo_state):
+ projects = repo_state.get(connection_name, {})
+ project = projects.get(project_name, {})
+ if not project:
+ # We don't have a state for this project.
+ return
+ self.log.debug("Restore repo state for project %s/%s",
+ connection_name, project_name)
+ repo.setRefs(project)
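The saved/restored ``repo_state`` nests connection -> project -> ref -> sha, for example (names and sha fabricated)::

    repo_state = {
        'gerrit': {
            'org/project': {
                'refs/heads/master':
                    '0123456789abcdef0123456789abcdef01234567',
            },
        },
    }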
def _mergeChange(self, item, ref):
repo = self.getRepo(item['connection'], item['project'])
@@ -314,27 +407,13 @@
return commit
- def _mergeItem(self, item, recent):
+ def _mergeItem(self, item, recent, repo_state):
self.log.debug("Processing refspec %s for project %s/%s / %s ref %s" %
(item['refspec'], item['connection'],
item['project'], item['branch'], item['ref']))
repo = self.getRepo(item['connection'], item['project'])
key = (item['connection'], item['project'], item['branch'])
- # See if we have a commit for this change already in this repo
- zuul_ref = item['branch'] + '/' + item['ref']
- with repo.createRepoObject().git.custom_environment(
- GIT_SSH_COMMAND=self._get_ssh_cmd(item['connection'])):
- commit = repo.getCommitFromRef(zuul_ref)
- if commit:
- self.log.debug(
- "Found commit %s for ref %s" % (commit, zuul_ref))
- # Store this as the most recent commit for this
- # project-branch
- recent[key] = commit
- return commit
-
- self.log.debug("Unable to find commit for ref %s" % (zuul_ref,))
# We need to merge the change
# Get the most recent commit for this project-branch
base = recent.get(key)
@@ -347,7 +426,14 @@
except Exception:
self.log.exception("Unable to reset repo %s" % repo)
return None
+ self._restoreRepoState(item['connection'], item['project'], repo,
+ repo_state)
+
base = repo.getBranchHead(item['branch'])
+ # Save the repo state so that later mergers can repeat
+ # this process.
+ self._saveRepoState(item['connection'], item['project'], repo,
+ repo_state, recent)
else:
self.log.debug("Found base commit %s for %s" % (base, key,))
# Merge the change
@@ -362,20 +448,26 @@
# commits of each project-branch
for key, mrc in recent.items():
connection, project, branch = key
+ zuul_ref = None
try:
repo = self.getRepo(connection, project)
zuul_ref = branch + '/' + item['ref']
- repo.createZuulRef(zuul_ref, mrc)
+ if not repo.getCommitFromRef(zuul_ref):
+ repo.createZuulRef(zuul_ref, mrc)
except Exception:
self.log.exception("Unable to set zuul ref %s for "
"item %s" % (zuul_ref, item))
return None
return commit
- def mergeChanges(self, items, files=None):
+ def mergeChanges(self, items, files=None, repo_state=None):
+ # connection+project+branch -> commit
recent = {}
commit = None
read_files = []
+ # connection -> project -> ref -> commit
+ if repo_state is None:
+ repo_state = {}
for item in items:
if item.get("number") and item.get("patchset"):
self.log.debug("Merging for change %s,%s." %
@@ -383,7 +475,7 @@
elif item.get("newrev") and item.get("oldrev"):
self.log.debug("Merging for rev %s with oldrev %s." %
(item["newrev"], item["oldrev"]))
- commit = self._mergeItem(item, recent)
+ commit = self._mergeItem(item, recent, repo_state)
if not commit:
return None
if files:
@@ -394,9 +486,10 @@
project=item['project'],
branch=item['branch'],
files=repo_files))
- if files:
- return commit.hexsha, read_files
- return commit.hexsha
+ ret_recent = {}
+ for k, v in recent.items():
+ ret_recent[k] = v.hexsha
+ return commit.hexsha, read_files, repo_state, ret_recent
def getFiles(self, connection_name, project_name, branch, files):
repo = self.getRepo(connection_name, project_name)
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index c09d7ba..1a32f96 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -103,16 +103,15 @@
def merge(self, job):
args = json.loads(job.arguments)
- ret = self.merger.mergeChanges(args['items'], args.get('files'))
+ ret = self.merger.mergeChanges(args['items'], args.get('files'),
+ args.get('repo_state'))
result = dict(merged=(ret is not None),
zuul_url=self.zuul_url)
- if args.get('files'):
- if ret:
- result['commit'], result['files'] = ret
- else:
- result['commit'], result['files'] = (None, None)
+ if ret is None:
+ result['commit'] = result['files'] = result['repo_state'] = None
else:
- result['commit'] = ret
+ (result['commit'], result['files'], result['repo_state'],
+ recent) = ret
job.sendWorkComplete(json.dumps(result))
def cat(self, job):
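With this change the merge job's reply always carries the same keys; a hypothetical payload, written as a Python literal with example values:

    # Illustrative result passed to job.sendWorkComplete() above.
    result = {
        'merged': True,
        'zuul_url': 'http://zuul.example.com/p',
        'commit': '1a32f96',   # None when the merge failed
        'files': {},           # None when the merge failed
        'repo_state': {},      # None when the merge failed
    }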
diff --git a/zuul/model.py b/zuul/model.py
index 4ae6f9a..6ad34ff 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -410,6 +410,37 @@
self._keys = keys
+class Group(object):
+ """A logical group of nodes for use by a job.
+
+ A Group is a named set of node names that will be provided to
+ jobs in the inventory to describe logical units on which some
+ subset of tasks run.
+ """
+
+ def __init__(self, name, nodes):
+ self.name = name
+ self.nodes = nodes
+
+ def __repr__(self):
+ return '<Group %s %s>' % (self.name, str(self.nodes))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ if not isinstance(other, Group):
+ return False
+ return (self.name == other.name and
+ self.nodes == other.nodes)
+
+ def toDict(self):
+ return {
+ 'name': self.name,
+ 'nodes': self.nodes
+ }
+
+
class NodeSet(object):
"""A set of nodes.
@@ -423,6 +454,7 @@
def __init__(self, name=None):
self.name = name or ''
self.nodes = OrderedDict()
+ self.groups = OrderedDict()
def __ne__(self, other):
return not self.__eq__(other)
@@ -437,6 +469,8 @@
n = NodeSet(self.name)
for name, node in self.nodes.items():
n.addNode(Node(node.name, node.image))
+ for name, group in self.groups.items():
+ n.addGroup(Group(group.name, group.nodes[:]))
return n
def addNode(self, node):
@@ -447,12 +481,20 @@
def getNodes(self):
return list(self.nodes.values())
+ def addGroup(self, group):
+ if group.name in self.groups:
+ raise Exception("Duplicate group in %s" % (self,))
+ self.groups[group.name] = group
+
+ def getGroups(self):
+ return list(self.groups.values())
+
def __repr__(self):
if self.name:
name = self.name + ' '
else:
name = ''
- return '<NodeSet %s%s>' % (name, self.nodes)
+ return '<NodeSet %s%s%s>' % (name, self.nodes, self.groups)
class NodeRequest(object):
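A short usage sketch of the new Group support, assuming the Node constructor takes (name, image) as the copy() method above suggests:

    # Hypothetical multinode NodeSet with one logical group.
    ns = NodeSet('multinode')
    ns.addNode(Node('controller', 'ubuntu-xenial'))
    ns.addNode(Node('compute1', 'ubuntu-xenial'))
    ns.addGroup(Group('computes', ['compute1']))
    ns.getGroups()  # [<Group computes ['compute1']>]
    ns.copy()       # the copy carries the groups as well as the nodes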
@@ -752,8 +794,9 @@
attempts=3,
final=False,
roles=frozenset(),
- repos=frozenset(),
+ required_projects={},
allowed_projects=None,
+ override_branch=None,
)
# These are generally internal attributes which are not
@@ -820,6 +863,11 @@
Job._deepUpdate(v, other_vars)
self.variables = v
+ def updateProjects(self, other_projects):
+ required_projects = self.required_projects
+ Job._deepUpdate(required_projects, other_projects)
+ self.required_projects = required_projects
+
@staticmethod
def _deepUpdate(a, b):
# Merge nested dictionaries if possible, otherwise, overwrite
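_deepUpdate itself lies outside this hunk; going only by the comment above, its behavior is roughly the following sketch (not the actual implementation):

    def deep_update(a, b):
        # Merge nested dictionaries where both sides are dicts;
        # otherwise the value from b overwrites the value in a.
        for key, value in b.items():
            if isinstance(value, dict) and isinstance(a.get(key), dict):
                deep_update(a[key], value)
            else:
                a[key] = value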
@@ -871,7 +919,8 @@
"%s=%s with variant %s" % (
repr(self), k, other._get(k),
repr(other)))
- if k not in set(['pre_run', 'post_run', 'roles', 'variables']):
+ if k not in set(['pre_run', 'post_run', 'roles', 'variables',
+ 'required_projects']):
setattr(self, k, copy.deepcopy(other._get(k)))
# Don't set final above so that we don't trip an error halfway
@@ -887,6 +936,8 @@
self.roles = self.roles.union(other.roles)
if other._get('variables') is not None:
self.updateVariables(other.variables)
+ if other._get('required_projects') is not None:
+ self.updateProjects(other.required_projects)
for k in self.context_attributes:
if (other._get(k) is not None and
@@ -914,6 +965,14 @@
return True
+class JobProject(object):
+ """ A reference to a project from a job. """
+
+ def __init__(self, project_name, override_branch=None):
+ self.project_name = project_name
+ self.override_branch = override_branch
+
+
class JobList(object):
""" A list of jobs in a project's pipeline. """
@@ -929,7 +988,7 @@
def inheritFrom(self, other):
for jobname, jobs in other.jobs.items():
if jobname in self.jobs:
- self.jobs[jobname].append(jobs)
+ self.jobs[jobname].extend(jobs)
else:
self.jobs[jobname] = jobs
@@ -1131,7 +1190,6 @@
def __init__(self, item):
self.item = item
- self.other_changes = []
self.builds = {}
self.result = None
self.next_build_set = None
@@ -1139,6 +1197,8 @@
self.ref = None
self.commit = None
self.zuul_url = None
+ self.dependent_items = None
+ self.merger_items = None
self.unable_to_merge = False
self.config_error = None # None or an error message string.
self.failing_reasons = []
@@ -1146,6 +1206,7 @@
self.nodesets = {} # job -> nodeset
self.node_requests = {} # job -> reqs
self.files = RepoFiles()
+ self.repo_state = {}
self.layout = None
self.tries = {}
@@ -1159,13 +1220,19 @@
# The change isn't enqueued until after it's created
# so we don't know what the other changes ahead will be
# until jobs start.
- if not self.other_changes:
+ if self.dependent_items is None:
+ items = []
next_item = self.item.item_ahead
while next_item:
- self.other_changes.append(next_item.change)
+ items.append(next_item)
next_item = next_item.item_ahead
+ self.dependent_items = items
if not self.ref:
self.ref = 'Z' + uuid4().hex
+ if self.merger_items is None:
+ items = [self.item] + self.dependent_items
+ items.reverse()
+ self.merger_items = [i.makeMergerItem() for i in items]
def getStateName(self, state_num):
return self.states_map.get(
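To make the ordering in the hunk above concrete: for a queue where A is ahead of B, which is ahead of this build set's item C, the walk yields (hypothetical sketch):

    # dependent_items is collected nearest-first; merger_items is then
    # built oldest-first so the merger replays changes in merge order.
    dependent_items = ['B', 'A']    # items ahead, nearest first
    merger_items = ['A', 'B', 'C']  # reversed, with this item last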
@@ -1217,9 +1284,26 @@
return self.tries.get(job_name, 0)
def getMergeMode(self):
- if self.layout:
+ # We may be called before this build set has a shadow layout
+ # (i.e., we are called to perform the merge to create that
+ # layout). It's possible that the change we are merging will
+ # update the merge-mode for the project, but there's not much
+ # we can do about that here. Instead, do the best we can by
+ # using the nearest shadow layout to determine the merge mode,
+ # or if that fails, the current live layout, or if that fails,
+ # use the default: merge-resolve.
+ item = self.item
+ layout = None
+ while item:
+ layout = item.current_build_set.layout
+ if layout:
+ break
+ item = item.item_ahead
+ if not layout:
+ layout = self.item.pipeline.layout
+ if layout:
project = self.item.change.project
- project_config = self.layout.project_configs.get(
+ project_config = layout.project_configs.get(
project.canonical_name)
if project_config:
return project_config.merge_mode
@@ -1248,6 +1332,8 @@
self.enqueue_time = None
self.dequeue_time = None
self.reported = False
+ self.reported_start = False
+ self.quiet = False
self.active = False # Whether an item is within an active window
self.live = True # Whether an item is intended to be processed at all
self.layout = None # This item's shadow layout
@@ -1355,6 +1441,9 @@
def getConfigError(self):
return self.current_build_set.config_error
+ def wasDequeuedNeedingChange(self):
+ return self.dequeued_needing_change
+
def isHoldingFollowingChanges(self):
if not self.live:
return False
@@ -1499,8 +1588,8 @@
# secrets, etc.
safe_change = self.change.getSafeAttributes()
safe_pipeline = self.pipeline.getSafeAttributes()
- safe_job = job.getSafeAttributes()
- safe_build = build.getSafeAttributes()
+ safe_job = job.getSafeAttributes() if job else {}
+ safe_build = build.getSafeAttributes() if build else {}
try:
url = url_pattern.format(change=safe_change,
pipeline=safe_pipeline,
@@ -1899,8 +1988,9 @@
class RefFilter(BaseFilter):
"""Allows a Manager to only enqueue Changes that meet certain criteria."""
- def __init__(self):
+ def __init__(self, connection_name):
super(RefFilter, self).__init__()
+ self.connection_name = connection_name
def matches(self, change):
return True
@@ -1919,6 +2009,7 @@
def __init__(self, name):
self.name = name
self.merge_mode = None
+ self.default_branch = None
self.pipelines = {}
self.private_key_file = None
@@ -2160,6 +2251,9 @@
self._createJobGraph(item, project_job_list, ret)
return ret
+ def hasProject(self, project):
+ return project.canonical_name in self.project_configs
+
class Semaphore(object):
def __init__(self, name, max=1):
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index 582265d..dc99c8b 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -37,7 +37,7 @@
self._action = action
@abc.abstractmethod
- def report(self, source, pipeline, item):
+ def report(self, item):
"""Send the compiled report message."""
def getSubmitAllowNeeds(self):
@@ -61,57 +61,55 @@
}
return format_methods[self._action]
- # TODOv3(jeblair): Consider removing pipeline argument in favor of
- # item.pipeline
- def _formatItemReport(self, pipeline, item, with_jobs=True):
+ def _formatItemReport(self, item, with_jobs=True):
"""Format a report from the given items. Usually to provide results to
a reporter taking free-form text."""
- ret = self._getFormatter()(pipeline, item, with_jobs)
+ ret = self._getFormatter()(item, with_jobs)
- if pipeline.footer_message:
- ret += '\n' + pipeline.footer_message
+ if item.pipeline.footer_message:
+ ret += '\n' + item.pipeline.footer_message
return ret
- def _formatItemReportStart(self, pipeline, item, with_jobs=True):
+ def _formatItemReportStart(self, item, with_jobs=True):
status_url = ''
if self.connection.sched.config.has_option('zuul', 'status_url'):
status_url = self.connection.sched.config.get('zuul',
'status_url')
- return pipeline.start_message.format(pipeline=pipeline,
- status_url=status_url)
+ return item.pipeline.start_message.format(pipeline=item.pipeline,
+ status_url=status_url)
- def _formatItemReportSuccess(self, pipeline, item, with_jobs=True):
- msg = pipeline.success_message
+ def _formatItemReportSuccess(self, item, with_jobs=True):
+ msg = item.pipeline.success_message
if with_jobs:
- msg += '\n\n' + self._formatItemReportJobs(pipeline, item)
+ msg += '\n\n' + self._formatItemReportJobs(item)
return msg
- def _formatItemReportFailure(self, pipeline, item, with_jobs=True):
+ def _formatItemReportFailure(self, item, with_jobs=True):
if item.dequeued_needing_change:
msg = 'This change depends on a change that failed to merge.\n'
elif item.didMergerFail():
- msg = pipeline.merge_failure_message
+ msg = item.pipeline.merge_failure_message
elif item.getConfigError():
msg = item.getConfigError()
else:
- msg = pipeline.failure_message
+ msg = item.pipeline.failure_message
if with_jobs:
- msg += '\n\n' + self._formatItemReportJobs(pipeline, item)
+ msg += '\n\n' + self._formatItemReportJobs(item)
return msg
- def _formatItemReportMergeFailure(self, pipeline, item, with_jobs=True):
- return pipeline.merge_failure_message
+ def _formatItemReportMergeFailure(self, item, with_jobs=True):
+ return item.pipeline.merge_failure_message
- def _formatItemReportDisabled(self, pipeline, item, with_jobs=True):
+ def _formatItemReportDisabled(self, item, with_jobs=True):
if item.current_build_set.result == 'SUCCESS':
- return self._formatItemReportSuccess(pipeline, item)
+ return self._formatItemReportSuccess(item)
elif item.current_build_set.result == 'FAILURE':
- return self._formatItemReportFailure(pipeline, item)
+ return self._formatItemReportFailure(item)
else:
- return self._formatItemReport(pipeline, item)
+ return self._formatItemReport(item)
- def _formatItemReportJobs(self, pipeline, item):
+ def _formatItemReportJobs(self, item):
# Return the list of jobs portion of the report
ret = ''
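Since the pipeline is now taken from item.pipeline, a concrete reporter needs only the item. A hypothetical subclass under the new single-argument API (the BaseReporter name and the log attribute are assumptions):

    import logging

    # Hypothetical reporter built on the refactored interface.
    class LogReporter(BaseReporter):
        log = logging.getLogger('zuul.LogReporter')

        def report(self, item):
            message = self._formatItemReport(item)
            self.log.info('Report for change %s: %s',
                          item.change, message)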
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index a67973e..61f1e5f 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -138,16 +138,18 @@
:arg bool merged: Whether the merge succeeded (changes with refs).
:arg bool updated: Whether the repo was updated (changes without refs).
:arg str commit: The SHA of the merged commit (changes with refs).
+ :arg dict repo_state: The starting repo state before the merge.
"""
def __init__(self, build_set, zuul_url, merged, updated, commit,
- files):
+ files, repo_state):
self.build_set = build_set
self.zuul_url = zuul_url
self.merged = merged
self.updated = updated
self.commit = commit
self.files = files
+ self.repo_state = repo_state
class NodesProvisionedEvent(ResultEvent):
@@ -256,11 +258,6 @@
def addEvent(self, event):
self.log.debug("Adding trigger event: %s" % event)
- try:
- if self.statsd:
- self.statsd.incr('gerrit.event.%s' % event.type)
- except:
- self.log.exception("Exception reporting event stats")
self.trigger_event_queue.put(event)
self.wake_event.set()
self.log.debug("Done adding trigger event: %s" % event)
@@ -316,11 +313,11 @@
self.log.debug("Done adding complete event for build: %s" % build)
def onMergeCompleted(self, build_set, zuul_url, merged, updated,
- commit, files):
+ commit, files, repo_state):
self.log.debug("Adding merge complete event for build set: %s" %
build_set)
event = MergeCompletedEvent(build_set, zuul_url, merged,
- updated, commit, files)
+ updated, commit, files, repo_state)
self.result_event_queue.put(event)
self.wake_event.set()
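Tying the pieces together: the caller of onMergeCompleted presumably unpacks the merger's reply (see the server change above) and forwards it, roughly like this hypothetical call with illustrative values:

    # Hypothetical: forwarding a finished merge into the scheduler.
    sched.onMergeCompleted(build_set, zuul_url,
                           merged=True, updated=True,
                           commit='61f1e5f', files={},
                           repo_state={})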