Merge "Ensure only python3 is installed with bindep.txt"
diff --git a/.zuul.yaml b/.zuul.yaml
index caef296..8b5ccb9 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -44,6 +44,8 @@
- zuul/cmd/migrate.py
- playbooks/zuul-migrate/.*
- zuul-stream-functional
+ - nodepool-zuul-functional:
+ voting: false
gate:
jobs:
- build-sphinx-docs:
diff --git a/doc/source/admin/components.rst b/doc/source/admin/components.rst
index ba14752..84ebc10 100644
--- a/doc/source/admin/components.rst
+++ b/doc/source/admin/components.rst
@@ -660,6 +660,16 @@
Base URL on which the websocket service is exposed, if different
than the base URL of the web app.
+ .. attr:: stats_url
+
+ Base URL from which statistics emitted via statsd can be queried.
+
+ .. attr:: stats_type
+ :default: graphite
+
+ Type of server hosting the statistics information. Currently only
+ 'graphite' is supported by the dashboard.
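+
+ For example, in zuul.conf (the URL is illustrative)::
+
+   [web]
+   stats_url = https://graphite.example.com
+   stats_type = graphite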
+
.. attr:: static_cache_expiry
:default: 3600
diff --git a/doc/source/admin/drivers/github.rst b/doc/source/admin/drivers/github.rst
index 83ac77f..a89cfc6 100644
--- a/doc/source/admin/drivers/github.rst
+++ b/doc/source/admin/drivers/github.rst
@@ -40,60 +40,43 @@
Application
...........
+.. NOTE Duplicate content here and in zuul-from-scratch.rst. Keep them
+ in sync.
+
To create a `GitHub application
<https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/registering-github-apps/>`_:
* Go to your organization settings page to create the application, e.g.:
https://github.com/organizations/my-org/settings/apps/new
-
* Set GitHub App name to "my-org-zuul"
-
* Set Setup URL to your setup documentation; when users install the
  application they are redirected to this URL
-
* Set Webhook URL to
``http://<zuul-hostname>/connection/<connection-name>/payload``.
-
* Create a Webhook secret
-
* Set permissions:
* Commit statuses: Read & Write
-
* Issues: Read & Write
-
* Pull requests: Read & Write
-
* Repository contents: Read & Write (write to let Zuul merge changes)
+ * Repository administration: Read
* Set events subscription:
-
* Label
-
* Status
-
* Issue comment
-
* Issues
-
* Pull request
-
* Pull request review
-
* Pull request review comment
-
* Commit comment
-
* Create
-
* Push
-
* Release
* Set Where can this GitHub App be installed to "Any account"
-
* Create the App
-
* Generate a Private key in the app settings page
Then, in zuul.conf, set webhook_token, app_id and app_key.
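+
+For example (values are illustrative)::
+
+  [connection github]
+  driver=github
+  app_id=1234
+  app_key=/etc/zuul/github.pem
+  webhook_token=<WEBHOOK TOKEN>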
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index a2a2ee7..af83a3b 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -12,6 +12,7 @@
:maxdepth: 2
quick-start
+ zuul-from-scratch
installation
components
connections
diff --git a/doc/source/admin/monitoring.rst b/doc/source/admin/monitoring.rst
index 1c17c28..fbcedad 100644
--- a/doc/source/admin/monitoring.rst
+++ b/doc/source/admin/monitoring.rst
@@ -182,11 +182,11 @@
The one-minute load average of this executor, multiplied by 100.
- .. stat:: pct_available_ram
+ .. stat:: pct_used_ram
:type: gauge
- The available RAM (including buffers and cache) on this
- executor, as a percentage multiplied by 100.
+ The used RAM (excluding buffers and cache) on this executor, as
+ a percentage multiplied by 100.
.. stat:: zuul.nodepool
diff --git a/doc/source/admin/zuul-from-scratch.rst b/doc/source/admin/zuul-from-scratch.rst
new file mode 100644
index 0000000..141216b
--- /dev/null
+++ b/doc/source/admin/zuul-from-scratch.rst
@@ -0,0 +1,505 @@
+Zuul From Scratch
+=================
+
+.. note:: This is a work in progress that attempts to walk through all
+ of the steps needed to run Zuul on a cloud server against
+ GitHub projects.
+
+Environment Setup
+-----------------
+
+We're going to be using Fedora 27 on a cloud server for this
+installation.
+
+Login to your environment
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since we'll be using a cloud image for Fedora 27, our login user will
+be ``fedora`` which will also be the staging user for installation of
+Zuul and Nodepool.
+
+To get started, ssh to your machine as the ``fedora`` user::
+
+ ssh fedora@<ip_address>
+
+Install prerequisites
+~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo dnf update -y
+ sudo systemctl reboot
+ sudo dnf install git redhat-lsb-core python3 python3-pip python3-devel make gcc openssl-devel python-openstackclient -y
+ pip3 install --user bindep
+
+Zuul and Nodepool Installation
+------------------------------
+
+Install Zookeeper
+~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo dnf install zookeeper -y
+
+Install Nodepool
+~~~~~~~~~~~~~~~~
+
+::
+
+ sudo adduser --system nodepool --home-dir /var/lib/nodepool --create-home
+ git clone https://git.openstack.org/openstack-infra/nodepool
+ cd nodepool/
+ sudo dnf -y install $(bindep -b)
+ sudo pip3 install .
+
+Install Zuul
+~~~~~~~~~~~~
+
+::
+
+ sudo adduser --system zuul --home-dir /var/lib/zuul --create-home
+ git clone https://git.openstack.org/openstack-infra/zuul
+ cd zuul/
+ sudo dnf install $(bindep -b) -y
+ sudo pip3 install git+https://github.com/sigmavirus24/github3.py.git@develop#egg=Github3.py
+ sudo pip3 install .
+
+Setup
+-----
+
+Zookeeper Setup
+~~~~~~~~~~~~~~~
+
+.. TODO recommended reading for zk clustering setup
+
+::
+
+ sudo bash -c 'echo "1" > /etc/zookeeper/myid'
+ sudo bash -c 'echo "tickTime=2000
+ dataDir=/var/lib/zookeeper
+ clientPort=2181" > /etc/zookeeper/zoo.cfg'
+
+Nodepool Setup
+~~~~~~~~~~~~~~
+
+Before starting on this, you need to download your `openrc`
+configuration from your OpenStack cloud. Put it on your server in the
+fedora user's home directory. It should be called
+``<username>-openrc.sh``. Once that is done, create a new keypair
+that will be installed when instantiating the servers::
+
+ cd ~
+ source <username>-openrc.sh # this will prompt for password - enter it
+ umask 0066
+
+ ssh-keygen -t rsa -b 2048 -f nodepool_rsa # don't enter a passphrase
+ openstack keypair create --public-key nodepool_rsa.pub nodepool
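+
+You can confirm that the keypair was registered::
+
+  openstack keypair list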
+
+We'll use the private key later when configuring Zuul. In the same
+session, configure nodepool to talk to your cloud::
+
+ sudo mkdir -p ~nodepool/.config/openstack
+ cat > clouds.yaml <<EOF
+ clouds:
+ mycloud:
+ auth:
+ username: $OS_USERNAME
+ password: $OS_PASSWORD
+ project_name: ${OS_PROJECT_NAME:-$OS_TENANT_NAME}
+ auth_url: $OS_AUTH_URL
+ region_name: $OS_REGION_NAME
+ EOF
+ sudo mv clouds.yaml ~nodepool/.config/openstack/
+ sudo chown -R nodepool.nodepool ~nodepool/.config
+ umask 0002
+
+Once you've written out the file, double-check that all the required fields have been filled out.
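+
+A quick way to verify that the ``nodepool`` user can reach the cloud
+with this configuration is to run a read-only command as that user (any
+``openstack`` subcommand will do)::
+
+  sudo -H -u nodepool openstack --os-cloud mycloud image list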
+
+::
+
+ sudo mkdir /etc/nodepool/
+ sudo mkdir /var/log/nodepool
+ sudo chgrp -R nodepool /var/log/nodepool/
+ sudo chmod 775 /var/log/nodepool/
+
+Nodepool Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+Inputs needed for this file:
+
+* cloud name / region name - from clouds.yaml
+* flavor-name
+* image-name - from your cloud
+
+::
+
+ sudo bash -c "cat >/etc/nodepool/nodepool.yaml <<EOF
+ zookeeper-servers:
+ - host: localhost
+ port: 2181
+
+ providers:
+ - name: myprovider # this is a nodepool identifier for this cloud provider (cloud+region combo)
+ region-name: regionOne # this needs to match the region name in clouds.yaml but is only needed if there is more than one region
+ cloud: mycloud # This needs to match the name in clouds.yaml
+ cloud-images:
+ - name: centos-7 # Defines a cloud-image for nodepool
+ image-name: CentOS-7-x86_64-GenericCloud-1706 # name of image from cloud
+ username: centos # The user Zuul should log in as
+ pools:
+ - name: main
+ max-servers: 4 # nodepool will never create more than this many servers
+ labels:
+ - name: centos-7-small # defines label that will be used to get one of these in a job
+ flavor-name: 'm1.small' # name of flavor from cloud
+ cloud-image: centos-7 # matches name from cloud-images
+ key-name: nodepool # name of the keypair to use for authentication
+
+ labels:
+ - name: centos-7-small # defines label that will be used in jobs
+ min-ready: 2 # nodepool will always keep this many booted and ready to go
+ EOF"
+
+.. warning::
+
+ ``min-ready: 2`` may incur costs with your cloud provider.
+
+
+Zuul Setup
+~~~~~~~~~~
+
+::
+
+ sudo mkdir /etc/zuul/
+ sudo mkdir /var/log/zuul/
+ sudo chown zuul.zuul /var/log/zuul/
+ sudo mkdir /var/lib/zuul/.ssh
+ sudo chmod 0700 /var/lib/zuul/.ssh
+ sudo mv nodepool_rsa /var/lib/zuul/.ssh
+ sudo chown -R zuul.zuul /var/lib/zuul/.ssh
+
+Zuul Configuration
+~~~~~~~~~~~~~~~~~~
+
+Write the Zuul config file. Note that this configures Zuul's web
+server to listen on all public addresses. This is so that Zuul may
+receive webhook events from GitHub. You may wish to proxy this or
+further restrict public access.
+
+::
+
+ sudo bash -c "cat > /etc/zuul/zuul.conf <<EOF
+ [gearman]
+ server=127.0.0.1
+
+ [gearman_server]
+ start=true
+
+ [executor]
+ private_key_file=/var/lib/zuul/.ssh/nodepool_rsa
+
+ [web]
+ listen_address=0.0.0.0
+
+ [scheduler]
+ tenant_config=/etc/zuul/main.yaml
+ EOF"
+
+ sudo bash -c "cat > /etc/zuul/main.yaml <<EOF
+ - tenant:
+ name: quickstart
+ EOF"
+
+Service Management
+------------------
+
+Zookeeper Service Management
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo systemctl start zookeeper.service
+
+::
+
+ sudo systemctl status zookeeper.service
+ ● zookeeper.service - Apache ZooKeeper
+ Loaded: loaded (/usr/lib/systemd/system/zookeeper.service; disabled; vendor preset: disabled)
+ Active: active (running) since Wed 2018-01-03 14:53:47 UTC; 5s ago
+ Process: 4153 ExecStart=/usr/bin/zkServer.sh start zoo.cfg (code=exited, status=0/SUCCESS)
+ Main PID: 4160 (java)
+ Tasks: 17 (limit: 4915)
+ CGroup: /system.slice/zookeeper.service
+ └─4160 java -Dzookeeper.log.dir=/var/log/zookeeper -Dzookeeper.root.logger=INFO,CONSOLE -cp /usr/share/java/
+
+::
+
+ sudo systemctl enable zookeeper.service
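+
+Optionally, confirm that ZooKeeper is answering (assuming ``nc`` is
+installed) with its four-letter ``ruok`` command, which should print
+``imok``::
+
+  echo ruok | nc localhost 2181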
+
+
+Nodepool Service Management
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo bash -c "cat > /etc/systemd/system/nodepool-launcher.service <<EOF
+ [Unit]
+ Description=Nodepool Launcher Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ # Options to pass to nodepool-launcher.
+ Group=nodepool
+ User=nodepool
+ RuntimeDirectory=nodepool
+ ExecStart=/usr/local/bin/nodepool-launcher
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo chmod 0644 /etc/systemd/system/nodepool-launcher.service
+ sudo systemctl daemon-reload
+ sudo systemctl start nodepool-launcher.service
+ sudo systemctl status nodepool-launcher.service
+ sudo systemctl enable nodepool-launcher.service
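+
+Once the launcher is running, it should start booting the ``min-ready``
+nodes defined earlier; you can watch their progress with::
+
+  nodepool list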
+
+Zuul Service Management
+~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo bash -c "cat > /etc/systemd/system/zuul-scheduler.service <<EOF
+ [Unit]
+ Description=Zuul Scheduler Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ Group=zuul
+ User=zuul
+ RuntimeDirectory=zuul
+ ExecStart=/usr/local/bin/zuul-scheduler
+ ExecStop=/usr/local/bin/zuul-scheduler stop
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo bash -c "cat > /etc/systemd/system/zuul-executor.service <<EOF
+ [Unit]
+ Description=Zuul Executor Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ Group=zuul
+ User=zuul
+ RuntimeDirectory=zuul
+ ExecStart=/usr/local/bin/zuul-executor
+ ExecStop=/usr/local/bin/zuul-executor stop
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo bash -c "cat > /etc/systemd/system/zuul-web.service <<EOF
+ [Unit]
+ Description=Zuul Web Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ Group=zuul
+ User=zuul
+ RuntimeDirectory=zuul
+ ExecStart=/usr/local/bin/zuul-web
+ ExecStop=/usr/local/bin/zuul-web stop
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo systemctl daemon-reload
+ sudo systemctl start zuul-scheduler.service
+ sudo systemctl status zuul-scheduler.service
+ sudo systemctl enable zuul-scheduler.service
+ sudo systemctl start zuul-executor.service
+ sudo systemctl status zuul-executor.service
+ sudo systemctl enable zuul-executor.service
+ sudo systemctl start zuul-web.service
+ sudo systemctl status zuul-web.service
+ sudo systemctl enable zuul-web.service
+
+Use Zuul Jobs
+-------------
+
+Add to ``/etc/zuul/zuul.conf``::
+
+ sudo bash -c "cat >> /etc/zuul/zuul.conf <<EOF
+
+ [connection zuul-git]
+ driver=git
+ baseurl=https://git.openstack.org/
+ EOF"
+
+Restart executor and scheduler::
+
+ sudo systemctl restart zuul-executor.service
+ sudo systemctl restart zuul-scheduler.service
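+
+If either service fails to come back up, the journal is the first place
+to look::
+
+  sudo journalctl -u zuul-scheduler.service -e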
+
+Configure GitHub
+----------------
+
+You'll need an organization in GitHub for this, so create one if you
+haven't already. In this example we will use `my-org`.
+
+.. NOTE Duplicate content here and in drivers/github.rst. Keep them
+ in sync.
+
+Create a `GitHub application
+<https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/registering-github-apps/>`_:
+
+* Go to your organization settings page to create the application, e.g.:
+ https://github.com/organizations/my-org/settings/apps/new
+* Set GitHub App name to "my-org-zuul"
+* Set Setup URL to your setup documentation; when users install the
+  application they are redirected to this URL
+* Set Webhook URL to
+ ``http://<IP ADDRESS>/connection/github/payload``.
+* Create a Webhook secret, and record it for later use
+* Set permissions:
+
+ * Commit statuses: Read & Write
+ * Issues: Read & Write
+ * Pull requests: Read & Write
+ * Repository contents: Read & Write (write to let Zuul merge changes)
+ * Repository administration: Read
+
+* Set events subscription:
+
+ * Label
+ * Status
+ * Issue comment
+ * Issues
+ * Pull request
+ * Pull request review
+ * Pull request review comment
+ * Commit comment
+ * Create
+ * Push
+ * Release
+
+* Set Where can this GitHub App be installed to "Any account"
+* Create the App
+* Generate a Private key in the app settings page and save the file for later
+
+.. TODO See if we can script this using GitHub API
+
+Go back to the `General` settings page for the app,
+https://github.com/organizations/my-org/settings/apps/my-org-zuul
+and look for the app `ID` number, under the `About` section.
+
+Edit ``/etc/zuul/zuul.conf`` to add the following::
+
+ [connection github]
+ driver=github
+ app_id=<APP ID NUMBER>
+ app_key=/etc/zuul/github.pem
+ webhook_token=<WEBHOOK TOKEN>
+
+Upload the private key which was generated earlier, and save it in
+``/etc/zuul/github.pem``.
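+
+Since this key carries the app's credentials, it is worth restricting
+access to it::
+
+  sudo chown zuul:zuul /etc/zuul/github.pem
+  sudo chmod 0600 /etc/zuul/github.pem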
+
+Restart all of Zuul::
+
+ sudo systemctl restart zuul-executor.service
+ sudo systemctl restart zuul-web.service
+ sudo systemctl restart zuul-scheduler.service
+
+Go to the `Advanced` tab for the app in GitHub,
+https://github.com/organizations/my-org/settings/apps/my-org-zuul/advanced,
+and look for the initial ping from the app. It probably wasn't
+delivered since Zuul wasn't configured at the time, so click
+``Resend`` and verify that it is delivered now that Zuul is
+configured.
+
+Visit the public app page on GitHub,
+https://github.com/apps/my-org-zuul, and install the app into your org.
+
+Create two new repositories in your org. One will hold the
+configuration for this tenant in Zuul; the other should be a normal
+project repo to use for testing. We'll call them `zuul-test-config`
+and `zuul-test`, respectively.
+
+Edit ``/etc/zuul/main.yaml`` so that it looks like this::
+
+ - tenant:
+ name: quickstart
+ source:
+ zuul-git:
+ config-projects:
+ - openstack-infra/zuul-base-jobs
+ untrusted-projects:
+ - openstack-infra/zuul-jobs
+ github:
+ config-projects:
+ - my-org/zuul-test-config
+ untrusted-projects:
+ - my-org/zuul-test
+
+The first section, under ``zuul-git``, imports the "standard library" of
+Zuul jobs, a collection of jobs that can be used by any Zuul
+installation.
+
+The second section is your GitHub configuration.
+
+After updating the file, restart the Zuul scheduler::
+
+ sudo systemctl restart zuul-scheduler.service
+
+Add an initial pipeline configuration to the `zuul-test-config`
+repository. Inside that project, create a ``zuul.yaml`` file with the
+following contents::
+
+ - pipeline:
+ name: check
+ description: |
+ Newly opened pull requests enter this pipeline to receive an
+ initial verification
+ manager: independent
+ trigger:
+ github:
+ - event: pull_request
+ action:
+ - opened
+ - changed
+ - reopened
+ - event: pull_request
+ action: comment
+ comment: (?i)^\s*recheck\s*$
+ start:
+ github:
+ status: pending
+ comment: false
+ success:
+ github:
+ status: 'success'
+ failure:
+ github:
+ status: 'failure'
+
+Merge that commit into the repository.
+
+In the `zuul-test` project, create a `.zuul.yaml` file with the
+following contents::
+
+ - project:
+ check:
+ jobs:
+ - noop
+
+Open a new pull request with that commit against the `zuul-test`
+project and verify that Zuul reports a successful run of the `noop`
+job.
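+
+As a final check, zuul-web's status endpoint (the same one the
+dashboard polls; adjust the host and port to match your web listener)
+should return JSON describing the tenant's pipelines::
+
+  curl http://<zuul-hostname>/quickstart/status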
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 8492423..0932c56 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -710,6 +710,21 @@
timeout is supplied, the job may run indefinitely. Supplying a
timeout is highly recommended.
+ This timeout only applies to the pre-run and run playbooks in a
+ job.
+
+ .. attr:: post-timeout
+
+ The time in seconds that each post playbook should be allowed to run
+ before it is automatically aborted and failure is reported. If no
+ post-timeout is supplied, the job may run indefinitely. Supplying a
+ post-timeout is highly recommended.
+
+ The post-timeout is handled separately from the above timeout because
+ the post playbooks are typically where you will copy job logs.
+ In the event of the pre-run or run playbooks timing out, we want to
+ do our best to copy the job logs in the post-run playbooks.
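+
+ For example (values are illustrative):
+
+ .. code-block:: yaml
+
+    - job:
+        name: example-job
+        timeout: 1800
+        post-timeout: 600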
+
.. attr:: attempts
:default: 3
@@ -862,6 +877,48 @@
same name will override a previously defined variable, but new
variable names will be added to the set of defined variables.
+ .. attr:: host_vars
+
+ A dictionary of host variables to supply to Ansible. The keys
+ of this dictionary are node names as defined in a
+ :ref:`nodeset`, and the values are dictionaries of variables,
+ just as in :attr:`job.vars`.
+
+ .. attr:: group_vars
+
+ A dictionary of group variables to supply to Ansible. The keys
+ of this dictionary are node groups as defined in a
+ :ref:`nodeset`, and the values are dictionaries of variables,
+ just as in :attr:`job.vars`.
+
+ An example of three kinds of variables:
+
+ .. code-block:: yaml
+
+ - job:
+ name: variable-example
+ nodeset:
+ nodes:
+ - name: controller
+ label: fedora-27
+ - name: api1
+ label: centos-7
+ - name: api2
+ label: centos-7
+ groups:
+ - name: api
+ nodes:
+ - api1
+ - api2
+ vars:
+ foo: "this variable is visible to all nodes"
+ host_vars:
+ controller:
+ bar: "this variable is visible only on the controller node"
+ group_vars:
+ api:
+ baz: "this variable is visible on api1 and api2"
+
.. attr:: dependencies
A list of other jobs upon which this job depends. Zuul will not
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 820e316..4e1c33d 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -289,6 +289,10 @@
The job timeout, in seconds.
+ .. var:: post_timeout
+
+ The post-run playbook timeout, in seconds.
+
.. var:: jobtags
A list of tags associated with the job. Not to be confused with
diff --git a/etc/status/public_html/jquery.zuul.js b/etc/status/public_html/jquery.zuul.js
index 50dbed5..ac8a302 100644
--- a/etc/status/public_html/jquery.zuul.js
+++ b/etc/status/public_html/jquery.zuul.js
@@ -49,7 +49,7 @@
options = $.extend({
'enabled': true,
'graphite_url': '',
- 'source': 'status.json',
+ 'source': 'status',
'msg_id': '#zuul_msg',
'pipelines_id': '#zuul_pipelines',
'queue_events_num': '#zuul_queue_events_num',
diff --git a/etc/status/public_html/zuul.app.js b/etc/status/public_html/zuul.app.js
index bf90a4d..6e35eb3 100644
--- a/etc/status/public_html/zuul.app.js
+++ b/etc/status/public_html/zuul.app.js
@@ -55,7 +55,7 @@
var demo = location.search.match(/[?&]demo=([^?&]*)/),
source_url = location.search.match(/[?&]source_url=([^?&]*)/),
source = demo ? './status-' + (demo[1] || 'basic') + '.json-sample' :
- 'status.json';
+ 'status';
source = source_url ? source_url[1] : source;
var zuul = $.zuul({
diff --git a/requirements.txt b/requirements.txt
index 7057c5a..115b096 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,8 +4,6 @@
# is in a release
git+https://github.com/sigmavirus24/github3.py.git@develop#egg=Github3.py
PyYAML>=3.1.0
-Paste
-WebOb>=1.2.3
paramiko>=2.0.1
GitPython>=2.1.8
python-daemon>=2.0.4,<2.1.0
diff --git a/tests/base.py b/tests/base.py
index 0f2df35..013a6e1 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -529,6 +529,24 @@
}
return event
+ def getFakeBranchDeletedEvent(self, project, branch):
+ oldrev = '4abd38457c2da2a72d4d030219ab180ecdb04bf0'
+ newrev = 40 * '0'
+
+ event = {
+ "type": "ref-updated",
+ "submitter": {
+ "name": "User Name",
+ },
+ "refUpdate": {
+ "oldRev": oldrev,
+ "newRev": newrev,
+ "refName": 'refs/heads/' + branch,
+ "project": project,
+ }
+ }
+ return event
+
def review(self, project, changeid, message, action):
number, ps = changeid.split(',')
change = self.changes[int(number)]
@@ -941,7 +959,7 @@
log = logging.getLogger("zuul.test.FakeGithubConnection")
def __init__(self, driver, connection_name, connection_config, rpcclient,
- changes_db=None, upstream_root=None):
+ changes_db=None, upstream_root=None, git_url_with_auth=False):
super(FakeGithubConnection, self).__init__(driver, connection_name,
connection_config)
self.connection_name = connection_name
@@ -953,6 +971,7 @@
self.merge_not_allowed_count = 0
self.reports = []
self.github_client = tests.fakegithub.FakeGithub(changes_db)
+ self.git_url_with_auth = git_url_with_auth
self.rpcclient = rpcclient
def getGithubClient(self,
@@ -1045,7 +1064,13 @@
return 'read'
def getGitUrl(self, project):
- return os.path.join(self.upstream_root, str(project))
+ if self.git_url_with_auth:
+ auth_token = ''.join(
+ random.choice(string.ascii_lowercase) for x in range(8))
+ prefix = 'file://x-access-token:%s@' % auth_token
+ else:
+ prefix = ''
+ return prefix + os.path.join(self.upstream_root, str(project))
def real_getGitUrl(self, project):
return super(FakeGithubConnection, self).getGitUrl(project)
@@ -1325,7 +1350,7 @@
host['host_vars']['ansible_connection'] = 'local'
hosts.append(dict(
- name=['localhost'],
+ name='localhost',
host_vars=dict(ansible_connection='local'),
host_keys=[]))
return hosts
@@ -1832,7 +1857,7 @@
# from libraries that zuul depends on such as gear.
log_defaults_from_env = os.environ.get(
'OS_LOG_DEFAULTS',
- 'git.cmd=INFO,kazoo.client=WARNING,gear=INFO,paste=INFO')
+ 'git.cmd=INFO,kazoo.client=WARNING,gear=INFO')
if log_defaults_from_env:
for default in log_defaults_from_env.split(','):
@@ -1907,6 +1932,7 @@
run_ansible = False
create_project_keys = False
use_ssl = False
+ git_url_with_auth = False
def _startMerger(self):
self.merge_server = zuul.merger.server.MergeServer(self.config,
@@ -2076,10 +2102,12 @@
def getGithubConnection(driver, name, config):
server = config.get('server', 'github.com')
db = self.github_changes_dbs.setdefault(server, {})
- con = FakeGithubConnection(driver, name, config,
- self.rpcclient,
- changes_db=db,
- upstream_root=self.upstream_root)
+ con = FakeGithubConnection(
+ driver, name, config,
+ self.rpcclient,
+ changes_db=db,
+ upstream_root=self.upstream_root,
+ git_url_with_auth=self.git_url_with_auth)
self.event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
return con
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml
new file mode 100644
index 0000000..36e0eca
--- /dev/null
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml
@@ -0,0 +1,26 @@
+- hosts: host1
+ tasks:
+ - name: Assert hostvar is present.
+ assert:
+ that:
+ - allvar == 'all'
+ - hostvar == 'host'
+ - groupvar is not defined
+
+- hosts: host2
+ tasks:
+ - name: Assert groupvar is present.
+ assert:
+ that:
+ - allvar == 'all'
+ - hostvar is not defined
+ - groupvar == 'group'
+
+- hosts: host3
+ tasks:
+ - name: Assert groupvar is present.
+ assert:
+ that:
+ - allvar == 'all'
+ - hostvar is not defined
+ - groupvar == 'group'
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index d0a8f7b..13a19da 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -99,6 +99,12 @@
- job:
parent: python27
+ name: post-timeout
+ post-run: playbooks/timeout.yaml
+ post-timeout: 1
+
+- job:
+ parent: python27
name: check-vars
run: playbooks/check-vars.yaml
nodeset:
@@ -115,6 +121,32 @@
- job:
parent: python27
+ name: check-hostvars
+ run: playbooks/check-hostvars.yaml
+ nodeset:
+ nodes:
+ - name: host1
+ label: ubuntu-xenial
+ - name: host2
+ label: ubuntu-xenial
+ - name: host3
+ label: ubuntu-xenial
+ groups:
+ - name: group1
+ nodes:
+ - host2
+ - host3
+ vars:
+ allvar: all
+ host_vars:
+ host1:
+ hostvar: host
+ group_vars:
+ group1:
+ groupvar: group
+
+- job:
+ parent: python27
name: check-secret-names
run: playbooks/check-secret-names.yaml
nodeset:
diff --git a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
index 447f6cd..e332924 100644
--- a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
@@ -15,7 +15,9 @@
- python27
- faillocal
- check-vars
+ - check-hostvars
- check-secret-names
- timeout
+ - post-timeout
- hello-world
- failpost
diff --git a/tests/fixtures/config/branch-deletion/git/common-config/playbooks/base.yaml b/tests/fixtures/config/branch-deletion/git/common-config/playbooks/base.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/common-config/playbooks/base.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/branch-deletion/git/common-config/zuul.yaml b/tests/fixtures/config/branch-deletion/git/common-config/zuul.yaml
new file mode 100644
index 0000000..04091a7
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/common-config/zuul.yaml
@@ -0,0 +1,17 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/base.yaml
diff --git a/tests/fixtures/config/branch-deletion/git/org_project/zuul.yaml b/tests/fixtures/config/branch-deletion/git/org_project/zuul.yaml
new file mode 100644
index 0000000..cf635e8
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/org_project/zuul.yaml
@@ -0,0 +1,4 @@
+- project:
+ name: org/project
+ check:
+ jobs: []
diff --git a/tests/fixtures/config/branch-deletion/git/org_project1/zuul.yaml b/tests/fixtures/config/branch-deletion/git/org_project1/zuul.yaml
new file mode 100644
index 0000000..1fc35b5
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/org_project1/zuul.yaml
@@ -0,0 +1,3 @@
+- project:
+ check:
+ jobs: []
diff --git a/tests/fixtures/config/branch-deletion/main.yaml b/tests/fixtures/config/branch-deletion/main.yaml
new file mode 100644
index 0000000..9ffae3d
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/main.yaml
@@ -0,0 +1,10 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
+ - org/project1
+
diff --git a/tests/nodepool/test_nodepool_integration.py b/tests/nodepool/test_nodepool_integration.py
index 9c87a10..2f36154 100644
--- a/tests/nodepool/test_nodepool_integration.py
+++ b/tests/nodepool/test_nodepool_integration.py
@@ -30,6 +30,7 @@
def setUp(self):
super(TestNodepoolIntegration, self).setUp()
+ self.statsd = None
self.zk = zuul.zk.ZooKeeper()
self.addCleanup(self.zk.disconnect)
self.zk.connect('localhost:2181')
@@ -54,7 +55,7 @@
# Test a simple node request
nodeset = model.NodeSet()
- nodeset.addNode(model.Node('controller', 'fake-label'))
+ nodeset.addNode(model.Node(['controller'], 'fake-label'))
job = model.Job('testjob')
job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job)
@@ -63,7 +64,7 @@
self.assertEqual(request.state, model.STATE_FULFILLED)
# Accept the nodes
- self.nodepool.acceptNodes(request)
+ self.nodepool.acceptNodes(request, request.id)
nodeset = request.nodeset
for node in nodeset.getNodes():
@@ -84,7 +85,7 @@
def test_invalid_node_request(self):
# Test requests with an invalid node type fail
nodeset = model.NodeSet()
- nodeset.addNode(model.Node('controller', 'invalid-label'))
+ nodeset.addNode(model.Node(['controller'], 'invalid-label'))
job = model.Job('testjob')
job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job)
@@ -97,8 +98,8 @@
# Test that node requests are re-submitted after disconnect
nodeset = model.NodeSet()
- nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
- nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
+ nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
+ nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
self.fake_nodepool.paused = True
@@ -115,8 +116,8 @@
# Test that node requests can be canceled
nodeset = model.NodeSet()
- nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
- nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
+ nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
+ nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
self.fake_nodepool.paused = True
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
index 46e1d99..c67eb55 100755
--- a/tests/unit/test_executor.py
+++ b/tests/unit/test_executor.py
@@ -425,12 +425,14 @@
node = {'name': 'fake-host',
'host_keys': ['fake-host-key'],
'interface_ip': 'localhost'}
- keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+ keys = self.test_job.getHostList({'nodes': [node],
+ 'host_vars': {}})[0]['host_keys']
self.assertEqual(keys[0], 'localhost fake-host-key')
# Test with custom connection_port set
node['connection_port'] = 22022
- keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+ keys = self.test_job.getHostList({'nodes': [node],
+ 'host_vars': {}})[0]['host_keys']
self.assertEqual(keys[0], '[localhost]:22022 fake-host-key')
diff --git a/tests/unit/test_merger_repo.py b/tests/unit/test_merger_repo.py
index fb2f199..984644f 100644
--- a/tests/unit/test_merger_repo.py
+++ b/tests/unit/test_merger_repo.py
@@ -22,7 +22,7 @@
import testtools
from zuul.merger.merger import Repo
-from tests.base import ZuulTestCase, FIXTURE_DIR
+from tests.base import ZuulTestCase, FIXTURE_DIR, simple_layout
class TestMergerRepo(ZuulTestCase):
@@ -116,3 +116,63 @@
# This is created on the second fetch
self.assertTrue(os.path.exists(os.path.join(
self.workspace_root, 'stamp2')))
+
+
+class TestMergerWithAuthUrl(ZuulTestCase):
+ config_file = 'zuul-github-driver.conf'
+
+ git_url_with_auth = True
+
+ @simple_layout('layouts/merging-github.yaml', driver='github')
+ def test_changing_url(self):
+ """
+ This test checks that if getGitUrl returns different urls for the same
+ repo (which happens if an access token is part of the url) then the
+ remote urls are changed in the merger accordingly. This tests the
+ merger directly.
+ """
+
+ merger = self.executor_server.merger
+ repo = merger.getRepo('github', 'org/project')
+ first_url = repo.remote_url
+
+ repo = merger.getRepo('github', 'org/project')
+ second_url = repo.remote_url
+
+ # the urls should differ
+ self.assertNotEqual(first_url, second_url)
+
+ @simple_layout('layouts/merging-github.yaml', driver='github')
+ def test_changing_url_end_to_end(self):
+ """
+ This test checks that if getGitUrl returns different urls for the same
+ repo (which happens if an access token is part of the url) then the
+ remote urls are changed in the merger accordingly. This is an end to
+ end test.
+ """
+
+ A = self.fake_github.openFakePullRequest('org/project', 'master',
+ 'PR title')
+ self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
+ self.waitUntilSettled()
+ self.assertTrue(A.is_merged)
+
+ # get remote url of org/project in merger
+ repo = self.executor_server.merger.repos.get('github.com/org/project')
+ self.assertIsNotNone(repo)
+ git_repo = git.Repo(repo.local_path)
+ first_url = list(git_repo.remotes[0].urls)[0]
+
+ B = self.fake_github.openFakePullRequest('org/project', 'master',
+ 'PR title')
+ self.fake_github.emitEvent(B.getCommentAddedEvent('merge me again'))
+ self.waitUntilSettled()
+ self.assertTrue(B.is_merged)
+
+ repo = self.executor_server.merger.repos.get('github.com/org/project')
+ self.assertIsNotNone(repo)
+ git_repo = git.Repo(repo.local_path)
+ second_url = list(git_repo.remotes[0].urls)[0]
+
+ # the urls should differ
+ self.assertNotEqual(first_url, second_url)
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 5c586ca..6ec5232 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -23,6 +23,7 @@
from zuul import configloader
from zuul.lib import encryption
from zuul.lib import yamlutil as yaml
+import zuul.lib.connections
from tests.base import BaseTestCase, FIXTURE_DIR
@@ -36,6 +37,8 @@
class TestJob(BaseTestCase):
def setUp(self):
super(TestJob, self).setUp()
+ self.connections = zuul.lib.connections.ConnectionRegistry()
+ self.addCleanup(self.connections.stop)
self.connection = Dummy(connection_name='dummy_connection')
self.source = Dummy(canonical_hostname='git.example.com',
connection=self.connection)
@@ -47,6 +50,9 @@
self.pipeline = model.Pipeline('gate', self.layout)
self.layout.addPipeline(self.pipeline)
self.queue = model.ChangeQueue(self.pipeline)
+ self.pcontext = configloader.ParseContext(
+ self.connections, None, self.tenant, self.layout)
+ self.pcontext.setPipelines()
private_key_file = os.path.join(FIXTURE_DIR, 'private.pem')
with open(private_key_file, "rb") as f:
@@ -61,9 +67,7 @@
@property
def job(self):
- tenant = model.Tenant('tenant')
- layout = model.Layout(tenant)
- job = configloader.JobParser.fromYaml(tenant, layout, {
+ job = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'job',
@@ -147,33 +151,27 @@
job.applyVariant(bad_final)
def test_job_inheritance_job_tree(self):
- tenant = model.Tenant('tenant')
- layout = model.Layout(tenant)
-
- tpc = model.TenantProjectConfig(self.project)
- tenant.addUntrustedProject(tpc)
-
- pipeline = model.Pipeline('gate', layout)
- layout.addPipeline(pipeline)
+ pipeline = model.Pipeline('gate', self.layout)
+ self.layout.addPipeline(pipeline)
queue = model.ChangeQueue(pipeline)
- base = configloader.JobParser.fromYaml(tenant, layout, {
+ base = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'base',
'parent': None,
'timeout': 30,
})
- layout.addJob(base)
- python27 = configloader.JobParser.fromYaml(tenant, layout, {
+ self.layout.addJob(base)
+ python27 = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'python27',
'parent': 'base',
'timeout': 40,
})
- layout.addJob(python27)
- python27diablo = configloader.JobParser.fromYaml(tenant, layout, {
+ self.layout.addJob(python27)
+ python27diablo = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'python27',
@@ -182,13 +180,9 @@
],
'timeout': 50,
})
- layout.addJob(python27diablo)
+ self.layout.addJob(python27diablo)
- project_template_parser = configloader.ProjectTemplateParser(
- tenant, layout)
- project_parser = configloader.ProjectParser(
- tenant, layout, project_template_parser)
- project_config = project_parser.fromYaml([{
+ project_config = self.pcontext.project_parser.fromYaml([{
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'project',
@@ -199,12 +193,12 @@
]
}
}])
- layout.addProjectConfig(project_config)
+ self.layout.addProjectConfig(project_config)
change = model.Change(self.project)
change.branch = 'master'
item = queue.enqueueChange(change)
- item.layout = layout
+ item.layout = self.layout
self.assertTrue(base.changeMatches(change))
self.assertTrue(python27.changeMatches(change))
@@ -218,7 +212,7 @@
change.branch = 'stable/diablo'
item = queue.enqueueChange(change)
- item.layout = layout
+ item.layout = self.layout
self.assertTrue(base.changeMatches(change))
self.assertTrue(python27.changeMatches(change))
@@ -231,25 +225,19 @@
self.assertEqual(job.timeout, 70)
def test_inheritance_keeps_matchers(self):
- tenant = model.Tenant('tenant')
- layout = model.Layout(tenant)
-
- pipeline = model.Pipeline('gate', layout)
- layout.addPipeline(pipeline)
+ pipeline = model.Pipeline('gate', self.layout)
+ self.layout.addPipeline(pipeline)
queue = model.ChangeQueue(pipeline)
- project = model.Project('project', self.source)
- tpc = model.TenantProjectConfig(project)
- tenant.addUntrustedProject(tpc)
- base = configloader.JobParser.fromYaml(tenant, layout, {
+ base = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'base',
'parent': None,
'timeout': 30,
})
- layout.addJob(base)
- python27 = configloader.JobParser.fromYaml(tenant, layout, {
+ self.layout.addJob(base)
+ python27 = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'python27',
@@ -257,13 +245,9 @@
'timeout': 40,
'irrelevant-files': ['^ignored-file$'],
})
- layout.addJob(python27)
+ self.layout.addJob(python27)
- project_template_parser = configloader.ProjectTemplateParser(
- tenant, layout)
- project_parser = configloader.ProjectParser(
- tenant, layout, project_template_parser)
- project_config = project_parser.fromYaml([{
+ project_config = self.pcontext.project_parser.fromYaml([{
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'project',
@@ -273,13 +257,13 @@
]
}
}])
- layout.addProjectConfig(project_config)
+ self.layout.addProjectConfig(project_config)
- change = model.Change(project)
+ change = model.Change(self.project)
change.branch = 'master'
change.files = ['/COMMIT_MSG', 'ignored-file']
item = queue.enqueueChange(change)
- item.layout = layout
+ item.layout = self.layout
self.assertTrue(base.changeMatches(change))
self.assertFalse(python27.changeMatches(change))
@@ -288,28 +272,26 @@
self.assertEqual([], item.getJobs())
def test_job_source_project(self):
- tenant = self.tenant
- layout = self.layout
base_project = model.Project('base_project', self.source)
base_context = model.SourceContext(base_project, 'master',
'test', True)
tpc = model.TenantProjectConfig(base_project)
- tenant.addUntrustedProject(tpc)
+ self.tenant.addUntrustedProject(tpc)
- base = configloader.JobParser.fromYaml(tenant, layout, {
+ base = self.pcontext.job_parser.fromYaml({
'_source_context': base_context,
'_start_mark': self.start_mark,
'parent': None,
'name': 'base',
})
- layout.addJob(base)
+ self.layout.addJob(base)
other_project = model.Project('other_project', self.source)
other_context = model.SourceContext(other_project, 'master',
'test', True)
tpc = model.TenantProjectConfig(other_project)
- tenant.addUntrustedProject(tpc)
- base2 = configloader.JobParser.fromYaml(tenant, layout, {
+ self.tenant.addUntrustedProject(tpc)
+ base2 = self.pcontext.job_parser.fromYaml({
'_source_context': other_context,
'_start_mark': self.start_mark,
'name': 'base',
@@ -318,11 +300,11 @@
Exception,
"Job base in other_project is not permitted "
"to shadow job base in base_project"):
- layout.addJob(base2)
+ self.layout.addJob(base2)
def test_job_pipeline_allow_untrusted_secrets(self):
self.pipeline.post_review = False
- job = configloader.JobParser.fromYaml(self.tenant, self.layout, {
+ job = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'job',
@@ -332,11 +314,7 @@
self.layout.addJob(job)
- project_template_parser = configloader.ProjectTemplateParser(
- self.tenant, self.layout)
- project_parser = configloader.ProjectParser(
- self.tenant, self.layout, project_template_parser)
- project_config = project_parser.fromYaml(
+ project_config = self.pcontext.project_parser.fromYaml(
[{
'_source_context': self.context,
'_start_mark': self.start_mark,
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 573c8a6..f019ead 100755
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -301,6 +301,106 @@
self.assertIn('Unable to modify final job', A.messages[0])
+class TestBranchDeletion(ZuulTestCase):
+ tenant_config_file = 'config/branch-deletion/main.yaml'
+
+ def test_branch_delete(self):
+ # This tests a tenant reconfiguration on deleting a branch
+ # *after* an earlier failed tenant reconfiguration. This
+ # ensures that cached data are appropriately removed, even if
+ # we are recovering from an invalid config.
+ self.create_branch('org/project', 'stable/queens')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project', 'stable/queens'))
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - nonexistent-job
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'stable/queens', 'A',
+ files=file_dict)
+ A.setMerged()
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.delete_branch('org/project', 'stable/queens')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchDeletedEvent(
+ 'org/project', 'stable/queens'))
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - base
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
+ files=file_dict)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertEqual(B.reported, 1)
+ self.assertHistory([
+ dict(name='base', result='SUCCESS', changes='2,1')])
+
+ def test_branch_delete_full_reconfiguration(self):
+ # This tests a full configuration after deleting a branch
+ # *after* an earlier failed tenant reconfiguration. This
+ # ensures that cached data are appropriately removed, even if
+ # we are recovering from an invalid config.
+ self.create_branch('org/project', 'stable/queens')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project', 'stable/queens'))
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - nonexistent-job
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'stable/queens', 'A',
+ files=file_dict)
+ A.setMerged()
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.delete_branch('org/project', 'stable/queens')
+ self.sched.reconfigure(self.config)
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - base
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
+ files=file_dict)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertEqual(B.reported, 1)
+ self.assertHistory([
+ dict(name='base', result='SUCCESS', changes='2,1')])
+
+
class TestBranchTag(ZuulTestCase):
tenant_config_file = 'config/branch-tag/main.yaml'
@@ -1933,6 +2033,8 @@
tenant_config_file = 'config/ansible/main.yaml'
def test_playbook(self):
+ # This test runs a bit long and needs extra time.
+ self.wait_timeout = 120
# Keep the jobdir around so we can inspect contents if an
# assert fails.
self.executor_server.keep_jobdir = True
@@ -1948,6 +2050,12 @@
build_timeout = self.getJobFromHistory('timeout')
with self.jobLog(build_timeout):
self.assertEqual(build_timeout.result, 'TIMED_OUT')
+ post_flag_path = os.path.join(self.test_root, build_timeout.uuid +
+ '.post.flag')
+ self.assertTrue(os.path.exists(post_flag_path))
+ build_post_timeout = self.getJobFromHistory('post-timeout')
+ with self.jobLog(build_post_timeout):
+ self.assertEqual(build_post_timeout.result, 'POST_FAILURE')
build_faillocal = self.getJobFromHistory('faillocal')
with self.jobLog(build_faillocal):
self.assertEqual(build_faillocal.result, 'FAILURE')
diff --git a/tests/unit/test_web.py b/tests/unit/test_web.py
index b5ebe9f..602209f 100644
--- a/tests/unit/test_web.py
+++ b/tests/unit/test_web.py
@@ -22,20 +22,30 @@
import urllib
import time
import socket
-from unittest import skip
-
-import webob
import zuul.web
from tests.base import ZuulTestCase, FIXTURE_DIR
-class TestWeb(ZuulTestCase):
+class FakeConfig(object):
+
+ def __init__(self, config):
+ self.config = config or {}
+
+ def has_option(self, section, option):
+ return option in self.config.get(section, {})
+
+ def get(self, section, option):
+ return self.config.get(section, {}).get(option)
+
+
+class BaseTestWeb(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
+ config_ini_data = {}
def setUp(self):
- super(TestWeb, self).setUp()
+ super(BaseTestWeb, self).setUp()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
@@ -45,10 +55,13 @@
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
+ self.zuul_ini_config = FakeConfig(self.config_ini_data)
# Start the web server
self.web = zuul.web.ZuulWeb(
listen_address='127.0.0.1', listen_port=0,
- gear_server='127.0.0.1', gear_port=self.gearman_server.port)
+ gear_server='127.0.0.1', gear_port=self.gearman_server.port,
+ info=zuul.model.WebInfo.fromConfig(self.zuul_ini_config)
+ )
loop = asyncio.new_event_loop()
loop.set_debug(True)
ws_thread = threading.Thread(target=self.web.run, args=(loop,))
@@ -75,7 +88,10 @@
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
- super(TestWeb, self).tearDown()
+ super(BaseTestWeb, self).tearDown()
+
+
+class TestWeb(BaseTestWeb):
def test_web_status(self):
"Test that we can retrieve JSON status info"
@@ -89,7 +105,7 @@
self.waitUntilSettled()
req = urllib.request.Request(
- "http://localhost:%s/tenant-one/status.json" % self.port)
+ "http://localhost:%s/tenant-one/status" % self.port)
f = urllib.request.urlopen(req)
headers = f.info()
self.assertIn('Content-Length', headers)
@@ -184,7 +200,6 @@
"http://localhost:%s/status/foo" % self.port)
self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, req)
- @skip("This is not supported by zuul-web")
def test_web_find_change(self):
# can we filter by change id
req = urllib.request.Request(
@@ -213,24 +228,84 @@
f = urllib.request.urlopen(req)
self.assertEqual(f.read(), public_pem)
- @skip("This may not apply to zuul-web")
- def test_web_custom_handler(self):
- def custom_handler(path, tenant_name, request):
- return webob.Response(body='ok')
-
- self.webapp.register_path('/custom', custom_handler)
- req = urllib.request.Request(
- "http://localhost:%s/custom" % self.port)
- f = urllib.request.urlopen(req)
- self.assertEqual(b'ok', f.read())
-
- self.webapp.unregister_path('/custom')
- self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, req)
-
- @skip("This returns a 500")
def test_web_404_on_unknown_tenant(self):
req = urllib.request.Request(
- "http://localhost:{}/non-tenant/status.json".format(self.port))
+ "http://localhost:{}/non-tenant/status".format(self.port))
e = self.assertRaises(
urllib.error.HTTPError, urllib.request.urlopen, req)
self.assertEqual(404, e.code)
+
+
+class TestInfo(BaseTestWeb):
+
+ def setUp(self):
+ super(TestInfo, self).setUp()
+ web_config = self.config_ini_data.get('web', {})
+ self.websocket_url = web_config.get('websocket_url')
+ self.stats_url = web_config.get('stats_url')
+ statsd_config = self.config_ini_data.get('statsd', {})
+ self.stats_prefix = statsd_config.get('prefix')
+
+ def test_info(self):
+ req = urllib.request.Request(
+ "http://localhost:%s/info" % self.port)
+ f = urllib.request.urlopen(req)
+ info = json.loads(f.read().decode('utf8'))
+ self.assertEqual(
+ info, {
+ "info": {
+ "endpoint": "http://localhost:%s" % self.port,
+ "capabilities": {
+ "job_history": False
+ },
+ "stats": {
+ "url": self.stats_url,
+ "prefix": self.stats_prefix,
+ "type": "graphite",
+ },
+ "websocket_url": self.websocket_url,
+ }
+ })
+
+ def test_tenant_info(self):
+ req = urllib.request.Request(
+ "http://localhost:%s/tenant-one/info" % self.port)
+ f = urllib.request.urlopen(req)
+ info = json.loads(f.read().decode('utf8'))
+ self.assertEqual(
+ info, {
+ "info": {
+ "endpoint": "http://localhost:%s" % self.port,
+ "tenant": "tenant-one",
+ "capabilities": {
+ "job_history": False
+ },
+ "stats": {
+ "url": self.stats_url,
+ "prefix": self.stats_prefix,
+ "type": "graphite",
+ },
+ "websocket_url": self.websocket_url,
+ }
+ })
+
+
+class TestWebSocketInfo(TestInfo):
+
+ config_ini_data = {
+ 'web': {
+ 'websocket_url': 'wss://ws.example.com'
+ }
+ }
+
+
+class TestGraphiteUrl(TestInfo):
+
+ config_ini_data = {
+ 'statsd': {
+ 'prefix': 'example'
+ },
+ 'web': {
+ 'stats_url': 'https://graphite.example.com',
+ }
+ }
diff --git a/tools/nodepool-integration-setup.sh b/tools/nodepool-integration-setup.sh
deleted file mode 100755
index 58c39cf..0000000
--- a/tools/nodepool-integration-setup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash -xe
-
-/usr/zuul-env/bin/zuul-cloner --workspace /tmp --cache-dir /opt/git \
- git://git.openstack.org openstack-infra/nodepool
-
-ln -s /tmp/nodepool/log $HOME/logs
-
-cd /tmp/openstack-infra/nodepool
-/usr/local/jenkins/slave_scripts/install-distro-packages.sh
-sudo pip install .
-
-bash -xe ./tools/zuul-nodepool-integration/start.sh
diff --git a/tools/zuul-changes.py b/tools/zuul-changes.py
index d258354..cdedf51 100755
--- a/tools/zuul-changes.py
+++ b/tools/zuul-changes.py
@@ -24,7 +24,7 @@
parser.add_argument('pipeline', help='The name of the Zuul pipeline')
options = parser.parse_args()
-data = urllib2.urlopen('%s/status.json' % options.url).read()
+data = urllib2.urlopen('%s/status' % options.url).read()
data = json.loads(data)
for pipeline in data['pipelines']:
diff --git a/zuul/cmd/web.py b/zuul/cmd/web.py
index abdb1cb..8b0e3ee 100755
--- a/zuul/cmd/web.py
+++ b/zuul/cmd/web.py
@@ -20,6 +20,7 @@
import threading
import zuul.cmd
+import zuul.model
import zuul.web
from zuul.lib.config import get_default
@@ -33,8 +34,11 @@
self.web.stop()
def _run(self):
+ info = zuul.model.WebInfo.fromConfig(self.config)
+
params = dict()
+ params['info'] = info
params['listen_address'] = get_default(self.config,
'web', 'listen_address',
'127.0.0.1')
diff --git a/zuul/configloader.py b/zuul/configloader.py
index bd2ce3a..3511f96 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -365,11 +365,12 @@
schema = vs.Schema(pragma)
- def __init__(self):
+ def __init__(self, pcontext):
self.log = logging.getLogger("zuul.PragmaParser")
+ self.pcontext = pcontext
def fromYaml(self, conf):
- with configuration_exceptions('project-template', conf):
+ with configuration_exceptions('pragma', conf):
self.schema(conf)
bm = conf.get('implied-branch-matchers')
@@ -384,8 +385,13 @@
class NodeSetParser(object):
- @staticmethod
- def getSchema(anonymous=False):
+ def __init__(self, pcontext):
+ self.log = logging.getLogger("zuul.NodeSetParser")
+ self.pcontext = pcontext
+ self.schema = self.getSchema(False)
+ self.anon_schema = self.getSchema(True)
+
+ def getSchema(self, anonymous=False):
node = {vs.Required('name'): to_list(str),
vs.Required('label'): str,
}
@@ -404,9 +410,11 @@
nodeset[vs.Required('name')] = str
return vs.Schema(nodeset)
- @staticmethod
- def fromYaml(conf, anonymous=False):
- NodeSetParser.getSchema(anonymous)(conf)
+ def fromYaml(self, conf, anonymous=False):
+ if anonymous:
+ self.anon_schema(conf)
+ else:
+ self.schema(conf)
ns = model.NodeSet(conf.get('name'), conf.get('_source_context'))
node_names = set()
group_names = set()
@@ -432,8 +440,12 @@
class SecretParser(object):
- @staticmethod
- def getSchema():
+ def __init__(self, pcontext):
+ self.log = logging.getLogger("zuul.SecretParser")
+ self.pcontext = pcontext
+ self.schema = self.getSchema()
+
+ def getSchema(self):
data = {str: vs.Any(str, EncryptedPKCS1_OAEP)}
secret = {vs.Required('name'): str,
@@ -444,10 +456,9 @@
return vs.Schema(secret)
- @staticmethod
- def fromYaml(layout, conf):
+ def fromYaml(self, conf):
with configuration_exceptions('secret', conf):
- SecretParser.getSchema()(conf)
+ self.schema(conf)
s = model.Secret(conf['name'], conf['_source_context'])
s.secret_data = conf['data']
return s
@@ -491,6 +502,7 @@
# validation happens in NodeSetParser
'nodeset': vs.Any(dict, str),
'timeout': int,
+ 'post-timeout': int,
'attempts': int,
'pre-run': to_list(str),
'post-run': to_list(str),
@@ -500,6 +512,8 @@
'roles': to_list(role),
'required-projects': to_list(vs.Any(job_project, str)),
'vars': dict,
+ 'host_vars': {str: dict},
+ 'group_vars': {str: dict},
'dependencies': to_list(str),
'allowed-projects': to_list(str),
'override-branch': str,
@@ -518,6 +532,7 @@
'abstract',
'protected',
'timeout',
+ 'post-timeout',
'workspace',
'voting',
'hold-following-changes',
@@ -531,8 +546,11 @@
'override-checkout',
]
- @staticmethod
- def _getImpliedBranches(tenant, job):
+ def __init__(self, pcontext):
+ self.log = logging.getLogger("zuul.JobParser")
+ self.pcontext = pcontext
+
+ def _getImpliedBranches(self, job):
# If the user has set a pragma directive for this, use the
# value (if unset, the value is None).
if job.source_context.implied_branch_matchers is True:
@@ -549,7 +567,8 @@
# If this project only has one branch, don't create implied
# branch matchers. This way central job repos can work.
- branches = tenant.getProjectBranches(job.source_context.project)
+ branches = self.pcontext.tenant.getProjectBranches(
+ job.source_context.project)
if len(branches) == 1:
return None
@@ -557,12 +576,11 @@
return job.source_context.implied_branches
return [job.source_context.branch]
- @staticmethod
- def fromYaml(tenant, layout, conf, project_pipeline=False,
- name=None, validate=True):
+ def fromYaml(self, conf, project_pipeline=False, name=None,
+ validate=True):
if validate:
with configuration_exceptions('job', conf):
- JobParser.schema(conf)
+ self.schema(conf)
if name is None:
name = conf['name']
@@ -596,14 +614,16 @@
for secret_config in as_list(conf.get('secrets', [])):
if isinstance(secret_config, str):
secret_name = secret_config
- secret = layout.secrets.get(secret_name)
+ secret = self.pcontext.layout.secrets.get(secret_name)
else:
secret_name = secret_config['name']
- secret = layout.secrets.get(secret_config['secret'])
+ secret = self.pcontext.layout.secrets.get(
+ secret_config['secret'])
if secret is None:
raise SecretNotFoundError(secret_name)
- if secret_name == 'zuul':
- raise Exception("Secrets named 'zuul' are not allowed.")
+ if secret_name == 'zuul' or secret_name == 'nodepool':
+ raise Exception("Secrets named 'zuul' or 'nodepool' "
+ "are not allowed.")
if not secret.source_context.isSameProject(job.source_context):
raise Exception(
"Unable to use secret %s. Secrets must be "
@@ -623,9 +643,15 @@
if secrets and not conf['_source_context'].trusted:
job.post_review = True
- if conf.get('timeout') and tenant.max_job_timeout != -1 and \
- int(conf['timeout']) > tenant.max_job_timeout:
- raise MaxTimeoutError(job, tenant)
+ if (conf.get('timeout') and
+ self.pcontext.tenant.max_job_timeout != -1 and
+ int(conf['timeout']) > self.pcontext.tenant.max_job_timeout):
+ raise MaxTimeoutError(job, self.pcontext.tenant)
+
+ if (conf.get('post-timeout') and
+ self.pcontext.tenant.max_job_timeout != -1 and
+ int(conf['post-timeout']) > self.pcontext.tenant.max_job_timeout):
+ raise MaxTimeoutError(job, self.pcontext.tenant)
if 'post-review' in conf:
if conf['post-review']:
@@ -640,13 +666,13 @@
if 'roles' in conf:
for role in conf.get('roles', []):
if 'zuul' in role:
- r = JobParser._makeZuulRole(tenant, job, role)
+ r = self._makeZuulRole(job, role)
if r:
roles.append(r)
# A job's repo should be an implicit role source for that job,
# but not in a project-pipeline variant.
if not project_pipeline:
- r = JobParser._makeImplicitRole(job)
+ r = self._makeImplicitRole(job)
roles.insert(0, r)
job.addRoles(roles)
@@ -668,7 +694,7 @@
job.roles, secrets)
job.run = (run,)
- for k in JobParser.simple_attributes:
+ for k in self.simple_attributes:
a = k.replace('-', '_')
if k in conf:
setattr(job, a, conf[k])
@@ -676,14 +702,15 @@
conf_nodeset = conf['nodeset']
if isinstance(conf_nodeset, str):
# This references an existing named nodeset in the layout.
- ns = layout.nodesets.get(conf_nodeset)
+ ns = self.pcontext.layout.nodesets.get(conf_nodeset)
if ns is None:
raise NodesetNotFoundError(conf_nodeset)
else:
- ns = NodeSetParser.fromYaml(conf_nodeset, anonymous=True)
- if tenant.max_nodes_per_job != -1 and \
- len(ns) > tenant.max_nodes_per_job:
- raise MaxNodeError(job, tenant)
+ ns = self.pcontext.nodeset_parser.fromYaml(
+ conf_nodeset, anonymous=True)
+ if self.pcontext.tenant.max_nodes_per_job != -1 and \
+ len(ns) > self.pcontext.tenant.max_nodes_per_job:
+ raise MaxNodeError(job, self.pcontext.tenant)
job.nodeset = ns
if 'required-projects' in conf:
@@ -699,7 +726,8 @@
project_name = project
project_override_branch = None
project_override_checkout = None
- (trusted, project) = tenant.getProject(project_name)
+ (trusted, project) = self.pcontext.tenant.getProject(
+ project_name)
if project is None:
raise Exception("Unknown project %s" % (project_name,))
job_project = model.JobProject(project.canonical_name,
@@ -716,15 +744,30 @@
variables = conf.get('vars', None)
if variables:
- if 'zuul' in variables:
- raise Exception("Variables named 'zuul' are not allowed.")
+ if 'zuul' in variables or 'nodepool' in variables:
+ raise Exception("Variables named 'zuul' or 'nodepool' "
+ "are not allowed.")
job.variables = variables
+ host_variables = conf.get('host_vars', None)
+ if host_variables:
+ for host, hvars in host_variables.items():
+ if 'zuul' in hvars or 'nodepool' in hvars:
+ raise Exception("Variables named 'zuul' or 'nodepool' "
+ "are not allowed.")
+ job.host_variables = host_variables
+ group_variables = conf.get('group_vars', None)
+ if group_variables:
+ for group, gvars in group_variables.items():
+ if 'zuul' in gvars or 'nodepool' in gvars:
+ raise Exception("Variables named 'zuul' or 'nodepool' "
+ "are not allowed.")
+ job.group_variables = group_variables
allowed_projects = conf.get('allowed-projects', None)
if allowed_projects:
allowed = []
for p in as_list(allowed_projects):
- (trusted, project) = tenant.getProject(p)
+ (trusted, project) = self.pcontext.tenant.getProject(p)
if project is None:
raise Exception("Unknown project %s" % (p,))
allowed.append(project.name)
@@ -732,7 +775,7 @@
branches = None
if ('branches' not in conf):
- branches = JobParser._getImpliedBranches(tenant, job)
+ branches = self._getImpliedBranches(job)
if (not branches) and ('branches' in conf):
branches = as_list(conf['branches'])
if branches:
@@ -750,11 +793,10 @@
matchers)
return job
- @staticmethod
- def _makeZuulRole(tenant, job, role):
+ def _makeZuulRole(self, job, role):
name = role['zuul'].split('/')[-1]
- (trusted, project) = tenant.getProject(role['zuul'])
+ (trusted, project) = self.pcontext.tenant.getProject(role['zuul'])
if project is None:
return None
@@ -762,8 +804,7 @@
project.connection_name,
project.name)
- @staticmethod
- def _makeImplicitRole(job):
+ def _makeImplicitRole(self, job):
project = job.source_context.project
name = project.name.split('/')[-1]
name = JobParser.ANSIBLE_ROLE_RE.sub('', name)
@@ -774,10 +815,9 @@
class ProjectTemplateParser(object):
- def __init__(self, tenant, layout):
+ def __init__(self, pcontext):
self.log = logging.getLogger("zuul.ProjectTemplateParser")
- self.tenant = tenant
- self.layout = layout
+ self.pcontext = pcontext
self.schema = self.getSchema()
def getSchema(self):
@@ -799,7 +839,7 @@
'jobs': job_list,
}
- for p in self.layout.pipelines.values():
+ for p in self.pcontext.layout.pipelines.values():
project_template[p.name] = pipeline_contents
return vs.Schema(project_template)
@@ -810,7 +850,7 @@
source_context = conf['_source_context']
project_template = model.ProjectConfig(conf['name'], source_context)
start_mark = conf['_start_mark']
- for pipeline in self.layout.pipelines.values():
+ for pipeline in self.pcontext.layout.pipelines.values():
conf_pipeline = conf.get(pipeline.name)
if not conf_pipeline:
continue
@@ -839,19 +879,17 @@
# validate that the job is existing
with configuration_exceptions('project or project-template',
attrs):
- self.layout.getJob(jobname)
+ self.pcontext.layout.getJob(jobname)
- job_list.addJob(JobParser.fromYaml(self.tenant, self.layout,
- attrs, project_pipeline=True,
- name=jobname, validate=False))
+ job_list.addJob(self.pcontext.job_parser.fromYaml(
+ attrs, project_pipeline=True,
+ name=jobname, validate=False))
class ProjectParser(object):
- def __init__(self, tenant, layout, project_template_parser):
+ def __init__(self, pcontext):
self.log = logging.getLogger("zuul.ProjectParser")
- self.tenant = tenant
- self.layout = layout
- self.project_template_parser = project_template_parser
+ self.pcontext = pcontext
self.schema = self.getSchema()
def getSchema(self):
@@ -874,7 +912,7 @@
'jobs': job_list
}
- for p in self.layout.pipelines.values():
+ for p in self.pcontext.layout.pipelines.values():
project[p.name] = pipeline_contents
return vs.Schema(project)
@@ -885,7 +923,7 @@
with configuration_exceptions('project', conf_list[0]):
project_name = conf_list[0]['name']
- (trusted, project) = self.tenant.getProject(project_name)
+ (trusted, project) = self.pcontext.tenant.getProject(project_name)
if project is None:
raise ProjectNotFoundError(project_name)
project_config = model.ProjectConfig(project.canonical_name)
@@ -903,16 +941,16 @@
# parsing the definition as a template, then applying
# all of the templates, including the newly parsed
# one, in order.
- project_template = self.project_template_parser.fromYaml(
- conf, validate=False)
+ project_template = self.pcontext.project_template_parser.\
+ fromYaml(conf, validate=False)
# If this project definition is in a place where it
# should get implied branch matchers, set it.
if (not conf['_source_context'].trusted):
implied_branch = conf['_source_context'].branch
for name in conf_templates:
- if name not in self.layout.project_templates:
+ if name not in self.pcontext.layout.project_templates:
raise TemplateNotFoundError(name)
- configs.extend([(self.layout.project_templates[name],
+ configs.extend([(self.pcontext.layout.project_templates[name],
implied_branch)
for name in conf_templates])
configs.append((project_template, implied_branch))
@@ -930,7 +968,7 @@
project_config.merge_mode = model.MERGER_MAP['merge-resolve']
if project_config.default_branch is None:
project_config.default_branch = 'master'
- for pipeline in self.layout.pipelines.values():
+ for pipeline in self.pcontext.layout.pipelines.values():
project_pipeline = model.ProjectPipelineConfig()
queue_name = None
debug = False
@@ -958,8 +996,6 @@
class PipelineParser(object):
- log = logging.getLogger("zuul.PipelineParser")
-
# A set of reporter configuration keys to action mapping
reporter_actions = {
'start': 'start_actions',
@@ -969,8 +1005,12 @@
'disabled': 'disabled_actions',
}
- @staticmethod
- def getDriverSchema(dtype, connections):
+ def __init__(self, pcontext):
+ self.log = logging.getLogger("zuul.PipelineParser")
+ self.pcontext = pcontext
+ self.schema = self.getSchema()
+
+ def getDriverSchema(self, dtype):
methods = {
'trigger': 'getTriggerSchema',
'reporter': 'getReporterSchema',
@@ -980,15 +1020,15 @@
schema = {}
# Add the configured connections as available layout options
- for connection_name, connection in connections.connections.items():
+ for connection_name, connection in \
+ self.pcontext.connections.connections.items():
method = getattr(connection.driver, methods[dtype], None)
if method:
schema[connection_name] = to_list(method())
return schema
- @staticmethod
- def getSchema(layout, connections):
+ def getSchema(self):
manager = vs.Any('independent',
'dependent')
@@ -1021,23 +1061,18 @@
'_source_context': model.SourceContext,
'_start_mark': ZuulMark,
}
- pipeline['require'] = PipelineParser.getDriverSchema('require',
- connections)
- pipeline['reject'] = PipelineParser.getDriverSchema('reject',
- connections)
- pipeline['trigger'] = vs.Required(
- PipelineParser.getDriverSchema('trigger', connections))
+ pipeline['require'] = self.getDriverSchema('require')
+ pipeline['reject'] = self.getDriverSchema('reject')
+ pipeline['trigger'] = vs.Required(self.getDriverSchema('trigger'))
for action in ['start', 'success', 'failure', 'merge-failure',
'disabled']:
- pipeline[action] = PipelineParser.getDriverSchema('reporter',
- connections)
+ pipeline[action] = self.getDriverSchema('reporter')
return vs.Schema(pipeline)
- @staticmethod
- def fromYaml(layout, connections, scheduler, conf):
+ def fromYaml(self, conf):
with configuration_exceptions('pipeline', conf):
- PipelineParser.getSchema(layout, connections)(conf)
- pipeline = model.Pipeline(conf['name'], layout)
+ self.schema(conf)
+ pipeline = model.Pipeline(conf['name'], self.pcontext.layout)
pipeline.description = conf.get('description')
precedence = model.PRECEDENCE_MAP[conf.get('precedence')]
@@ -1062,13 +1097,13 @@
pipeline.post_review = conf.get(
'post-review', False)
- for conf_key, action in PipelineParser.reporter_actions.items():
+ for conf_key, action in self.reporter_actions.items():
reporter_set = []
if conf.get(conf_key):
for reporter_name, params \
in conf.get(conf_key).items():
- reporter = connections.getReporter(reporter_name,
- params)
+ reporter = self.pcontext.connections.getReporter(
+ reporter_name, params)
reporter.setAction(conf_key)
reporter_set.append(reporter)
setattr(pipeline, action, reporter_set)
@@ -1094,26 +1129,27 @@
manager_name = conf['manager']
if manager_name == 'dependent':
manager = zuul.manager.dependent.DependentPipelineManager(
- scheduler, pipeline)
+ self.pcontext.scheduler, pipeline)
elif manager_name == 'independent':
manager = zuul.manager.independent.IndependentPipelineManager(
- scheduler, pipeline)
+ self.pcontext.scheduler, pipeline)
pipeline.setManager(manager)
- layout.pipelines[conf['name']] = pipeline
+ self.pcontext.layout.pipelines[conf['name']] = pipeline
for source_name, require_config in conf.get('require', {}).items():
- source = connections.getSource(source_name)
+ source = self.pcontext.connections.getSource(source_name)
manager.ref_filters.extend(
source.getRequireFilters(require_config))
for source_name, reject_config in conf.get('reject', {}).items():
- source = connections.getSource(source_name)
+ source = self.pcontext.connections.getSource(source_name)
manager.ref_filters.extend(
source.getRejectFilters(reject_config))
for trigger_name, trigger_config in conf.get('trigger').items():
- trigger = connections.getTrigger(trigger_name, trigger_config)
+ trigger = self.pcontext.connections.getTrigger(
+ trigger_name, trigger_config)
pipeline.triggers.append(trigger)
manager.event_filters.extend(
trigger.getEventFilters(conf['trigger'][trigger_name]))
@@ -1122,8 +1158,12 @@
class SemaphoreParser(object):
- @staticmethod
- def getSchema():
+ def __init__(self, pcontext):
+ self.log = logging.getLogger("zuul.SemaphoreParser")
+ self.pcontext = pcontext
+ self.schema = self.getSchema()
+
+ def getSchema(self):
semaphore = {vs.Required('name'): str,
'max': int,
'_source_context': model.SourceContext,
@@ -1132,16 +1172,43 @@
return vs.Schema(semaphore)
- @staticmethod
- def fromYaml(conf):
- SemaphoreParser.getSchema()(conf)
+ def fromYaml(self, conf):
+ self.schema(conf)
semaphore = model.Semaphore(conf['name'], conf.get('max', 1))
semaphore.source_context = conf.get('_source_context')
return semaphore
+class ParseContext(object):
+ """Hold information about a particular run of the parser"""
+
+ def __init__(self, connections, scheduler, tenant, layout):
+ self.connections = connections
+ self.scheduler = scheduler
+ self.tenant = tenant
+ self.layout = layout
+ self.pragma_parser = PragmaParser(self)
+ self.pipeline_parser = PipelineParser(self)
+ self.nodeset_parser = NodeSetParser(self)
+ self.secret_parser = SecretParser(self)
+ self.job_parser = JobParser(self)
+ self.semaphore_parser = SemaphoreParser(self)
+ self.project_template_parser = None
+ self.project_parser = None
+
+ def setPipelines(self):
+ # Call after pipelines are fixed in the layout to construct
+ # the project parser, which relies on them.
+ self.project_template_parser = ProjectTemplateParser(self)
+ self.project_parser = ProjectParser(self)
+
+
class TenantParser(object):
- log = logging.getLogger("zuul.TenantParser")
+ def __init__(self, connections, scheduler, merger):
+ self.log = logging.getLogger("zuul.TenantParser")
+ self.connections = connections
+ self.scheduler = scheduler
+ self.merger = merger
classes = vs.Any('pipeline', 'job', 'semaphore', 'project',
'project-template', 'nodeset', 'secret')
@@ -1168,36 +1235,31 @@
'untrusted-projects': to_list(project_or_group),
})
- @staticmethod
- def validateTenantSources(connections):
+ def validateTenantSources(self):
def v(value, path=[]):
if isinstance(value, dict):
for k, val in value.items():
- connections.getSource(k)
- TenantParser.validateTenantSource(val, path + [k])
+ self.connections.getSource(k)
+ self.validateTenantSource(val, path + [k])
else:
raise vs.Invalid("Invalid tenant source", path)
return v
- @staticmethod
- def validateTenantSource(value, path=[]):
- TenantParser.tenant_source(value)
+ def validateTenantSource(self, value, path=[]):
+ self.tenant_source(value)
- @staticmethod
- def getSchema(connections=None):
+ def getSchema(self):
tenant = {vs.Required('name'): str,
'max-nodes-per-job': int,
'max-job-timeout': int,
- 'source': TenantParser.validateTenantSources(connections),
+ 'source': self.validateTenantSources(),
'exclude-unprotected-branches': bool,
'default-parent': str,
}
return vs.Schema(tenant)
- @staticmethod
- def fromYaml(base, project_key_dir, connections, scheduler, merger, conf,
- old_tenant):
- TenantParser.getSchema(connections)(conf)
+ def fromYaml(self, base, project_key_dir, conf, old_tenant):
+ self.getSchema()(conf)
tenant = model.Tenant(conf['name'])
if conf.get('max-nodes-per-job') is not None:
tenant.max_nodes_per_job = conf['max-nodes-per-job']
@@ -1212,48 +1274,41 @@
unparsed_config = model.UnparsedTenantConfig()
# tpcs is TenantProjectConfigs
config_tpcs, untrusted_tpcs = \
- TenantParser._loadTenantProjects(
- project_key_dir, connections, conf)
+ self._loadTenantProjects(project_key_dir, conf)
for tpc in config_tpcs:
tenant.addConfigProject(tpc)
for tpc in untrusted_tpcs:
tenant.addUntrustedProject(tpc)
for tpc in config_tpcs + untrusted_tpcs:
- TenantParser._getProjectBranches(tenant, tpc, old_tenant)
- TenantParser._resolveShadowProjects(tenant, tpc)
+ self._getProjectBranches(tenant, tpc, old_tenant)
+ self._resolveShadowProjects(tenant, tpc)
if old_tenant:
cached = True
else:
cached = False
tenant.config_projects_config, tenant.untrusted_projects_config = \
- TenantParser._loadTenantInRepoLayouts(merger, connections,
- tenant.config_projects,
- tenant.untrusted_projects,
- cached, tenant)
- unparsed_config.extend(tenant.config_projects_config, tenant)
- unparsed_config.extend(tenant.untrusted_projects_config, tenant)
- tenant.layout = TenantParser._parseLayout(base, tenant,
- unparsed_config,
- scheduler,
- connections)
+ self._loadTenantInRepoLayouts(tenant.config_projects,
+ tenant.untrusted_projects,
+ cached, tenant)
+ unparsed_config.extend(tenant.config_projects_config)
+ unparsed_config.extend(tenant.untrusted_projects_config)
+ tenant.layout = self._parseLayout(base, tenant, unparsed_config)
return tenant
- @staticmethod
- def _resolveShadowProjects(tenant, tpc):
+ def _resolveShadowProjects(self, tenant, tpc):
shadow_projects = []
for sp in tpc.shadow_projects:
shadow_projects.append(tenant.getProject(sp)[1])
tpc.shadow_projects = frozenset(shadow_projects)
- @staticmethod
- def _getProjectBranches(tenant, tpc, old_tenant):
+ def _getProjectBranches(self, tenant, tpc, old_tenant):
# If we're performing a tenant reconfiguration, we will have
# an old_tenant object, however, we may be doing so because of
# a branch creation event, so if we don't have any cached
# data, query the branches again as well.
- if old_tenant and tpc.project.unparsed_config:
+ if old_tenant and tpc.project.unparsed_branch_config:
branches = old_tenant.getProjectBranches(tpc.project)[:]
else:
branches = sorted(tpc.project.source.getProjectBranches(
@@ -1263,17 +1318,15 @@
branches = ['master'] + branches
tpc.branches = branches
- @staticmethod
- def _loadProjectKeys(project_key_dir, connection_name, project):
+ def _loadProjectKeys(self, project_key_dir, connection_name, project):
project.private_key_file = (
os.path.join(project_key_dir, connection_name,
project.name + '.pem'))
- TenantParser._generateKeys(project)
- TenantParser._loadKeys(project)
+ self._generateKeys(project)
+ self._loadKeys(project)
- @staticmethod
- def _generateKeys(project):
+ def _generateKeys(self, project):
if os.path.isfile(project.private_key_file):
return
@@ -1281,7 +1334,7 @@
if not os.path.isdir(key_dir):
os.makedirs(key_dir, 0o700)
- TenantParser.log.info(
+ self.log.info(
"Generating RSA keypair for project %s" % (project.name,)
)
private_key, public_key = encryption.generate_rsa_keypair()
@@ -1289,7 +1342,7 @@
# Dump keys to filesystem. We only save the private key
# because the public key can be constructed from it.
- TenantParser.log.info(
+ self.log.info(
"Saving RSA keypair for project %s to %s" % (
project.name, project.private_key_file)
)
@@ -1344,14 +1397,12 @@
return tenant_project_config
- @staticmethod
- def _getProjects(source, conf, current_include):
+ def _getProjects(self, source, conf, current_include):
# Return a project object whether conf is a dict or a str
projects = []
if isinstance(conf, str):
# A simple project name string
- projects.append(TenantParser._getProject(
- source, conf, current_include))
+ projects.append(self._getProject(source, conf, current_include))
elif len(conf.keys()) > 1 and 'projects' in conf:
# This is a project group
if 'include' in conf:
@@ -1362,19 +1413,18 @@
exclude = set(as_list(conf['exclude']))
current_include = current_include - exclude
for project in conf['projects']:
- sub_projects = TenantParser._getProjects(
+ sub_projects = self._getProjects(
source, project, current_include)
projects.extend(sub_projects)
elif len(conf.keys()) == 1:
# A project with overrides
- projects.append(TenantParser._getProject(
+ projects.append(self._getProject(
source, conf, current_include))
else:
raise Exception("Unable to parse project %s", conf)
return projects
- @staticmethod
- def _loadTenantProjects(project_key_dir, connections, conf_tenant):
+ def _loadTenantProjects(self, project_key_dir, conf_tenant):
config_projects = []
untrusted_projects = []
@@ -1382,38 +1432,32 @@
'secret', 'project-template', 'nodeset'])
for source_name, conf_source in conf_tenant.get('source', {}).items():
- source = connections.getSource(source_name)
+ source = self.connections.getSource(source_name)
current_include = default_include
for conf_repo in conf_source.get('config-projects', []):
# tpcs = TenantProjectConfigs
- tpcs = TenantParser._getProjects(source, conf_repo,
- current_include)
+ tpcs = self._getProjects(source, conf_repo, current_include)
for tpc in tpcs:
- TenantParser._loadProjectKeys(
+ self._loadProjectKeys(
project_key_dir, source_name, tpc.project)
config_projects.append(tpc)
current_include = frozenset(default_include - set(['pipeline']))
for conf_repo in conf_source.get('untrusted-projects', []):
- tpcs = TenantParser._getProjects(source, conf_repo,
- current_include)
+ tpcs = self._getProjects(source, conf_repo,
+ current_include)
for tpc in tpcs:
- TenantParser._loadProjectKeys(
+ self._loadProjectKeys(
project_key_dir, source_name, tpc.project)
untrusted_projects.append(tpc)
return config_projects, untrusted_projects
- @staticmethod
- def _loadTenantInRepoLayouts(merger, connections, config_projects,
- untrusted_projects, cached, tenant):
+ def _loadTenantInRepoLayouts(self, config_projects, untrusted_projects,
+ cached, tenant):
config_projects_config = model.UnparsedTenantConfig()
untrusted_projects_config = model.UnparsedTenantConfig()
- # project -> config; these will replace
- # project.unparsed_config if this method succesfully
- # completes
- new_project_unparsed_config = {}
# project -> branch -> config; these will replace
# project.unparsed_branch_config if this method successfully
# completes
@@ -1426,21 +1470,28 @@
# data and is inserted in the ordered jobs list for later
# processing.
class CachedDataJob(object):
- def __init__(self, config_project, project):
+ def __init__(self, config_project, project, branch):
self.config_project = config_project
self.project = project
+ self.branch = branch
for project in config_projects:
# If we have cached data (this is a reconfiguration) use it.
- if cached and project.unparsed_config:
- jobs.append(CachedDataJob(True, project))
+ if cached and project.unparsed_branch_config:
+ # Note: this should only be one branch (master), as
+ # that's all we will initially load below in the
+ # un-cached case.
+ for branch in project.unparsed_branch_config.keys():
+ jobs.append(CachedDataJob(True, project, branch))
continue
# Otherwise, prepare an empty unparsed config object to
# hold cached data later.
- new_project_unparsed_config[project] = model.UnparsedTenantConfig()
+ new_project_unparsed_branch_config[project] = {}
+ new_project_unparsed_branch_config[project]['master'] = \
+ model.UnparsedTenantConfig()
# Get main config files. These files are permitted the
# full range of configuration.
- job = merger.getFiles(
+ job = self.merger.getFiles(
project.source.connection.connection_name,
project.name, 'master',
files=['zuul.yaml', '.zuul.yaml'],
@@ -1456,12 +1507,12 @@
if not tpc.load_classes:
continue
# If we have cached data (this is a reconfiguration) use it.
- if cached and project.unparsed_config:
- jobs.append(CachedDataJob(False, project))
+ if cached and project.unparsed_branch_config:
+ for branch in project.unparsed_branch_config.keys():
+ jobs.append(CachedDataJob(False, project, branch))
continue
# Otherwise, prepare an empty unparsed config object to
# hold cached data later.
- new_project_unparsed_config[project] = model.UnparsedTenantConfig()
new_project_unparsed_branch_config[project] = {}
# Get in-project-repo config files which have a restricted
# set of options.
@@ -1473,7 +1524,7 @@
for branch in branches:
new_project_unparsed_branch_config[project][branch] = \
model.UnparsedTenantConfig()
- job = merger.getFiles(
+ job = self.merger.getFiles(
project.source.connection.connection_name,
project.name, branch,
files=['zuul.yaml', '.zuul.yaml'],
@@ -1488,22 +1539,22 @@
# same order they were defined in the main config file.
# This is important for correct inheritance.
if isinstance(job, CachedDataJob):
- TenantParser.log.info(
+ self.log.info(
"Loading previously parsed configuration from %s" %
(job.project,))
if job.config_project:
config_projects_config.extend(
- job.project.unparsed_config, tenant)
+ job.project.unparsed_branch_config[job.branch])
else:
untrusted_projects_config.extend(
- job.project.unparsed_config, tenant)
+ job.project.unparsed_branch_config[job.branch])
continue
- TenantParser.log.debug("Waiting for cat job %s" % (job,))
+ self.log.debug("Waiting for cat job %s" % (job,))
job.wait()
if not job.updated:
raise Exception("Cat job %s failed" % (job,))
- TenantParser.log.debug("Cat job %s got files %s" %
- (job, job.files.keys()))
+ self.log.debug("Cat job %s got files %s" %
+ (job, job.files.keys()))
loaded = False
files = sorted(job.files.keys())
for conf_root in ['zuul.yaml', 'zuul.d', '.zuul.yaml', '.zuul.d']:
@@ -1513,115 +1564,105 @@
continue
# Don't load from more than one configuration in a repo-branch
if loaded and loaded != conf_root:
- TenantParser.log.warning(
+ self.log.warning(
"Multiple configuration files in %s" %
(job.source_context,))
continue
loaded = conf_root
source_context = job.source_context.copy()
source_context.path = fn
- TenantParser.log.info(
+ self.log.info(
"Loading configuration from %s" %
(source_context,))
project = source_context.project
branch = source_context.branch
if source_context.trusted:
- incdata = TenantParser._parseConfigProjectLayout(
- job.files[fn], source_context, tenant)
- config_projects_config.extend(incdata, tenant)
+ incdata = self.loadConfigProjectLayout(
+ job.files[fn], source_context)
+ config_projects_config.extend(incdata)
else:
- incdata = TenantParser._parseUntrustedProjectLayout(
- job.files[fn], source_context, tenant)
- untrusted_projects_config.extend(incdata, tenant)
- new_project_unparsed_config[project].extend(
- incdata, tenant)
- if branch in new_project_unparsed_branch_config.get(
- project, {}):
- new_project_unparsed_branch_config[project][branch].\
- extend(incdata, tenant)
+ incdata = self.loadUntrustedProjectLayout(
+ job.files[fn], source_context)
+ untrusted_projects_config.extend(incdata)
+ new_project_unparsed_branch_config[project][branch].\
+ extend(incdata)
# Now that we've successfully loaded all of the configuration,
# cache the unparsed data on the project objects.
- for project, data in new_project_unparsed_config.items():
- project.unparsed_config = data
for project, branch_config in \
new_project_unparsed_branch_config.items():
- for branch, data in branch_config.items():
- project.unparsed_branch_config[branch] = data
+ project.unparsed_branch_config = branch_config
return config_projects_config, untrusted_projects_config
- @staticmethod
- def _parseConfigProjectLayout(data, source_context, tenant):
+ def loadConfigProjectLayout(self, data, source_context):
# This is the top-level configuration for a tenant.
config = model.UnparsedTenantConfig()
with early_configuration_exceptions(source_context):
- config.extend(safe_load_yaml(data, source_context), tenant)
+ config.extend(safe_load_yaml(data, source_context))
return config
- @staticmethod
- def _parseUntrustedProjectLayout(data, source_context, tenant):
+ def loadUntrustedProjectLayout(self, data, source_context):
config = model.UnparsedTenantConfig()
with early_configuration_exceptions(source_context):
- config.extend(safe_load_yaml(data, source_context), tenant)
+ config.extend(safe_load_yaml(data, source_context))
if config.pipelines:
with configuration_exceptions('pipeline', config.pipelines[0]):
raise PipelineNotPermittedError()
return config
- @staticmethod
- def _getLoadClasses(tenant, conf_object):
+ def _getLoadClasses(self, tenant, conf_object):
project = conf_object['_source_context'].project
tpc = tenant.project_configs[project.canonical_name]
return tpc.load_classes
- @staticmethod
- def _parseLayoutItems(layout, tenant, data, scheduler, connections,
+ def _parseLayoutItems(self, layout, tenant, data,
skip_pipelines=False, skip_semaphores=False):
+ pcontext = ParseContext(self.connections, self.scheduler,
+ tenant, layout)
# Handle pragma items first since they modify the source context
# used by other classes.
- pragma_parser = PragmaParser()
for config_pragma in data.pragmas:
- pragma_parser.fromYaml(config_pragma)
+ pcontext.pragma_parser.fromYaml(config_pragma)
if not skip_pipelines:
for config_pipeline in data.pipelines:
- classes = TenantParser._getLoadClasses(
- tenant, config_pipeline)
+ classes = self._getLoadClasses(tenant, config_pipeline)
if 'pipeline' not in classes:
continue
- layout.addPipeline(PipelineParser.fromYaml(
- layout, connections,
- scheduler, config_pipeline))
+ layout.addPipeline(pcontext.pipeline_parser.fromYaml(
+ config_pipeline))
+ pcontext.setPipelines()
for config_nodeset in data.nodesets:
- classes = TenantParser._getLoadClasses(tenant, config_nodeset)
+ classes = self._getLoadClasses(tenant, config_nodeset)
if 'nodeset' not in classes:
continue
with configuration_exceptions('nodeset', config_nodeset):
- layout.addNodeSet(NodeSetParser.fromYaml(
+ layout.addNodeSet(pcontext.nodeset_parser.fromYaml(
config_nodeset))
for config_secret in data.secrets:
- classes = TenantParser._getLoadClasses(tenant, config_secret)
+ classes = self._getLoadClasses(tenant, config_secret)
if 'secret' not in classes:
continue
with configuration_exceptions('secret', config_secret):
- layout.addSecret(SecretParser.fromYaml(layout, config_secret))
+ layout.addSecret(pcontext.secret_parser.fromYaml(
+ config_secret))
for config_job in data.jobs:
- classes = TenantParser._getLoadClasses(tenant, config_job)
+ classes = self._getLoadClasses(tenant, config_job)
if 'job' not in classes:
continue
with configuration_exceptions('job', config_job):
- job = JobParser.fromYaml(tenant, layout, config_job)
+ job = pcontext.job_parser.fromYaml(config_job)
added = layout.addJob(job)
if not added:
- TenantParser.log.debug(
+ self.log.debug(
"Skipped adding job %s which shadows an existing job" %
(job,))
# Now that all the jobs are loaded, verify their parents exist
for config_job in data.jobs:
- classes = TenantParser._getLoadClasses(tenant, config_job)
+ classes = self._getLoadClasses(tenant, config_job)
if 'job' not in classes:
continue
with configuration_exceptions('job', config_job):
@@ -1638,25 +1679,26 @@
else:
semaphore_layout = layout
for config_semaphore in data.semaphores:
- classes = TenantParser._getLoadClasses(
+ classes = self._getLoadClasses(
tenant, config_semaphore)
if 'semaphore' not in classes:
continue
with configuration_exceptions('semaphore', config_semaphore):
- semaphore = SemaphoreParser.fromYaml(config_semaphore)
+ semaphore = pcontext.semaphore_parser.fromYaml(
+ config_semaphore)
semaphore_layout.addSemaphore(semaphore)
- project_template_parser = ProjectTemplateParser(tenant, layout)
for config_template in data.project_templates:
- classes = TenantParser._getLoadClasses(tenant, config_template)
+ classes = self._getLoadClasses(tenant, config_template)
if 'project-template' not in classes:
continue
with configuration_exceptions('project-template', config_template):
- layout.addProjectTemplate(project_template_parser.fromYaml(
- config_template))
+ layout.addProjectTemplate(
+ pcontext.project_template_parser.fromYaml(
+ config_template))
- project_parser = ProjectParser(tenant, layout, project_template_parser)
- for config_projects in data.projects.values():
+ flattened_projects = self._flattenProjects(data.projects, tenant)
+ for config_projects in flattened_projects.values():
# Unlike other config classes, we expect multiple project
# stanzas with the same name, so that a config repo can
# define a project-pipeline and the project itself can
@@ -1666,25 +1708,43 @@
# the include/exclude rules before parsing them.
filtered_projects = []
for config_project in config_projects:
- classes = TenantParser._getLoadClasses(tenant, config_project)
+ classes = self._getLoadClasses(tenant, config_project)
if 'project' in classes:
filtered_projects.append(config_project)
if not filtered_projects:
continue
- layout.addProjectConfig(project_parser.fromYaml(
+ layout.addProjectConfig(pcontext.project_parser.fromYaml(
filtered_projects))
- @staticmethod
- def _parseLayout(base, tenant, data, scheduler, connections):
+ def _flattenProjects(self, projects, tenant):
+ # Group together all of the project stanzas for each project.
+ result_projects = {}
+ for config_project in projects:
+ with configuration_exceptions('project', config_project):
+ name = config_project.get('name')
+ if not name:
+ # There is no name defined so implicitly add the name
+ # of the project where it is defined.
+ name = (config_project['_source_context'].
+ project.canonical_name)
+ else:
+ trusted, project = tenant.getProject(name)
+ if project is None:
+ raise ProjectNotFoundError(name)
+ name = project.canonical_name
+ config_project['name'] = name
+ result_projects.setdefault(name, []).append(config_project)
+ return result_projects
+
+ def _parseLayout(self, base, tenant, data):
# Don't call this method from dynamic reconfiguration because
# it interacts with drivers and connections.
layout = model.Layout(tenant)
- TenantParser.log.debug("Created layout id %s", layout.uuid)
+ self.log.debug("Created layout id %s", layout.uuid)
- TenantParser._parseLayoutItems(layout, tenant, data,
- scheduler, connections)
+ self._parseLayoutItems(layout, tenant, data)
for pipeline in layout.pipelines.values():
pipeline.manager._postConfig(layout)
@@ -1695,6 +1755,12 @@
class ConfigLoader(object):
log = logging.getLogger("zuul.ConfigLoader")
+ def __init__(self, connections, scheduler, merger):
+ self.connections = connections
+ self.scheduler = scheduler
+ self.merger = merger
+ self.tenant_parser = TenantParser(connections, scheduler, merger)
+
def expandConfigPath(self, config_path):
if config_path:
config_path = os.path.expanduser(config_path)
@@ -1703,28 +1769,27 @@
config_path)
return config_path
- def loadConfig(self, config_path, project_key_dir, scheduler, merger,
- connections):
- abide = model.Abide()
-
+ def readConfig(self, config_path):
config_path = self.expandConfigPath(config_path)
with open(config_path) as config_file:
self.log.info("Loading configuration from %s" % (config_path,))
data = yaml.safe_load(config_file)
- config = model.UnparsedAbideConfig()
- config.extend(data)
base = os.path.dirname(os.path.realpath(config_path))
+ unparsed_abide = model.UnparsedAbideConfig(base)
+ unparsed_abide.extend(data)
+ return unparsed_abide
- for conf_tenant in config.tenants:
+ def loadConfig(self, unparsed_abide, project_key_dir):
+ abide = model.Abide()
+ for conf_tenant in unparsed_abide.tenants:
# When performing a full reload, do not use cached data.
- tenant = TenantParser.fromYaml(
- base, project_key_dir, connections, scheduler, merger,
- conf_tenant, old_tenant=None)
+ tenant = self.tenant_parser.fromYaml(unparsed_abide.base,
+ project_key_dir,
+ conf_tenant, old_tenant=None)
abide.tenants[tenant.name] = tenant
return abide
- def reloadTenant(self, config_path, project_key_dir, scheduler,
- merger, connections, abide, tenant):
+ def reloadTenant(self, config_path, project_key_dir, abide, tenant):
new_abide = model.Abide()
new_abide.tenants = abide.tenants.copy()
@@ -1732,13 +1797,14 @@
base = os.path.dirname(os.path.realpath(config_path))
# When reloading a tenant only, use cached data if available.
- new_tenant = TenantParser.fromYaml(
- base, project_key_dir, connections, scheduler, merger,
+ new_tenant = self.tenant_parser.fromYaml(
+ base, project_key_dir,
tenant.unparsed_config, old_tenant=tenant)
new_abide.tenants[tenant.name] = new_tenant
return new_abide
- def _loadDynamicProjectData(self, config, project, files, trusted, tenant):
+ def _loadDynamicProjectData(self, config, project,
+ files, trusted, tenant):
if trusted:
branches = ['master']
else:
@@ -1758,12 +1824,9 @@
# If there is no files entry at all for this
# project-branch, then use the cached config.
if files_entry is None:
- if trusted:
- incdata = project.unparsed_config
- else:
- incdata = project.unparsed_branch_config.get(branch)
+ incdata = project.unparsed_branch_config.get(branch)
if incdata:
- config.extend(incdata, tenant)
+ config.extend(incdata)
continue
# Otherwise, do not use the cached config (even if the
# files are empty as that likely means they were deleted).
@@ -1785,19 +1848,21 @@
# Prevent mixing configuration source
conf_root = fn.split('/')[0]
if loaded and loaded != conf_root:
- TenantParser.log.warning(
+ self.log.warning(
"Multiple configuration in %s" % source_context)
continue
loaded = conf_root
if trusted:
- incdata = TenantParser._parseConfigProjectLayout(
- data, source_context, tenant)
+ incdata = (self.tenant_parser.
+ loadConfigProjectLayout(
+ data, source_context))
else:
- incdata = TenantParser._parseUntrustedProjectLayout(
- data, source_context, tenant)
+ incdata = (self.tenant_parser.
+ loadUntrustedProjectLayout(
+ data, source_context))
- config.extend(incdata, tenant)
+ config.extend(incdata)
def createDynamicLayout(self, tenant, files,
include_config_projects=False,
@@ -1809,8 +1874,10 @@
config, project, files, True, tenant)
else:
config = tenant.config_projects_config.copy()
+
for project in tenant.untrusted_projects:
- self._loadDynamicProjectData(config, project, files, False, tenant)
+ self._loadDynamicProjectData(config, project, files,
+ False, tenant)
layout = model.Layout(tenant)
self.log.debug("Created layout id %s", layout.uuid)
@@ -1834,9 +1901,8 @@
else:
skip_pipelines = skip_semaphores = False
- TenantParser._parseLayoutItems(layout, tenant, config,
- scheduler, connections,
- skip_pipelines=skip_pipelines,
- skip_semaphores=skip_semaphores)
+ self.tenant_parser._parseLayoutItems(layout, tenant, config,
+ skip_pipelines=skip_pipelines,
+ skip_semaphores=skip_semaphores)
return layout
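The net effect of the configloader changes above is that the parsers stop being bags of static methods and become instances that share one ParseContext per parse run. A condensed sketch of the pattern (simplified stand-ins, not the full Zuul classes):

    import logging


    class JobParser(object):
        def __init__(self, pcontext):
            self.log = logging.getLogger("zuul.JobParser")
            self.pcontext = pcontext

        def fromYaml(self, conf):
            # Shared state is reached through the context instead of
            # being threaded through every call, as the old
            # staticmethod signatures required.
            max_timeout = self.pcontext.tenant.max_job_timeout
            if max_timeout != -1 and conf.get('timeout', 0) > max_timeout:
                raise ValueError("timeout exceeds tenant maximum")
            return conf


    class ParseContext(object):
        """One per parser run; bundles the collaborators that every
        sub-parser needs (connections, scheduler, tenant, layout)."""

        def __init__(self, connections, scheduler, tenant, layout):
            self.connections = connections
            self.scheduler = scheduler
            self.tenant = tenant
            self.layout = layout
            self.job_parser = JobParser(self)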
diff --git a/zuul/connection/__init__.py b/zuul/connection/__init__.py
index 86f14d6..1c62f4d 100644
--- a/zuul/connection/__init__.py
+++ b/zuul/connection/__init__.py
@@ -75,11 +75,14 @@
still in use. Anything in our cache that isn't in the supplied
list should be safe to remove from the cache."""
- def getWebHandlers(self, zuul_web):
+ def getWebHandlers(self, zuul_web, info):
"""Return a list of web handlers to register with zuul-web.
:param zuul.web.ZuulWeb zuul_web:
Zuul Web instance.
+ :param zuul.model.WebInfo info:
+ The WebInfo object for the Zuul Web instance. Can be used by
+ plugins to toggle API capabilities.
:returns: List of `zuul.web.handler.BaseWebHandler` instances.
"""
return []
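With the extra `info` argument, a driver can flip capability flags on the shared WebInfo object at handler-registration time. A hypothetical driver sketch (the class and its empty handler list are illustrative; the real pattern is the sql driver change below):

    from zuul.connection import BaseConnection


    class MyConnection(BaseConnection):
        def getWebHandlers(self, zuul_web, info):
            # Advertise the extra API surface this driver provides so
            # that /info consumers can discover it before calling it.
            info.capabilities.job_history = True
            return []  # a real driver would return its handlers here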
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 6dfcdd3..772ba9b 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -1141,7 +1141,7 @@
return statuses
- def getWebHandlers(self, zuul_web):
+ def getWebHandlers(self, zuul_web, info):
return [GithubWebhookHandler(self, zuul_web, 'POST', 'payload')]
def validateWebConfig(self, config, connections):
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index 501a2c5..e931301 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -125,9 +125,10 @@
return zuul_buildset_table, zuul_build_table
- def getWebHandlers(self, zuul_web):
+ def getWebHandlers(self, zuul_web, info):
+ info.capabilities.job_history = True
return [
- SqlWebHandler(self, zuul_web, 'GET', '/{tenant}/builds.json'),
+ SqlWebHandler(self, zuul_web, 'GET', '/{tenant}/builds'),
StaticHandler(zuul_web, '/{tenant}/builds.html'),
]
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index d561232..fe0f28d 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import copy
import gear
import json
import logging
@@ -186,6 +185,7 @@
params = dict()
params['job'] = job.name
params['timeout'] = job.timeout
+ params['post_timeout'] = job.post_timeout
params['items'] = merger_items
params['projects'] = []
if hasattr(item.change, 'branch'):
@@ -208,7 +208,9 @@
nodes.append(n)
params['nodes'] = nodes
params['groups'] = [group.toDict() for group in nodeset.getGroups()]
- params['vars'] = copy.deepcopy(job.variables)
+ params['vars'] = job.variables
+ params['host_vars'] = job.host_variables
+ params['group_vars'] = job.group_variables
params['zuul'] = zuul_params
projects = set()
required_projects = set()
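These parameters are JSON-serialized into the gearman job payload sent to the executor, which is also why the defensive deepcopy of the variables buys nothing here. Schematically, a payload now carries the new keys alongside `vars` (all values below are invented for illustration):

    params = {
        'job': 'tox-py35',
        'timeout': 1800,       # shared budget for pre-run + run playbooks
        'post_timeout': 600,   # full budget for *each* post playbook
        'vars': {'myvar': 'value'},
        'host_vars': {'controller': {'role': 'primary'}},
        'group_vars': {'ceph-osd': {'osd_journal_size': 512}},
    }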
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 53ef173..d140a00 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -497,16 +497,22 @@
shutil.copy(os.path.join(library_path, fn), target_dir)
-def make_setup_inventory_dict(nodes):
+def check_varnames(var):
+ # We block these in configloader, but block them here too to make
+ # sure that a job doesn't pass variables named zuul or nodepool.
+ if 'zuul' in var:
+ raise Exception("Defining variables named 'zuul' is not allowed")
+ if 'nodepool' in var:
+ raise Exception("Defining variables named 'nodepool' is not allowed")
+
+def make_setup_inventory_dict(nodes):
hosts = {}
for node in nodes:
if (node['host_vars']['ansible_connection'] in
BLACKLISTED_ANSIBLE_CONNECTION_TYPES):
continue
-
- for name in node['name']:
- hosts[name] = node['host_vars']
+ hosts[node['name']] = node['host_vars']
inventory = {
'all': {
@@ -517,12 +523,10 @@
return inventory
-def make_inventory_dict(nodes, groups, all_vars):
-
+def make_inventory_dict(nodes, args, all_vars):
hosts = {}
for node in nodes:
- for name in node['name']:
- hosts[name] = node['host_vars']
+ hosts[node['name']] = node['host_vars']
inventory = {
'all': {
@@ -531,14 +535,16 @@
}
}
- for group in groups:
+ for group in args['groups']:
group_hosts = {}
for node_name in group['nodes']:
- # children is a dict with None as values because we don't have
- # and per-group variables. If we did, None would be a dict
- # with the per-group variables
group_hosts[node_name] = None
- inventory[group['name']] = {'hosts': group_hosts}
+ group_vars = args['group_vars'].get(group['name'], {}).copy()
+ check_varnames(group_vars)
+ inventory[group['name']] = {
+ 'hosts': group_hosts,
+ 'vars': group_vars,
+ }
return inventory
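With per-group variables wired in, the inventory gains a `vars` section for each group. A schematic of what make_inventory_dict now emits for one group (host and variable names invented):

    inventory = {
        'all': {
            'hosts': {
                'controller': {'ansible_host': '192.0.2.10',
                               'ansible_user': 'zuul'},
            },
        },
        'ceph-osd': {
            'hosts': {'controller': None},      # membership only
            'vars': {'osd_journal_size': 512},  # from the job's group_vars
        },
    }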
@@ -878,8 +884,10 @@
success = False
self.started = True
time_started = time.time()
- # timeout value is total job timeout or put another way
- # the cummulative time that pre, run, and post can consume.
+ # timeout value is the "total" job timeout, which accounts for
+ # pre-run and run playbooks. post-run is different because
+ # it is used to copy out job logs and we want to do our best
+ # to copy logs even when the job has timed out.
job_timeout = args['timeout']
for index, playbook in enumerate(self.jobdir.pre_playbooks):
# TODOv3(pabelanger): Implement pre-run timeout setting.
@@ -914,11 +922,15 @@
# run it again.
return None
+ post_timeout = args['post_timeout']
for index, playbook in enumerate(self.jobdir.post_playbooks):
- # TODOv3(pabelanger): Implement post-run timeout setting.
- ansible_timeout = self.getAnsibleTimeout(time_started, job_timeout)
+ # Post timeout operates a little differently from the main job
+ # timeout. We give each post playbook the full post timeout to
+ # do its job because post is where you'll often record job logs,
+ # which are vital to understanding why timeouts have happened in
+ # the first place.
post_status, post_code = self.runAnsiblePlaybook(
- playbook, ansible_timeout, success, phase='post', index=index)
+ playbook, post_timeout, success, phase='post', index=index)
if post_status == self.RESULT_ABORTED:
return 'ABORTED'
if post_status != self.RESULT_NORMAL or post_code != 0:
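The accounting difference is easiest to see side by side: run playbooks draw from one shrinking job budget, while every post playbook starts with the full post timeout. A sketch, assuming getAnsibleTimeout returns the unspent share of the job budget:

    import time

    def remaining(time_started, job_timeout):
        # Stand-in for getAnsibleTimeout: whatever is left of the
        # total job budget.
        return max(0, job_timeout - int(time.time() - time_started))

    time_started = time.time()
    job_timeout, post_timeout = 1800, 600

    run_budget = remaining(time_started, job_timeout)  # shrinks over time
    post_budget = post_timeout  # constant, so logs can still be copied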
@@ -968,42 +980,45 @@
# set to True in the clouds.yaml for a cloud if this
# results in the wrong thing being in interface_ip
# TODO(jeblair): Move this notice to the docs.
- ip = node.get('interface_ip')
- port = node.get('connection_port', node.get('ssh_port', 22))
- host_vars = dict(
- ansible_host=ip,
- ansible_user=self.executor_server.default_username,
- ansible_port=port,
- nodepool=dict(
- label=node.get('label'),
- az=node.get('az'),
- cloud=node.get('cloud'),
- provider=node.get('provider'),
- region=node.get('region'),
- interface_ip=node.get('interface_ip'),
- public_ipv4=node.get('public_ipv4'),
- private_ipv4=node.get('private_ipv4'),
- public_ipv6=node.get('public_ipv6')))
+ for name in node['name']:
+ ip = node.get('interface_ip')
+ port = node.get('connection_port', node.get('ssh_port', 22))
+ host_vars = args['host_vars'].get(name, {}).copy()
+ check_varnames(host_vars)
+ host_vars.update(dict(
+ ansible_host=ip,
+ ansible_user=self.executor_server.default_username,
+ ansible_port=port,
+ nodepool=dict(
+ label=node.get('label'),
+ az=node.get('az'),
+ cloud=node.get('cloud'),
+ provider=node.get('provider'),
+ region=node.get('region'),
+ interface_ip=node.get('interface_ip'),
+ public_ipv4=node.get('public_ipv4'),
+ private_ipv4=node.get('private_ipv4'),
+ public_ipv6=node.get('public_ipv6'))))
- username = node.get('username')
- if username:
- host_vars['ansible_user'] = username
+ username = node.get('username')
+ if username:
+ host_vars['ansible_user'] = username
- connection_type = node.get('connection_type')
- if connection_type:
- host_vars['ansible_connection'] = connection_type
+ connection_type = node.get('connection_type')
+ if connection_type:
+ host_vars['ansible_connection'] = connection_type
- host_keys = []
- for key in node.get('host_keys'):
- if port != 22:
- host_keys.append("[%s]:%s %s" % (ip, port, key))
- else:
- host_keys.append("%s %s" % (ip, key))
+ host_keys = []
+ for key in node.get('host_keys'):
+ if port != 22:
+ host_keys.append("[%s]:%s %s" % (ip, port, key))
+ else:
+ host_keys.append("%s %s" % (ip, key))
- hosts.append(dict(
- name=node['name'],
- host_vars=host_vars,
- host_keys=host_keys))
+ hosts.append(dict(
+ name=name,
+ host_vars=host_vars,
+ host_keys=host_keys))
return hosts
def _blockPluginDirs(self, path):
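Note the merge order in getHostList: the job-supplied host_vars form the base dict and the executor's own connection and nodepool values are applied on top, so a job cannot override them. A minimal illustration of that update semantics (values invented):

    host_vars = {'ansible_host': 'attacker.example.com',  # from the job
                 'role': 'primary'}
    host_vars.update(dict(
        ansible_host='192.0.2.10',  # executor-controlled value wins
        ansible_port=22,
    ))
    assert host_vars['ansible_host'] == '192.0.2.10'
    assert host_vars['role'] == 'primary'  # job-level extras survive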
@@ -1096,10 +1111,7 @@
secrets = playbook['secrets']
if secrets:
- if 'zuul' in secrets:
- # We block this in configloader, but block it here too to make
- # sure that a job doesn't pass secrets named zuul.
- raise Exception("Defining secrets named 'zuul' is not allowed")
+ check_varnames(secrets)
jobdir_playbook.secrets_content = yaml.safe_dump(
secrets, default_flow_style=False)
@@ -1200,12 +1212,9 @@
def prepareAnsibleFiles(self, args):
all_vars = args['vars'].copy()
+ check_varnames(all_vars)
# TODO(mordred) Hack to work around running things with python3
all_vars['ansible_python_interpreter'] = '/usr/bin/python2'
- if 'zuul' in all_vars:
- # We block this in configloader, but block it here too to make
- # sure that a job doesn't pass variables named zuul.
- raise Exception("Defining vars named 'zuul' is not allowed")
all_vars['zuul'] = args['zuul'].copy()
all_vars['zuul']['executor'] = dict(
hostname=self.executor_server.hostname,
@@ -1216,7 +1225,7 @@
nodes = self.getHostList(args)
setup_inventory = make_setup_inventory_dict(nodes)
- inventory = make_inventory_dict(nodes, args['groups'], all_vars)
+ inventory = make_inventory_dict(nodes, args, all_vars)
with open(self.jobdir.setup_inventory, 'w') as setup_inventory_yaml:
setup_inventory_yaml.write(
@@ -1856,7 +1865,7 @@
if self.statsd:
base_key = 'zuul.executor.%s' % self.hostname
self.statsd.gauge(base_key + '.load_average', 0)
- self.statsd.gauge(base_key + '.pct_available_ram', 0)
+ self.statsd.gauge(base_key + '.pct_used_ram', 0)
self.statsd.gauge(base_key + '.running_builds', 0)
self.log.debug("Stopped")
@@ -2055,8 +2064,8 @@
base_key = 'zuul.executor.%s' % self.hostname
self.statsd.gauge(base_key + '.load_average',
int(load_avg * 100))
- self.statsd.gauge(base_key + '.pct_available_ram',
- int(avail_mem_pct * 100))
+ self.statsd.gauge(base_key + '.pct_used_ram',
+ int((100.0 - avail_mem_pct) * 100))
self.statsd.gauge(base_key + '.running_builds',
len(self.job_workers))
self.statsd.gauge(base_key + '.starting_builds',
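The renamed gauge inverts the old value: it reports used RAM (excluding buffers and cache) as a percentage multiplied by 100. A quick worked example of the conversion in the hunk above:

    avail_mem_pct = 37.25  # available RAM, in percent
    gauge_value = int((100.0 - avail_mem_pct) * 100)
    assert gauge_value == 6275  # i.e. 62.75% used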
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 88ddf7d..506b94f 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -460,7 +460,8 @@
# Load layout
# Late import to break an import loop
import zuul.configloader
- loader = zuul.configloader.ConfigLoader()
+ loader = zuul.configloader.ConfigLoader(
+ self.sched.connections, self.sched, None)
self.log.debug("Loading dynamic layout")
(trusted_updates, untrusted_updates) = item.includesConfigUpdates()
@@ -476,9 +477,7 @@
loader.createDynamicLayout(
item.pipeline.layout.tenant,
build_set.files,
- include_config_projects=True,
- scheduler=self.sched,
- connections=self.sched.connections)
+ include_config_projects=True)
trusted_layout_verified = True
# Then create the config a second time but without changes
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index 5e102b4..aba8645 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -79,6 +79,8 @@
self.retry_interval = retry_interval
try:
self._ensure_cloned()
+ self._git_set_remote_url(
+ git.Repo(self.local_path), self.remote_url)
except Exception:
self.log.exception("Unable to initialize repo for %s" % remote)
@@ -112,8 +114,7 @@
config_writer.set_value('user', 'name', self.username)
config_writer.write()
if rewrite_url:
- with repo.remotes.origin.config_writer as config_writer:
- config_writer.set('url', self.remote_url)
+ self._git_set_remote_url(repo, self.remote_url)
self._initialized = True
def isInitialized(self):
@@ -154,6 +155,10 @@
else:
raise
+ def _git_set_remote_url(self, repo, url):
+ with repo.remotes.origin.config_writer as config_writer:
+ config_writer.set('url', url)
+
def createRepoObject(self):
self._ensure_cloned()
repo = git.Repo(self.local_path)
@@ -350,6 +355,13 @@
repo = self.createRepoObject()
repo.delete_remote(repo.remotes[remote])
+ def setRemoteUrl(self, url):
+ if self.remote_url == url:
+ return
+ self.log.debug("Set remote url to %s" % url)
+ self.remote_url = url
+ self._git_set_remote_url(self.createRepoObject(), self.remote_url)
+
class Merger(object):
def __init__(self, working_root, connections, email, username,
@@ -397,7 +409,9 @@
url = source.getGitUrl(project)
key = '/'.join([hostname, project_name])
if key in self.repos:
- return self.repos[key]
+ repo = self.repos[key]
+ repo.setRemoteUrl(url)
+ return repo
sshkey = self.connections.connections.get(connection_name).\
connection_config.get('sshkey')
if not url:
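Because getRepo now refreshes the remote URL on every cache hit, a source that starts handing out a different URL (for example, one embedding rotated credentials) is picked up without re-cloning; setRemoteUrl short-circuits when nothing changed, so the common path stays a string comparison. A condensed sketch of the cache-hit path added above, with the surrounding plumbing elided:

    def get_repo(repos, key, url):
        repo = repos.get(key)
        if repo is not None:
            repo.setRemoteUrl(url)  # no-op unless the URL changed
            return repo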
diff --git a/zuul/model.py b/zuul/model.py
index 45fc1a8..44e8d06 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -24,6 +24,7 @@
import textwrap
from zuul import change_matcher
+from zuul.lib.config import get_default
MERGER_MERGE = 1 # "git merge"
MERGER_MERGE_RESOLVE = 2 # "git merge -s resolve"
@@ -349,7 +350,6 @@
# when deciding whether to enqueue their changes
# TODOv3 (jeblair): re-add support for foreign projects if needed
self.foreign = foreign
- self.unparsed_config = None
self.unparsed_branch_config = {} # branch -> UnparsedTenantConfig
def __str__(self):
@@ -839,7 +839,10 @@
self.execution_attributes = dict(
parent=None,
timeout=None,
+ post_timeout=None,
variables={},
+ host_variables={},
+ group_variables={},
nodeset=NodeSet(),
workspace=None,
pre_run=(),
@@ -981,10 +984,19 @@
matchers.append(self.branch_matcher)
self.branch_matcher = change_matcher.MatchAll(matchers)
- def updateVariables(self, other_vars):
- v = copy.deepcopy(self.variables)
- Job._deepUpdate(v, other_vars)
- self.variables = v
+ def updateVariables(self, other_vars, other_host_vars, other_group_vars):
+ if other_vars is not None:
+ v = copy.deepcopy(self.variables)
+ Job._deepUpdate(v, other_vars)
+ self.variables = v
+ if other_host_vars is not None:
+ v = copy.deepcopy(self.host_variables)
+ Job._deepUpdate(v, other_host_vars)
+ self.host_variables = v
+ if other_group_vars is not None:
+ v = copy.deepcopy(self.group_variables)
+ Job._deepUpdate(v, other_group_vars)
+ self.group_variables = v
def updateParentData(self, other_vars):
# Update variables, but give the current values priority (used
@@ -1061,7 +1073,8 @@
"from other projects."
% (repr(self), this_origin))
if k not in set(['pre_run', 'run', 'post_run', 'roles',
- 'variables', 'required_projects',
+ 'variables', 'host_variables',
+ 'group_variables', 'required_projects',
'allowed_projects']):
# TODO(jeblair): determine if deepcopy is required
setattr(self, k, copy.deepcopy(other._get(k)))
@@ -1102,8 +1115,8 @@
if other._get('post_run') is not None:
other_post_run = self.freezePlaybooks(other.post_run)
self.post_run = other_post_run + self.post_run
- if other._get('variables') is not None:
- self.updateVariables(other.variables)
+ self.updateVariables(other.variables, other.host_variables,
+ other.group_variables)
if other._get('required_projects') is not None:
self.updateProjects(other.required_projects)
if (other._get('allowed_projects') is not None and
@@ -2434,8 +2447,10 @@
An Abide is a collection of tenants.
"""
- def __init__(self):
+ def __init__(self, base=None):
self.tenants = []
+ self.known_tenants = set()
+ self.base = base
def extend(self, conf):
if isinstance(conf, UnparsedAbideConfig):
@@ -2453,6 +2468,8 @@
key, value = list(item.items())[0]
if key == 'tenant':
self.tenants.append(value)
+ if 'name' in value:
+ self.known_tenants.add(value['name'])
else:
raise ConfigItemUnknownError()
@@ -2465,7 +2482,7 @@
self.pipelines = []
self.jobs = []
self.project_templates = []
- self.projects = {}
+ self.projects = []
self.nodesets = []
self.secrets = []
self.semaphores = []
@@ -2482,23 +2499,13 @@
r.semaphores = copy.deepcopy(self.semaphores)
return r
- def extend(self, conf, tenant):
+ def extend(self, conf):
if isinstance(conf, UnparsedTenantConfig):
self.pragmas.extend(conf.pragmas)
self.pipelines.extend(conf.pipelines)
self.jobs.extend(conf.jobs)
self.project_templates.extend(conf.project_templates)
- for k, v in conf.projects.items():
- name = k
- # Add the projects to the according canonical name instead of
- # the given project name. If it is not found, it's ok to add
- # this to the given name. We also don't need to throw the
- # ProjectNotFoundException here as semantic validation occurs
- # later where it will fail then.
- trusted, project = tenant.getProject(k)
- if project is not None:
- name = project.canonical_name
- self.projects.setdefault(name, []).extend(v)
+ self.projects.extend(conf.projects)
self.nodesets.extend(conf.nodesets)
self.secrets.extend(conf.secrets)
self.semaphores.extend(conf.semaphores)
@@ -2514,13 +2521,7 @@
raise ConfigItemMultipleKeysError()
key, value = list(item.items())[0]
if key == 'project':
- name = value.get('name')
- if not name:
- # There is no name defined so implicitly add the name
- # of the project where it is defined.
- name = value['_source_context'].project.canonical_name
- value['name'] = name
- self.projects.setdefault(name, []).append(value)
+ self.projects.append(value)
elif key == 'job':
self.jobs.append(value)
elif key == 'project-template':
@@ -3182,3 +3183,80 @@
td = self._getTD(build)
td.add(elapsed, result)
td.save()
+
+
+class Capabilities(object):
+ """The set of capabilities this Zuul installation has.
+
+ Some plugins add elements to the external API. So that
+ consumers can tell whether a given piece of functionality is
+ available, keep track of distinct capability flags.
+ """
+ def __init__(self, job_history=False):
+ self.job_history = job_history
+
+ def __repr__(self):
+ return '<Capabilities 0x%x %s>' % (id(self), self._renderFlags())
+
+ def _renderFlags(self):
+ d = self.toDict()
+ return " ".join(['{k}={v}'.format(k=k, v=v) for (k, v) in d.items()])
+
+ def copy(self):
+ return Capabilities(**self.toDict())
+
+ def toDict(self):
+ d = dict()
+ d['job_history'] = self.job_history
+ return d
+
+
+class WebInfo(object):
+ """Information about the system needed by zuul-web /info."""
+
+ def __init__(self, websocket_url=None, endpoint=None,
+ capabilities=None, stats_url=None,
+ stats_prefix=None, stats_type=None):
+ self.capabilities = capabilities or Capabilities()
+ self.websocket_url = websocket_url
+ self.stats_url = stats_url
+ self.stats_prefix = stats_prefix
+ self.stats_type = stats_type
+ self.endpoint = endpoint
+ self.tenant = None
+
+ def __repr__(self):
+ return '<WebInfo 0x%x capabilities=%s>' % (
+ id(self), str(self.capabilities))
+
+ def copy(self):
+ return WebInfo(
+ websocket_url=self.websocket_url,
+ endpoint=self.endpoint,
+ stats_url=self.stats_url,
+ stats_prefix=self.stats_prefix,
+ stats_type=self.stats_type,
+ capabilities=self.capabilities.copy())
+
+ @staticmethod
+ def fromConfig(config):
+ return WebInfo(
+ websocket_url=get_default(config, 'web', 'websocket_url', None),
+ stats_url=get_default(config, 'web', 'stats_url', None),
+ stats_prefix=get_default(config, 'statsd', 'prefix'),
+ stats_type=get_default(config, 'web', 'stats_type', 'graphite'),
+ )
+
+ def toDict(self):
+ d = dict()
+ d['websocket_url'] = self.websocket_url
+ stats = dict()
+ stats['url'] = self.stats_url
+ stats['prefix'] = self.stats_prefix
+ stats['type'] = self.stats_type
+ d['stats'] = stats
+ d['endpoint'] = self.endpoint
+ d['capabilities'] = self.capabilities.toDict()
+ if self.tenant:
+ d['tenant'] = self.tenant
+ return d
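WebInfo.toDict() defines the shape of the new /info payload. A small usage sketch (URLs invented for illustration):

    import json
    from zuul.model import WebInfo

    info = WebInfo(
        websocket_url='wss://zuul.example.com/ws',
        stats_url='https://graphite.example.com',
        stats_prefix='zuul',
        stats_type='graphite',
    )
    info.capabilities.job_history = True
    print(json.dumps(info.toDict(), sort_keys=True))
    # -> keys: capabilities, endpoint, stats {url, prefix, type},
    #    websocket_url (plus tenant when set)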
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index c06497d..c58bfc7 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -246,6 +246,7 @@
self.result_event_queue = queue.Queue()
self.management_event_queue = zuul.lib.queue.MergedQueue()
self.abide = model.Abide()
+ self.unparsed_abide = model.UnparsedAbideConfig()
if not testonly:
time_dir = self._get_time_database_dir()
@@ -547,42 +548,44 @@
self.layout_lock.acquire()
self.config = event.config
try:
- self.log.debug("Full reconfiguration beginning")
- loader = configloader.ConfigLoader()
+ self.log.info("Full reconfiguration beginning")
+ loader = configloader.ConfigLoader(
+ self.connections, self, self.merger)
+ self.unparsed_abide = loader.readConfig(
+ self.config.get('scheduler', 'tenant_config'))
abide = loader.loadConfig(
- self.config.get('scheduler', 'tenant_config'),
- self._get_project_key_dir(),
- self, self.merger, self.connections)
+ self.unparsed_abide,
+ self._get_project_key_dir())
for tenant in abide.tenants.values():
self._reconfigureTenant(tenant)
self.abide = abide
finally:
self.layout_lock.release()
- self.log.debug("Full reconfiguration complete")
+ self.log.info("Full reconfiguration complete")
def _doTenantReconfigureEvent(self, event):
# This is called in the scheduler loop after another thread submits
# a request
self.layout_lock.acquire()
try:
- self.log.debug("Tenant reconfiguration beginning")
+ self.log.info("Tenant reconfiguration beginning")
# If a change landed in a project, clear out the cached
# config before reconfiguring.
for project in event.projects:
- project.unparsed_config = None
+ project.unparsed_branch_config = {}
old_tenant = self.abide.tenants[event.tenant_name]
- loader = configloader.ConfigLoader()
+ loader = configloader.ConfigLoader(
+ self.connections, self, self.merger)
abide = loader.reloadTenant(
self.config.get('scheduler', 'tenant_config'),
self._get_project_key_dir(),
- self, self.merger, self.connections,
self.abide, old_tenant)
tenant = abide.tenants[event.tenant_name]
self._reconfigureTenant(tenant)
self.abide = abide
finally:
self.layout_lock.release()
- self.log.debug("Tenant reconfiguration complete")
+ self.log.info("Tenant reconfiguration complete")
def _reenqueueGetProject(self, tenant, item):
project = item.change.project
@@ -1149,9 +1152,16 @@
data['pipelines'] = pipelines
tenant = self.abide.tenants.get(tenant_name)
if not tenant:
+ if tenant_name not in self.unparsed_abide.known_tenants:
+ return json.dumps({
+ "message": "Unknown tenant",
+ "code": 404
+ })
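+ # zuul-web maps the 'code' field of this payload onto the HTTP
+ # response, e.g. turning 404 into an HTTPNotFound.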
self.log.warning("Tenant %s isn't loaded" % tenant_name)
- return json.dumps(
- {"message": "Tenant %s isn't ready" % tenant_name})
+ return json.dumps({
+ "message": "Tenant %s isn't ready" % tenant_name,
+ "code": 204
+ })
for pipeline in tenant.layout.pipelines.values():
pipelines.append(pipeline.formatStatusJSON(websocket_url))
return json.dumps(data)
diff --git a/zuul/web/__init__.py b/zuul/web/__init__.py
index e962738..31eac7d 100755
--- a/zuul/web/__init__.py
+++ b/zuul/web/__init__.py
@@ -16,6 +16,7 @@
import asyncio
+import copy
import json
import logging
import os
@@ -25,6 +26,7 @@
import aiohttp
from aiohttp import web
+import zuul.model
import zuul.rpcclient
from zuul.web.handler import StaticHandler
@@ -158,41 +160,47 @@
'key_get': self.key_get,
}
- async def tenant_list(self, request):
+ async def tenant_list(self, request, result_filter=None):
job = self.rpc.submitJob('zuul:tenant_list', {})
return web.json_response(json.loads(job.data[0]))
- async def status_get(self, request):
+ async def status_get(self, request, result_filter=None):
tenant = request.match_info["tenant"]
if tenant not in self.cache or \
(time.time() - self.cache_time[tenant]) > self.cache_expiry:
job = self.rpc.submitJob('zuul:status_get', {'tenant': tenant})
self.cache[tenant] = json.loads(job.data[0])
self.cache_time[tenant] = time.time()
- resp = web.json_response(self.cache[tenant])
+ payload = self.cache[tenant]
+ if payload.get('code') == 404:
+ return web.HTTPNotFound(reason=payload['message'])
+ if result_filter:
+ payload = result_filter.filterPayload(payload)
+ resp = web.json_response(payload)
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers["Cache-Control"] = "public, max-age=%d" % \
self.cache_expiry
resp.last_modified = self.cache_time[tenant]
return resp
- async def job_list(self, request):
+ async def job_list(self, request, result_filter=None):
tenant = request.match_info["tenant"]
job = self.rpc.submitJob('zuul:job_list', {'tenant': tenant})
resp = web.json_response(json.loads(job.data[0]))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
- async def key_get(self, request):
+ async def key_get(self, request, result_filter=None):
tenant = request.match_info["tenant"]
project = request.match_info["project"]
job = self.rpc.submitJob('zuul:key_get', {'tenant': tenant,
'project': project})
return web.Response(body=job.data[0])
- async def processRequest(self, request, action):
+ async def processRequest(self, request, action, result_filter=None):
+ resp = None
try:
- resp = await self.controllers[action](request)
+ resp = await self.controllers[action](request, result_filter)
except asyncio.CancelledError:
self.log.debug("request handling cancelled")
except Exception as e:
@@ -202,6 +210,24 @@
return resp
+class ChangeFilter(object):
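+ """Reduce a status payload to the changes matching one change id.
+
+ Used by the /{tenant}/status/change/{change} route below: the
+ pipeline/queue/head tree is flattened into a list of the change
+ dicts whose 'id' matches the requested change.
+ """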
+ def __init__(self, desired):
+ self.desired = desired
+
+ def filterPayload(self, payload):
+ status = []
+ for pipeline in payload['pipelines']:
+ for change_queue in pipeline['change_queues']:
+ for head in change_queue['heads']:
+ for change in head:
+ if self.wantChange(change):
+ status.append(copy.deepcopy(change))
+ return status
+
+ def wantChange(self, change):
+ return change['id'] == self.desired
+
+
class ZuulWeb(object):
log = logging.getLogger("zuul.web.ZuulWeb")
@@ -210,13 +236,16 @@
gear_server, gear_port,
ssl_key=None, ssl_cert=None, ssl_ca=None,
static_cache_expiry=3600,
- connections=None):
+ connections=None,
+ info=None):
+ self.start_time = time.time()
self.listen_address = listen_address
self.listen_port = listen_port
self.event_loop = None
self.term = None
self.server = None
self.static_cache_expiry = static_cache_expiry
+ self.info = info
# instantiate handlers
self.rpc = zuul.rpcclient.RPCClient(gear_server, gear_port,
ssl_key, ssl_cert, ssl_ca)
@@ -225,12 +254,37 @@
self._plugin_routes = [] # type: List[zuul.web.handler.BaseWebHandler]
connections = connections or []
for connection in connections:
- self._plugin_routes.extend(connection.getWebHandlers(self))
+ self._plugin_routes.extend(
+ connection.getWebHandlers(self, self.info))
async def _handleWebsocket(self, request):
return await self.log_streaming_handler.processRequest(
request)
+ async def _handleRootInfo(self, request):
+ info = self.info.copy()
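+ # request.url.parent strips the trailing 'info' path component,
+ # leaving the base URL on which the API is served.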
+ info.endpoint = str(request.url.parent)
+ return self._handleInfo(info)
+
+ async def _handleTenantInfo(self, request):
+ info = self.info.copy()
+ info.tenant = request.match_info["tenant"]
+ # yarl.URL.parent on a root url returns the root url, so this is
+ # safe and accurate for white-labeled tenants like OpenStack, for
+ # zuul-web running on /, and for zuul-web running on a sub-url
+ # like softwarefactory-project.io
+ info.endpoint = str(request.url.parent.parent.parent)
+ return self._handleInfo(info)
+
+ def _handleInfo(self, info):
+ resp = web.json_response({'info': info.toDict()}, status=200)
+ resp.headers['Access-Control-Allow-Origin'] = '*'
+ if self.static_cache_expiry:
+ resp.headers['Cache-Control'] = "public, max-age=%d" % \
+ self.static_cache_expiry
+ resp.last_modified = self.start_time
+ return resp
+
async def _handleTenantsRequest(self, request):
return await self.gearman_handler.processRequest(request,
'tenant_list')
@@ -238,6 +292,11 @@
async def _handleStatusRequest(self, request):
return await self.gearman_handler.processRequest(request, 'status_get')
+ async def _handleStatusChangeRequest(self, request):
+ change = request.match_info["change"]
+ return await self.gearman_handler.processRequest(
+ request, 'status_get', ChangeFilter(change))
+
async def _handleJobsRequest(self, request):
return await self.gearman_handler.processRequest(request, 'job_list')
@@ -256,9 +315,13 @@
is run within a separate (non-main) thread.
"""
routes = [
- ('GET', '/tenants.json', self._handleTenantsRequest),
- ('GET', '/{tenant}/status.json', self._handleStatusRequest),
- ('GET', '/{tenant}/jobs.json', self._handleJobsRequest),
+ ('GET', '/info', self._handleRootInfo),
+ ('GET', '/{tenant}/info', self._handleTenantInfo),
+ ('GET', '/tenants', self._handleTenantsRequest),
+ ('GET', '/{tenant}/status', self._handleStatusRequest),
+ ('GET', '/{tenant}/jobs', self._handleJobsRequest),
+ ('GET', '/{tenant}/status/change/{change}',
+ self._handleStatusChangeRequest),
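+ # NOTE: the '.json' suffixes are dropped; e.g. tenant status is
+ # now fetched via GET /{tenant}/status.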
('GET', '/{tenant}/console-stream', self._handleWebsocket),
('GET', '/{tenant}/{project:.*}.pub', self._handleKeyRequest),
]
diff --git a/zuul/web/static/javascripts/jquery.zuul.js b/zuul/web/static/javascripts/jquery.zuul.js
index 7e6788b..7da81dc 100644
--- a/zuul/web/static/javascripts/jquery.zuul.js
+++ b/zuul/web/static/javascripts/jquery.zuul.js
@@ -49,7 +49,7 @@
options = $.extend({
'enabled': true,
'graphite_url': '',
- 'source': 'status.json',
+ 'source': 'status',
'msg_id': '#zuul_msg',
'pipelines_id': '#zuul_pipelines',
'queue_events_num': '#zuul_queue_events_num',
diff --git a/zuul/web/static/javascripts/zuul.angular.js b/zuul/web/static/javascripts/zuul.angular.js
index 87cbbdd..49f2518 100644
--- a/zuul/web/static/javascripts/zuul.angular.js
+++ b/zuul/web/static/javascripts/zuul.angular.js
@@ -23,7 +23,7 @@
{
$scope.tenants = undefined;
$scope.tenants_fetch = function() {
- $http.get("tenants.json")
+ $http.get("tenants")
.then(function success(result) {
$scope.tenants = result.data;
});
@@ -36,7 +36,7 @@
{
$scope.jobs = undefined;
$scope.jobs_fetch = function() {
- $http.get("jobs.json")
+ $http.get("jobs")
.then(function success(result) {
$scope.jobs = result.data;
});
@@ -78,7 +78,7 @@
if ($scope.job_name) {query_string += "&job_name="+$scope.job_name;}
if ($scope.project) {query_string += "&project="+$scope.project;}
if (query_string != "") {query_string = "?" + query_string.substr(1);}
- $http.get("builds.json" + query_string)
+ $http.get("builds" + query_string)
.then(function success(result) {
for (build_pos = 0;
build_pos < result.data.length;
diff --git a/zuul/web/static/javascripts/zuul.app.js b/zuul/web/static/javascripts/zuul.app.js
index bf90a4d..6e35eb3 100644
--- a/zuul/web/static/javascripts/zuul.app.js
+++ b/zuul/web/static/javascripts/zuul.app.js
@@ -55,7 +55,7 @@
var demo = location.search.match(/[?&]demo=([^?&]*)/),
source_url = location.search.match(/[?&]source_url=([^?&]*)/),
source = demo ? './status-' + (demo[1] || 'basic') + '.json-sample' :
- 'status.json';
+ 'status';
source = source_url ? source_url[1] : source;
var zuul = $.zuul({