Merge "Add host/group vars"
diff --git a/.zuul.yaml b/.zuul.yaml
index d73be8f..caef296 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -25,8 +25,9 @@
required-projects:
- openstack/ara
files:
- - zuul/ansible/callback/.*
+ - zuul/ansible/.*
- playbooks/zuul-stream/.*
+ - requirements.txt
- project:
check:
diff --git a/doc/source/admin/drivers/github.rst b/doc/source/admin/drivers/github.rst
index 83ac77f..a89cfc6 100644
--- a/doc/source/admin/drivers/github.rst
+++ b/doc/source/admin/drivers/github.rst
@@ -40,60 +40,43 @@
Application
...........
+.. NOTE Duplicate content here and in zuul-from-scratch.rst. Keep them
+ in sync.
+
To create a `GitHub application
<https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/registering-github-apps/>`_:
* Go to your organization settings page to create the application, e.g.:
https://github.com/organizations/my-org/settings/apps/new
-
* Set GitHub App name to "my-org-zuul"
-
* Set Setup URL to your setup documentation; when users install the
  application they are redirected to this URL
-
* Set Webhook URL to
``http://<zuul-hostname>/connection/<connection-name>/payload``.
-
* Create a Webhook secret
-
* Set permissions:
* Commit statuses: Read & Write
-
* Issues: Read & Write
-
* Pull requests: Read & Write
-
* Repository contents: Read & Write (write to let Zuul merge changes)
+ * Repository administration: Read
* Set events subscription:
-
* Label
-
* Status
-
* Issue comment
-
* Issues
-
* Pull request
-
* Pull request review
-
* Pull request review comment
-
* Commit comment
-
* Create
-
* Push
-
* Release
* Set "Where can this GitHub App be installed" to "Any account"
-
* Create the App
-
* Generate a Private key in the app settings page
Then in the zuul.conf, set webhook_token, app_id and app_key.
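+
+For example (the values here are placeholders)::
+
+  [connection github]
+  driver=github
+  app_id=1234
+  app_key=/etc/zuul/github.pem
+  webhook_token=<webhook secret>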
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index a2a2ee7..af83a3b 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -12,6 +12,7 @@
:maxdepth: 2
quick-start
+ zuul-from-scratch
installation
components
connections
diff --git a/doc/source/admin/monitoring.rst b/doc/source/admin/monitoring.rst
index 1c17c28..fbcedad 100644
--- a/doc/source/admin/monitoring.rst
+++ b/doc/source/admin/monitoring.rst
@@ -182,11 +182,11 @@
The one-minute load average of this executor, multiplied by 100.
- .. stat:: pct_available_ram
+ .. stat:: pct_used_ram
:type: gauge
- The available RAM (including buffers and cache) on this
- executor, as a percentage multiplied by 100.
+ The used RAM (excluding buffers and cache) on this executor, as
+ a percentage multiplied by 100.
.. stat:: zuul.nodepool
diff --git a/doc/source/admin/tenants.rst b/doc/source/admin/tenants.rst
index 48e7ba8..5bcd2a2 100644
--- a/doc/source/admin/tenants.rst
+++ b/doc/source/admin/tenants.rst
@@ -25,7 +25,7 @@
------
A tenant is a collection of projects which share a Zuul
-configuration. An example tenant definition is:
+configuration. Some examples of tenant definitions are:
.. code-block:: yaml
@@ -46,6 +46,27 @@
- project2:
exclude-unprotected-branches: true
+.. code-block:: yaml
+
+ - tenant:
+ name: my-tenant
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - exclude:
+ - job
+ - semaphore
+ - project
+ - project-template
+ - nodeset
+ - secret
+ projects:
+ - project1
+ - project2:
+ exclude-unprotected-branches: true
+
.. attr:: tenant
The following attributes are supported:
@@ -157,6 +178,24 @@
processed. Defaults to the tenant wide setting of
exclude-unprotected-branches.
+ .. attr:: <project-group>
+
+      The items in the list are dictionaries with the following
+      attributes. The specified **configuration items** are applied
+      to every project in the list.
+
+ .. attr:: include
+
+ A list of **configuration items** that should be loaded.
+
+ .. attr:: exclude
+
+ A list of **configuration items** that should not be loaded.
+
+ .. attr:: projects
+
+ A list of **project** items.
+
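+      For example, a group that loads only job and project-template
+      definitions from its projects might look like this (the
+      project names are illustrative):
+
+      .. code-block:: yaml
+
+         untrusted-projects:
+           - include:
+               - job
+               - project-template
+             projects:
+               - project1
+               - project2
+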
.. attr:: max-nodes-per-job
:default: 5
diff --git a/doc/source/admin/zuul-from-scratch.rst b/doc/source/admin/zuul-from-scratch.rst
new file mode 100644
index 0000000..141216b
--- /dev/null
+++ b/doc/source/admin/zuul-from-scratch.rst
@@ -0,0 +1,505 @@
+Zuul From Scratch
+=================
+
+.. note:: This is a work in progress that attempts to walk through all
+ of the steps needed to run Zuul on a cloud server against
+ GitHub projects.
+
+Environment Setup
+-----------------
+
+We're going to be using Fedora 27 on a cloud server for this
+installation.
+
+Log in to your environment
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since we'll be using a cloud image for Fedora 27, our login user will
+be ``fedora``, which will also be the staging user for installation of
+Zuul and Nodepool.
+
+To get started, ssh to your machine as the ``fedora`` user::
+
+ ssh fedora@<ip_address>
+
+Install Prerequisites
+~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo dnf update -y
+ sudo systemctl reboot
+ sudo dnf install git redhat-lsb-core python3 python3-pip python3-devel make gcc openssl-devel python-openstackclient -y
+ pip3 install --user bindep
+
+Zuul and Nodepool Installation
+------------------------------
+
+Install Zookeeper
+~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo dnf install zookeeper -y
+
+Install Nodepool
+~~~~~~~~~~~~~~~~
+
+::
+
+ sudo adduser --system nodepool --home-dir /var/lib/nodepool --create-home
+ git clone https://git.openstack.org/openstack-infra/nodepool
+ cd nodepool/
+ sudo dnf -y install $(bindep -b)
+ sudo pip3 install .
+
+Install Zuul
+~~~~~~~~~~~~
+
+::
+
+ sudo adduser --system zuul --home-dir /var/lib/zuul --create-home
+ git clone https://git.openstack.org/openstack-infra/zuul
+ cd zuul/
+ sudo dnf install $(bindep -b) -y
+ sudo pip3 install git+https://github.com/sigmavirus24/github3.py.git@develop#egg=Github3.py
+ sudo pip3 install .
+
+Setup
+-----
+
+Zookeeper Setup
+~~~~~~~~~~~~~~~
+
+.. TODO recommended reading for zk clustering setup
+
+::
+
+ sudo bash -c 'echo "1" > /etc/zookeeper/myid'
+ sudo bash -c 'echo "tickTime=2000
+ dataDir=/var/lib/zookeeper
+ clientPort=2181" > /etc/zookeeper/zoo.cfg'
+
+Nodepool Setup
+~~~~~~~~~~~~~~
+
+Before starting on this, you need to download your `openrc`
+configuration from your OpenStack cloud. Put it on your server in the
+fedora user's home directory. It should be called
+``<username>-openrc.sh``. Once that is done, create a new keypair
+that will be installed when instantiating the servers::
+
+ cd ~
+ source <username>-openrc.sh # this will prompt for password - enter it
+ umask 0066
+
+ ssh-keygen -t rsa -b 2048 -f nodepool_rsa # don't enter a passphrase
+ openstack keypair create --public-key nodepool_rsa.pub nodepool
+
+We'll use the private key later when configuring Zuul. In the same
+session, configure nodepool to talk to your cloud::
+
+ sudo mkdir -p ~nodepool/.config/openstack
+ cat > clouds.yaml <<EOF
+ clouds:
+ mycloud:
+ auth:
+ username: $OS_USERNAME
+ password: $OS_PASSWORD
+ project_name: ${OS_PROJECT_NAME:-$OS_TENANT_NAME}
+ auth_url: $OS_AUTH_URL
+ region_name: $OS_REGION_NAME
+ EOF
+ sudo mv clouds.yaml ~nodepool/.config/openstack/
+ sudo chown -R nodepool.nodepool ~nodepool/.config
+ umask 0002
+
+Once you've written out the file, double-check that all the required fields have been filled in.
+
+::
+
+ sudo mkdir /etc/nodepool/
+ sudo mkdir /var/log/nodepool
+ sudo chgrp -R nodepool /var/log/nodepool/
+ sudo chmod 775 /var/log/nodepool/
+
+Nodepool Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+Inputs needed for this file:
+
+* cloud name / region name - from clouds.yaml
+* flavor-name
+* image-name - from your cloud
+
+::
+
+ sudo bash -c "cat >/etc/nodepool/nodepool.yaml <<EOF
+ zookeeper-servers:
+ - host: localhost
+ port: 2181
+
+ providers:
+ - name: myprovider # this is a nodepool identifier for this cloud provider (cloud+region combo)
+ region-name: regionOne # this needs to match the region name in clouds.yaml but is only needed if there is more than one region
+ cloud: mycloud # This needs to match the name in clouds.yaml
+ cloud-images:
+ - name: centos-7 # Defines a cloud-image for nodepool
+ image-name: CentOS-7-x86_64-GenericCloud-1706 # name of image from cloud
+ username: centos # The user Zuul should log in as
+ pools:
+ - name: main
+ max-servers: 4 # nodepool will never create more than this many servers
+ labels:
+ - name: centos-7-small # defines label that will be used to get one of these in a job
+ flavor-name: 'm1.small' # name of flavor from cloud
+ cloud-image: centos-7 # matches name from cloud-images
+ key-name: nodepool # name of the keypair to use for authentication
+
+ labels:
+ - name: centos-7-small # defines label that will be used in jobs
+ min-ready: 2 # nodepool will always keep this many booted and ready to go
+ EOF"
+
+.. warning::
+
+   `min-ready: 2` may incur costs with your cloud provider
+
+
+Zuul Setup
+~~~~~~~~~~
+
+::
+
+ sudo mkdir /etc/zuul/
+ sudo mkdir /var/log/zuul/
+ sudo chown zuul.zuul /var/log/zuul/
+ sudo mkdir /var/lib/zuul/.ssh
+ sudo chmod 0700 /var/lib/zuul/.ssh
+ sudo mv nodepool_rsa /var/lib/zuul/.ssh
+ sudo chown -R zuul.zuul /var/lib/zuul/.ssh
+
+Zuul Configuration
+~~~~~~~~~~~~~~~~~~
+
+Write the Zuul config file. Note that this configures Zuul's web
+server to listen on all public addresses. This is so that Zuul may
+receive webhook events from GitHub. You may wish to proxy this or
+further restrict public access.
+
+::
+
+ sudo bash -c "cat > /etc/zuul/zuul.conf <<EOF
+ [gearman]
+ server=127.0.0.1
+
+ [gearman_server]
+ start=true
+
+ [executor]
+   private_key_file=/var/lib/zuul/.ssh/nodepool_rsa
+
+ [web]
+ listen_address=0.0.0.0
+
+ [scheduler]
+ tenant_config=/etc/zuul/main.yaml
+ EOF"
+
+ sudo bash -c "cat > /etc/zuul/main.yaml <<EOF
+ - tenant:
+ name: quickstart
+ EOF"
+
+Service Management
+------------------
+
+Zookeeper Service Management
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo systemctl start zookeeper.service
+
+::
+
+ sudo systemctl status zookeeper.service
+ ● zookeeper.service - Apache ZooKeeper
+ Loaded: loaded (/usr/lib/systemd/system/zookeeper.service; disabled; vendor preset: disabled)
+ Active: active (running) since Wed 2018-01-03 14:53:47 UTC; 5s ago
+ Process: 4153 ExecStart=/usr/bin/zkServer.sh start zoo.cfg (code=exited, status=0/SUCCESS)
+ Main PID: 4160 (java)
+ Tasks: 17 (limit: 4915)
+ CGroup: /system.slice/zookeeper.service
+ └─4160 java -Dzookeeper.log.dir=/var/log/zookeeper -Dzookeeper.root.logger=INFO,CONSOLE -cp /usr/share/java/
+
+::
+
+ sudo systemctl enable zookeeper.service
+
+
+Nodepool Service Management
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ sudo bash -c "cat > /etc/systemd/system/nodepool-launcher.service <<EOF
+ [Unit]
+ Description=Nodepool Launcher Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ # Options to pass to nodepool-launcher.
+ Group=nodepool
+ User=nodepool
+ RuntimeDirectory=nodepool
+ ExecStart=/usr/local/bin/nodepool-launcher
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo chmod 0644 /etc/systemd/system/nodepool-launcher.service
+ sudo systemctl daemon-reload
+ sudo systemctl start nodepool-launcher.service
+ sudo systemctl status nodepool-launcher.service
+ sudo systemctl enable nodepool-launcher.service
+
+Zuul Service Management
+~~~~~~~~~~~~~~~~~~~~~~~
+::
+
+ sudo bash -c "cat > /etc/systemd/system/zuul-scheduler.service <<EOF
+ [Unit]
+ Description=Zuul Scheduler Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ Group=zuul
+ User=zuul
+ RuntimeDirectory=zuul
+ ExecStart=/usr/local/bin/zuul-scheduler
+ ExecStop=/usr/local/bin/zuul-scheduler stop
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo bash -c "cat > /etc/systemd/system/zuul-executor.service <<EOF
+ [Unit]
+ Description=Zuul Executor Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ Group=zuul
+ User=zuul
+ RuntimeDirectory=zuul
+ ExecStart=/usr/local/bin/zuul-executor
+ ExecStop=/usr/local/bin/zuul-executor stop
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo bash -c "cat > /etc/systemd/system/zuul-web.service <<EOF
+ [Unit]
+ Description=Zuul Web Service
+ After=syslog.target network.target
+
+ [Service]
+ Type=simple
+ Group=zuul
+ User=zuul
+ RuntimeDirectory=zuul
+ ExecStart=/usr/local/bin/zuul-web
+ ExecStop=/usr/local/bin/zuul-web stop
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF"
+
+ sudo systemctl daemon-reload
+ sudo systemctl start zuul-scheduler.service
+ sudo systemctl status zuul-scheduler.service
+ sudo systemctl enable zuul-scheduler.service
+ sudo systemctl start zuul-executor.service
+ sudo systemctl status zuul-executor.service
+ sudo systemctl enable zuul-executor.service
+ sudo systemctl start zuul-web.service
+ sudo systemctl status zuul-web.service
+ sudo systemctl enable zuul-web.service
+
+Use Zuul Jobs
+-------------
+
+Add to ``/etc/zuul/zuul.conf``::
+
+ sudo bash -c "cat >> /etc/zuul/zuul.conf <<EOF
+
+ [connection zuul-git]
+ driver=git
+ baseurl=https://git.openstack.org/
+ EOF"
+
+Restart executor and scheduler::
+
+ sudo systemctl restart zuul-executor.service
+ sudo systemctl restart zuul-scheduler.service
+
+Configure GitHub
+----------------
+
+You'll need an organization in GitHub for this, so create one if you
+haven't already. In this example we will use `my-org`.
+
+.. NOTE Duplicate content here and in drivers/github.rst. Keep them
+ in sync.
+
+Create a `GitHub application
+<https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/registering-github-apps/>`_:
+
+* Go to your organization settings page to create the application, e.g.:
+ https://github.com/organizations/my-org/settings/apps/new
+* Set GitHub App name to "my-org-zuul"
+* Set Setup URL to your setup documentation; when users install the
+  application they are redirected to this URL
+* Set Webhook URL to
+ ``http://<IP ADDRESS>/connection/github/payload``.
+* Create a Webhook secret, and record it for later use
+* Set permissions:
+
+ * Commit statuses: Read & Write
+ * Issues: Read & Write
+ * Pull requests: Read & Write
+  * Repository contents: Read & Write (write to let Zuul merge changes)
+ * Repository administration: Read
+
+* Set events subscription:
+
+ * Label
+ * Status
+ * Issue comment
+ * Issues
+ * Pull request
+ * Pull request review
+ * Pull request review comment
+ * Commit comment
+ * Create
+ * Push
+ * Release
+
+* Set "Where can this GitHub App be installed" to "Any account"
+* Create the App
+* Generate a Private key in the app settings page and save the file for later
+
+.. TODO See if we can script this using GitHub API
+
+Go back to the `General` settings page for the app,
+https://github.com/organizations/my-org/settings/apps/my-org-zuul
+and look for the app `ID` number, under the `About` section.
+
+Edit ``/etc/zuul/zuul.conf`` to add the following::
+
+ [connection github]
+ driver=github
+ app_id=<APP ID NUMBER>
+ app_key=/etc/zuul/github.pem
+ webhook_token=<WEBHOOK TOKEN>
+
+Upload the private key which was generated earlier, and save it in
+``/etc/zuul/github.pem``.
+
+Restart all of Zuul::
+
+ sudo systemctl restart zuul-executor.service
+ sudo systemctl restart zuul-web.service
+ sudo systemctl restart zuul-scheduler.service
+
+Go to the `Advanced` tab for the app in GitHub,
+https://github.com/organizations/my-org/settings/apps/my-org-zuul/advanced,
+and look for the initial ping from the app. It probably wasn't
+delivered since Zuul wasn't configured at the time, so click
+``Resend`` and verify that it is delivered now that Zuul is
+configured.
+
+Visit the public app page on GitHub,
+https://github.com/apps/my-org-zuul, and install the app into your org.
+
+Create two new repositories in your org. One will hold the
+configuration for this tenant in Zuul; the other should be a normal
+project repo to use for testing. We'll call them `zuul-test-config`
+and `zuul-test`, respectively.
+
+Edit ``/etc/zuul/main.yaml`` so that it looks like this::
+
+ - tenant:
+ name: quickstart
+ source:
+ zuul-git:
+ config-projects:
+ - openstack-infra/zuul-base-jobs
+ untrusted-projects:
+ - openstack-infra/zuul-jobs
+ github:
+ config-projects:
+ - my-org/zuul-test-config
+ untrusted-projects:
+ - my-org/zuul-test
+
+The first section, under 'zuul-git', imports the "standard library" of
+Zuul jobs: a collection of jobs that can be used by any Zuul
+installation.
+
+The second section is your GitHub configuration.
+
+After updating the file, restart the Zuul scheduler::
+
+ sudo systemctl restart zuul-scheduler.service
+
+Add an initial pipeline configuration to the `zuul-test-config`
+repository. Inside that project, create a ``zuul.yaml`` file with the
+following contents::
+
+ - pipeline:
+ name: check
+ description: |
+ Newly opened pull requests enter this pipeline to receive an
+ initial verification
+ manager: independent
+ trigger:
+ github:
+ - event: pull_request
+ action:
+ - opened
+ - changed
+ - reopened
+ - event: pull_request
+ action: comment
+ comment: (?i)^\s*recheck\s*$
+ start:
+ github:
+ status: pending
+ comment: false
+ success:
+ github:
+ status: 'success'
+ failure:
+ github:
+ status: 'failure'
+
+Merge that commit into the repository.
+
+In the `zuul-test` project, create a `.zuul.yaml` file with the
+following contents::
+
+ - project:
+ check:
+ jobs:
+ - noop
+
+Open a new pull request with that commit against the `zuul-test`
+project and verify that Zuul reports a successful run of the `noop`
+job.
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 18d28c4..0932c56 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -546,6 +546,12 @@
from this job. Once this is set to ``true`` it cannot be reset to
``false``.
+ .. attr:: abstract
+ :default: false
+
+ To indicate a job is not intended to be run directly, but
+ instead must be inherited from, set this attribute to ``true``.
+
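+      For example, a base job meant only to be inherited from can be
+      marked abstract (the job names here are illustrative):
+
+      .. code-block:: yaml
+
+         - job:
+             name: base-integration
+             abstract: true
+
+         - job:
+             name: mysql-integration
+             parent: base-integration
+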
.. attr:: success-message
:default: SUCCESS
@@ -704,6 +710,21 @@
timeout is supplied, the job may run indefinitely. Supplying a
timeout is highly recommended.
+ This timeout only applies to the pre-run and run playbooks in a
+ job.
+
+ .. attr:: post-timeout
+
+ The time in seconds that each post playbook should be allowed to run
+ before it is automatically aborted and failure is reported. If no
+ post-timeout is supplied, the job may run indefinitely. Supplying a
+ post-timeout is highly recommended.
+
+ The post-timeout is handled separately from the above timeout because
+      the post playbooks are typically where you will copy job logs.
+      In the event of the pre-run or run playbooks timing out, we want
+      to do our best to copy the job logs in the post-run playbooks.
+
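+      For example, a job might allow its pre-run and run playbooks an
+      hour in total while giving each post playbook a shorter window
+      (the values are illustrative):
+
+      .. code-block:: yaml
+
+         - job:
+             name: integration-test
+             timeout: 3600
+             post-timeout: 1800
+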
.. attr:: attempts
:default: 3
diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 820e316..4e1c33d 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -289,6 +289,10 @@
The job timeout, in seconds.
+ .. var:: post_timeout
+
+ The post-run playbook timeout, in seconds.
+
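+      For example, a post playbook could read it like any other item
+      in the ``zuul`` dictionary (a sketch):
+
+      .. code-block:: yaml
+
+         - debug:
+             msg: "Each post playbook may run for {{ zuul.post_timeout }} seconds"
+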
.. var:: jobtags
A list of tags associated with the job. Not to be confused with
diff --git a/tests/base.py b/tests/base.py
index 96491f9..be8c17b 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -529,6 +529,24 @@
}
return event
+ def getFakeBranchDeletedEvent(self, project, branch):
+ oldrev = '4abd38457c2da2a72d4d030219ab180ecdb04bf0'
+ newrev = 40 * '0'
+
+ event = {
+ "type": "ref-updated",
+ "submitter": {
+ "name": "User Name",
+ },
+ "refUpdate": {
+ "oldRev": oldrev,
+ "newRev": newrev,
+ "refName": 'refs/heads/' + branch,
+ "project": project,
+ }
+ }
+ return event
+
def review(self, project, changeid, message, action):
number, ps = changeid.split(',')
change = self.changes[int(number)]
@@ -941,7 +959,7 @@
log = logging.getLogger("zuul.test.FakeGithubConnection")
def __init__(self, driver, connection_name, connection_config, rpcclient,
- changes_db=None, upstream_root=None):
+ changes_db=None, upstream_root=None, git_url_with_auth=False):
super(FakeGithubConnection, self).__init__(driver, connection_name,
connection_config)
self.connection_name = connection_name
@@ -953,6 +971,7 @@
self.merge_not_allowed_count = 0
self.reports = []
self.github_client = tests.fakegithub.FakeGithub(changes_db)
+ self.git_url_with_auth = git_url_with_auth
self.rpcclient = rpcclient
def getGithubClient(self,
@@ -1045,7 +1064,13 @@
return 'read'
def getGitUrl(self, project):
- return os.path.join(self.upstream_root, str(project))
+ if self.git_url_with_auth:
+ auth_token = ''.join(
+ random.choice(string.ascii_lowercase) for x in range(8))
+ prefix = 'file://x-access-token:%s@' % auth_token
+ else:
+ prefix = ''
+ return prefix + os.path.join(self.upstream_root, str(project))
def real_getGitUrl(self, project):
return super(FakeGithubConnection, self).getGitUrl(project)
@@ -1432,6 +1457,7 @@
def __init__(self, use_ssl=False):
self.hold_jobs_in_queue = False
self.hold_merge_jobs_in_queue = False
+ self.jobs_history = []
if use_ssl:
ssl_ca = os.path.join(FIXTURE_DIR, 'gearman/root-ca.pem')
ssl_cert = os.path.join(FIXTURE_DIR, 'gearman/server.pem')
@@ -1448,6 +1474,7 @@
def getJobForConnection(self, connection, peek=False):
for job_queue in [self.high_queue, self.normal_queue, self.low_queue]:
for job in job_queue:
+ self.jobs_history.append(job)
if not hasattr(job, 'waiting'):
if job.name.startswith(b'executor:execute'):
job.waiting = self.hold_jobs_in_queue
@@ -1905,6 +1932,7 @@
run_ansible = False
create_project_keys = False
use_ssl = False
+ git_url_with_auth = False
def _startMerger(self):
self.merge_server = zuul.merger.server.MergeServer(self.config,
@@ -2074,10 +2102,12 @@
def getGithubConnection(driver, name, config):
server = config.get('server', 'github.com')
db = self.github_changes_dbs.setdefault(server, {})
- con = FakeGithubConnection(driver, name, config,
- self.rpcclient,
- changes_db=db,
- upstream_root=self.upstream_root)
+ con = FakeGithubConnection(
+ driver, name, config,
+ self.rpcclient,
+ changes_db=db,
+ upstream_root=self.upstream_root,
+ git_url_with_auth=self.git_url_with_auth)
self.event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
return con
diff --git a/tests/fixtures/config/abstract/git/common-config/playbooks/base.yaml b/tests/fixtures/config/abstract/git/common-config/playbooks/base.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/abstract/git/common-config/playbooks/base.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/abstract/git/common-config/zuul.yaml b/tests/fixtures/config/abstract/git/common-config/zuul.yaml
new file mode 100644
index 0000000..4aeb947
--- /dev/null
+++ b/tests/fixtures/config/abstract/git/common-config/zuul.yaml
@@ -0,0 +1,25 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/base.yaml
+
+- job:
+ name: job-abstract
+ abstract: true
+
+- job:
+ name: job-child
+ parent: job-abstract
diff --git a/tests/fixtures/config/abstract/git/org_project/zuul.yaml b/tests/fixtures/config/abstract/git/org_project/zuul.yaml
new file mode 100644
index 0000000..cf635e8
--- /dev/null
+++ b/tests/fixtures/config/abstract/git/org_project/zuul.yaml
@@ -0,0 +1,4 @@
+- project:
+ name: org/project
+ check:
+ jobs: []
diff --git a/tests/fixtures/config/abstract/main.yaml b/tests/fixtures/config/abstract/main.yaml
new file mode 100644
index 0000000..208e274
--- /dev/null
+++ b/tests/fixtures/config/abstract/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index 7637a08..13a19da 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -99,6 +99,12 @@
- job:
parent: python27
+ name: post-timeout
+ post-run: playbooks/timeout.yaml
+ post-timeout: 1
+
+- job:
+ parent: python27
name: check-vars
run: playbooks/check-vars.yaml
nodeset:
diff --git a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
index 142625f..e332924 100644
--- a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
@@ -18,5 +18,6 @@
- check-hostvars
- check-secret-names
- timeout
+ - post-timeout
- hello-world
- failpost
diff --git a/tests/fixtures/config/branch-deletion/git/common-config/playbooks/base.yaml b/tests/fixtures/config/branch-deletion/git/common-config/playbooks/base.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/common-config/playbooks/base.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/branch-deletion/git/common-config/zuul.yaml b/tests/fixtures/config/branch-deletion/git/common-config/zuul.yaml
new file mode 100644
index 0000000..04091a7
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/common-config/zuul.yaml
@@ -0,0 +1,17 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/base.yaml
diff --git a/tests/fixtures/config/branch-deletion/git/org_project/zuul.yaml b/tests/fixtures/config/branch-deletion/git/org_project/zuul.yaml
new file mode 100644
index 0000000..cf635e8
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/org_project/zuul.yaml
@@ -0,0 +1,4 @@
+- project:
+ name: org/project
+ check:
+ jobs: []
diff --git a/tests/fixtures/config/branch-deletion/git/org_project1/zuul.yaml b/tests/fixtures/config/branch-deletion/git/org_project1/zuul.yaml
new file mode 100644
index 0000000..1fc35b5
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/git/org_project1/zuul.yaml
@@ -0,0 +1,3 @@
+- project:
+ check:
+ jobs: []
diff --git a/tests/fixtures/config/branch-deletion/main.yaml b/tests/fixtures/config/branch-deletion/main.yaml
new file mode 100644
index 0000000..9ffae3d
--- /dev/null
+++ b/tests/fixtures/config/branch-deletion/main.yaml
@@ -0,0 +1,10 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
+ - org/project1
+
diff --git a/tests/fixtures/config/tenant-parser/groups4.yaml b/tests/fixtures/config/tenant-parser/groups4.yaml
new file mode 100644
index 0000000..3512673
--- /dev/null
+++ b/tests/fixtures/config/tenant-parser/groups4.yaml
@@ -0,0 +1,11 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - include: []
+ projects:
+ - org/project1
+ - org/project2
diff --git a/tests/nodepool/test_nodepool_integration.py b/tests/nodepool/test_nodepool_integration.py
index 9c87a10..bd22da3 100644
--- a/tests/nodepool/test_nodepool_integration.py
+++ b/tests/nodepool/test_nodepool_integration.py
@@ -30,6 +30,7 @@
def setUp(self):
super(TestNodepoolIntegration, self).setUp()
+ self.statsd = None
self.zk = zuul.zk.ZooKeeper()
self.addCleanup(self.zk.disconnect)
self.zk.connect('localhost:2181')
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index f7d580c..49220f2 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -213,6 +213,33 @@
project2_config.pipelines['check'].job_list.jobs)
+class TestTenantGroups4(TenantParserTestCase):
+ tenant_config_file = 'config/tenant-parser/groups4.yaml'
+
+ def test_tenant_groups(self):
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ self.assertEqual(['common-config'],
+ [x.name for x in tenant.config_projects])
+ self.assertEqual(['org/project1', 'org/project2'],
+ [x.name for x in tenant.untrusted_projects])
+ project = tenant.config_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.CONFIG_SET, tpc.load_classes)
+ project = tenant.untrusted_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(set([]),
+ tpc.load_classes)
+ project = tenant.untrusted_projects[1]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(set([]),
+ tpc.load_classes)
+ # Check that only one merger:cat job was requested
+ # org/project1 and org/project2 have an empty load_classes
+ cat_jobs = [job for job in self.gearman_server.jobs_history
+ if job.name == b'merger:cat']
+ self.assertEqual(1, len(cat_jobs))
+
+
class TestTenantUnprotectedBranches(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/unprotected-branches.yaml'
diff --git a/tests/unit/test_merger_repo.py b/tests/unit/test_merger_repo.py
index fb2f199..984644f 100644
--- a/tests/unit/test_merger_repo.py
+++ b/tests/unit/test_merger_repo.py
@@ -22,7 +22,7 @@
import testtools
from zuul.merger.merger import Repo
-from tests.base import ZuulTestCase, FIXTURE_DIR
+from tests.base import ZuulTestCase, FIXTURE_DIR, simple_layout
class TestMergerRepo(ZuulTestCase):
@@ -116,3 +116,63 @@
# This is created on the second fetch
self.assertTrue(os.path.exists(os.path.join(
self.workspace_root, 'stamp2')))
+
+
+class TestMergerWithAuthUrl(ZuulTestCase):
+ config_file = 'zuul-github-driver.conf'
+
+ git_url_with_auth = True
+
+ @simple_layout('layouts/merging-github.yaml', driver='github')
+ def test_changing_url(self):
+ """
+ This test checks that if getGitUrl returns different urls for the same
+ repo (which happens if an access token is part of the url) then the
+ remote urls are changed in the merger accordingly. This tests directly
+        remote urls are changed in the merger accordingly. This tests the
+        merger directly.
+
+ merger = self.executor_server.merger
+ repo = merger.getRepo('github', 'org/project')
+ first_url = repo.remote_url
+
+ repo = merger.getRepo('github', 'org/project')
+ second_url = repo.remote_url
+
+ # the urls should differ
+ self.assertNotEqual(first_url, second_url)
+
+ @simple_layout('layouts/merging-github.yaml', driver='github')
+ def test_changing_url_end_to_end(self):
+ """
+ This test checks that if getGitUrl returns different urls for the same
+ repo (which happens if an access token is part of the url) then the
+        remote urls are changed in the merger accordingly. This is an
+        end-to-end test.
+ """
+
+ A = self.fake_github.openFakePullRequest('org/project', 'master',
+ 'PR title')
+ self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
+ self.waitUntilSettled()
+ self.assertTrue(A.is_merged)
+
+ # get remote url of org/project in merger
+ repo = self.executor_server.merger.repos.get('github.com/org/project')
+ self.assertIsNotNone(repo)
+ git_repo = git.Repo(repo.local_path)
+ first_url = list(git_repo.remotes[0].urls)[0]
+
+ B = self.fake_github.openFakePullRequest('org/project', 'master',
+ 'PR title')
+ self.fake_github.emitEvent(B.getCommentAddedEvent('merge me again'))
+ self.waitUntilSettled()
+ self.assertTrue(B.is_merged)
+
+ repo = self.executor_server.merger.repos.get('github.com/org/project')
+ self.assertIsNotNone(repo)
+ git_repo = git.Repo(repo.local_path)
+ second_url = list(git_repo.remotes[0].urls)[0]
+
+ # the urls should differ
+ self.assertNotEqual(first_url, second_url)
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 44eda82..e36c8f6 100755
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -74,44 +74,43 @@
class TestProtected(ZuulTestCase):
-
tenant_config_file = 'config/protected/main.yaml'
def test_protected_ok(self):
- # test clean usage of final parent job
- in_repo_conf = textwrap.dedent(
- """
- - job:
- name: job-protected
- protected: true
- run: playbooks/job-protected.yaml
+ # test clean usage of final parent job
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: job-protected
+ protected: true
+ run: playbooks/job-protected.yaml
- - project:
- name: org/project
- check:
- jobs:
- - job-child-ok
+ - project:
+ name: org/project
+ check:
+ jobs:
+ - job-child-ok
- - job:
- name: job-child-ok
- parent: job-protected
+ - job:
+ name: job-child-ok
+ parent: job-protected
- - project:
- name: org/project
- check:
- jobs:
- - job-child-ok
+ - project:
+ name: org/project
+ check:
+ jobs:
+ - job-child-ok
- """)
+ """)
- file_dict = {'zuul.yaml': in_repo_conf}
- A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
- files=file_dict)
- self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
- self.waitUntilSettled()
+ file_dict = {'zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
- self.assertEqual(A.reported, 1)
- self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
def test_protected_reset(self):
# try to reset protected flag
@@ -177,6 +176,47 @@
"and cannot be inherited from other projects.", A.messages[0])
+class TestAbstract(ZuulTestCase):
+ tenant_config_file = 'config/abstract/main.yaml'
+
+ def test_abstract_fail(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - job-abstract
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '-1')
+ self.assertIn('may not be directly run', A.messages[0])
+
+ def test_child_of_abstract(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - job-child
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
+
+
class TestFinal(ZuulTestCase):
tenant_config_file = 'config/final/main.yaml'
@@ -261,6 +301,106 @@
self.assertIn('Unable to modify final job', A.messages[0])
+class TestBranchDeletion(ZuulTestCase):
+ tenant_config_file = 'config/branch-deletion/main.yaml'
+
+ def test_branch_delete(self):
+ # This tests a tenant reconfiguration on deleting a branch
+ # *after* an earlier failed tenant reconfiguration. This
+ # ensures that cached data are appropriately removed, even if
+ # we are recovering from an invalid config.
+ self.create_branch('org/project', 'stable/queens')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project', 'stable/queens'))
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - nonexistent-job
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'stable/queens', 'A',
+ files=file_dict)
+ A.setMerged()
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.delete_branch('org/project', 'stable/queens')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchDeletedEvent(
+ 'org/project', 'stable/queens'))
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - base
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
+ files=file_dict)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertEqual(B.reported, 1)
+ self.assertHistory([
+ dict(name='base', result='SUCCESS', changes='2,1')])
+
+ def test_branch_delete_full_reconfiguration(self):
+        # This tests a full reconfiguration after deleting a branch
+ # *after* an earlier failed tenant reconfiguration. This
+ # ensures that cached data are appropriately removed, even if
+ # we are recovering from an invalid config.
+ self.create_branch('org/project', 'stable/queens')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project', 'stable/queens'))
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - nonexistent-job
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'stable/queens', 'A',
+ files=file_dict)
+ A.setMerged()
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.delete_branch('org/project', 'stable/queens')
+ self.sched.reconfigure(self.config)
+ self.waitUntilSettled()
+
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ check:
+ jobs:
+ - base
+ """)
+
+ file_dict = {'zuul.yaml': in_repo_conf}
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
+ files=file_dict)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertEqual(B.reported, 1)
+ self.assertHistory([
+ dict(name='base', result='SUCCESS', changes='2,1')])
+
+
class TestBranchTag(ZuulTestCase):
tenant_config_file = 'config/branch-tag/main.yaml'
@@ -1908,6 +2048,12 @@
build_timeout = self.getJobFromHistory('timeout')
with self.jobLog(build_timeout):
self.assertEqual(build_timeout.result, 'TIMED_OUT')
+ post_flag_path = os.path.join(self.test_root, build_timeout.uuid +
+ '.post.flag')
+ self.assertTrue(os.path.exists(post_flag_path))
+ build_post_timeout = self.getJobFromHistory('post-timeout')
+ with self.jobLog(build_post_timeout):
+ self.assertEqual(build_post_timeout.result, 'POST_FAILURE')
build_faillocal = self.getJobFromHistory('faillocal')
with self.jobLog(build_faillocal):
self.assertEqual(build_faillocal.result, 'FAILURE')
diff --git a/zuul/ansible/library/zuul_console.py b/zuul/ansible/library/zuul_console.py
index f84766d..6703cc1 100644
--- a/zuul/ansible/library/zuul_console.py
+++ b/zuul/ansible/library/zuul_console.py
@@ -277,7 +277,7 @@
inode = get_inode()
if not inode:
module.fail_json(
- "Could not find inode for port",
+ msg="Could not find inode for port",
exceptions=[])
pid, exceptions = get_pid_from_inode(inode)
diff --git a/zuul/cmd/__init__.py b/zuul/cmd/__init__.py
index b299219..6ab7598 100755
--- a/zuul/cmd/__init__.py
+++ b/zuul/cmd/__init__.py
@@ -59,9 +59,12 @@
thread = threads.get(thread_id)
if thread:
thread_name = thread.name
+ thread_is_daemon = str(thread.daemon)
else:
thread_name = thread.ident
- log_str += "Thread: %s %s\n" % (thread_id, thread_name)
+ thread_is_daemon = '(Unknown)'
+ log_str += "Thread: %s %s d: %s\n"\
+ % (thread_id, thread_name, thread_is_daemon)
log_str += "".join(traceback.format_stack(stack_frame))
log.debug(log_str)
except Exception:
diff --git a/zuul/cmd/scheduler.py b/zuul/cmd/scheduler.py
index 68c9000..a3a53cf 100755
--- a/zuul/cmd/scheduler.py
+++ b/zuul/cmd/scheduler.py
@@ -159,6 +159,7 @@
self.log.exception("Error starting Zuul:")
# TODO(jeblair): If we had all threads marked as daemon,
# we might be able to have a nicer way of exiting here.
+ self.sched.stop()
sys.exit(1)
signal.signal(signal.SIGHUP, self.reconfigure_handler)
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 0b78993..4745144 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -384,8 +384,12 @@
class NodeSetParser(object):
- @staticmethod
- def getSchema(anonymous=False):
+ def __init__(self, tenant, layout):
+ self.log = logging.getLogger("zuul.NodeSetParser")
+ self.tenant = tenant
+ self.layout = layout
+
+ def getSchema(self, anonymous=False):
node = {vs.Required('name'): to_list(str),
vs.Required('label'): str,
}
@@ -404,9 +408,8 @@
nodeset[vs.Required('name')] = str
return vs.Schema(nodeset)
- @staticmethod
- def fromYaml(conf, anonymous=False):
- NodeSetParser.getSchema(anonymous)(conf)
+ def fromYaml(self, conf, anonymous=False):
+ self.getSchema(anonymous)(conf)
ns = model.NodeSet(conf.get('name'), conf.get('_source_context'))
node_names = set()
group_names = set()
@@ -432,8 +435,13 @@
class SecretParser(object):
- @staticmethod
- def getSchema():
+ def __init__(self, tenant, layout):
+ self.log = logging.getLogger("zuul.SecretParser")
+ self.tenant = tenant
+ self.layout = layout
+ self.schema = self.getSchema()
+
+ def getSchema(self):
data = {str: vs.Any(str, EncryptedPKCS1_OAEP)}
secret = {vs.Required('name'): str,
@@ -444,10 +452,9 @@
return vs.Schema(secret)
- @staticmethod
- def fromYaml(layout, conf):
+ def fromYaml(self, conf):
with configuration_exceptions('secret', conf):
- SecretParser.getSchema()(conf)
+ self.schema(conf)
s = model.Secret(conf['name'], conf['_source_context'])
s.secret_data = conf['data']
return s
@@ -474,6 +481,7 @@
# Attributes of a job that can also be used in Project and ProjectTemplate
job_attributes = {'parent': vs.Any(str, None),
'final': bool,
+ 'abstract': bool,
'protected': bool,
'failure-message': str,
'success-message': str,
@@ -490,6 +498,7 @@
# validation happens in NodeSetParser
'nodeset': vs.Any(dict, str),
'timeout': int,
+ 'post-timeout': int,
'attempts': int,
'pre-run': to_list(str),
'post-run': to_list(str),
@@ -516,8 +525,10 @@
simple_attributes = [
'final',
+ 'abstract',
'protected',
'timeout',
+ 'post-timeout',
'workspace',
'voting',
'hold-following-changes',
@@ -628,6 +639,10 @@
int(conf['timeout']) > tenant.max_job_timeout:
raise MaxTimeoutError(job, tenant)
+ if conf.get('post-timeout') and tenant.max_job_timeout != -1 and \
+ int(conf['post-timeout']) > tenant.max_job_timeout:
+ raise MaxTimeoutError(job, tenant)
+
if 'post-review' in conf:
if conf['post-review']:
job.post_review = True
@@ -674,6 +689,7 @@
if k in conf:
setattr(job, a, conf[k])
if 'nodeset' in conf:
+ nodeset_parser = NodeSetParser(tenant, layout)
conf_nodeset = conf['nodeset']
if isinstance(conf_nodeset, str):
# This references an existing named nodeset in the layout.
@@ -681,7 +697,7 @@
if ns is None:
raise NodesetNotFoundError(conf_nodeset)
else:
- ns = NodeSetParser.fromYaml(conf_nodeset, anonymous=True)
+ ns = nodeset_parser.fromYaml(conf_nodeset, anonymous=True)
if tenant.max_nodes_per_job != -1 and \
len(ns) > tenant.max_nodes_per_job:
raise MaxNodeError(job, tenant)
@@ -1466,6 +1482,11 @@
jobs.append(job)
for project in untrusted_projects:
+ tpc = tenant.project_configs[project.canonical_name]
+ # If all config classes are excluded then does not request a
+            # If all config classes are excluded then do not request a
+            # getFiles job.
+ continue
# If we have cached data (this is a reconfiguration) use it.
if cached and project.unparsed_config:
jobs.append(CachedDataJob(False, project))
@@ -1556,8 +1577,7 @@
project.unparsed_config = data
for project, branch_config in \
new_project_unparsed_branch_config.items():
- for branch, data in branch_config.items():
- project.unparsed_branch_config[branch] = data
+ project.unparsed_branch_config = branch_config
return config_projects_config, untrusted_projects_config
@staticmethod
@@ -1603,20 +1623,22 @@
layout, connections,
scheduler, config_pipeline))
+ nodeset_parser = NodeSetParser(tenant, layout)
for config_nodeset in data.nodesets:
classes = TenantParser._getLoadClasses(tenant, config_nodeset)
if 'nodeset' not in classes:
continue
with configuration_exceptions('nodeset', config_nodeset):
- layout.addNodeSet(NodeSetParser.fromYaml(
+ layout.addNodeSet(nodeset_parser.fromYaml(
config_nodeset))
+ secret_parser = SecretParser(tenant, layout)
for config_secret in data.secrets:
classes = TenantParser._getLoadClasses(tenant, config_secret)
if 'secret' not in classes:
continue
with configuration_exceptions('secret', config_secret):
- layout.addSecret(SecretParser.fromYaml(layout, config_secret))
+ layout.addSecret(secret_parser.fromYaml(config_secret))
for config_job in data.jobs:
classes = TenantParser._getLoadClasses(tenant, config_job)
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index 35f3199..fe0f28d 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -185,6 +185,7 @@
params = dict()
params['job'] = job.name
params['timeout'] = job.timeout
+ params['post_timeout'] = job.post_timeout
params['items'] = merger_items
params['projects'] = []
if hasattr(item.change, 'branch'):
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index de85647..d140a00 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -783,7 +783,16 @@
return data
def doMergeChanges(self, merger, items, repo_state):
- ret = merger.mergeChanges(items, repo_state=repo_state)
+ try:
+ ret = merger.mergeChanges(items, repo_state=repo_state)
+ except ValueError as e:
+ # Return ABORTED so that we'll try again. At this point all of
+ # the refs we're trying to merge should be valid refs. If we
+ # can't fetch them, it should resolve itself.
+ self.log.exception("Could not fetch refs to merge from remote")
+ result = dict(result='ABORTED')
+ self.job.sendWorkComplete(json.dumps(result))
+ return False
if not ret: # merge conflict
result = dict(result='MERGER_FAILURE')
if self.executor_server.statsd:
@@ -875,8 +884,10 @@
success = False
self.started = True
time_started = time.time()
- # timeout value is total job timeout or put another way
- # the cummulative time that pre, run, and post can consume.
+        # The timeout value is the "total" job timeout, which covers
+        # the pre-run and run playbooks. Post-run is handled
+        # differently because it is used to copy out job logs, and we
+        # want to do our best to copy logs even when the job has
+        # timed out.
job_timeout = args['timeout']
for index, playbook in enumerate(self.jobdir.pre_playbooks):
# TODOv3(pabelanger): Implement pre-run timeout setting.
@@ -911,11 +922,15 @@
# run it again.
return None
+ post_timeout = args['post_timeout']
for index, playbook in enumerate(self.jobdir.post_playbooks):
- # TODOv3(pabelanger): Implement post-run timeout setting.
- ansible_timeout = self.getAnsibleTimeout(time_started, job_timeout)
+            # The post timeout operates a little differently from the
+            # main job timeout. We give each post playbook the full post
+            # timeout to do its job, because post is where you'll often
+            # record job logs, which are vital to understanding why
+            # timeouts happened in the first place.
post_status, post_code = self.runAnsiblePlaybook(
- playbook, ansible_timeout, success, phase='post', index=index)
+ playbook, post_timeout, success, phase='post', index=index)
if post_status == self.RESULT_ABORTED:
return 'ABORTED'
if post_status != self.RESULT_NORMAL or post_code != 0:
@@ -1850,7 +1865,7 @@
if self.statsd:
base_key = 'zuul.executor.%s' % self.hostname
self.statsd.gauge(base_key + '.load_average', 0)
- self.statsd.gauge(base_key + '.pct_available_ram', 0)
+ self.statsd.gauge(base_key + '.pct_used_ram', 0)
self.statsd.gauge(base_key + '.running_builds', 0)
self.log.debug("Stopped")
@@ -2049,8 +2064,8 @@
base_key = 'zuul.executor.%s' % self.hostname
self.statsd.gauge(base_key + '.load_average',
int(load_avg * 100))
- self.statsd.gauge(base_key + '.pct_available_ram',
- int(avail_mem_pct * 100))
+ self.statsd.gauge(base_key + '.pct_used_ram',
+ int((100.0 - avail_mem_pct) * 100))
self.statsd.gauge(base_key + '.running_builds',
len(self.job_workers))
self.statsd.gauge(base_key + '.starting_builds',
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index 5e102b4..aba8645 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -79,6 +79,8 @@
self.retry_interval = retry_interval
try:
self._ensure_cloned()
+ self._git_set_remote_url(
+ git.Repo(self.local_path), self.remote_url)
except Exception:
self.log.exception("Unable to initialize repo for %s" % remote)
@@ -112,8 +114,7 @@
config_writer.set_value('user', 'name', self.username)
config_writer.write()
if rewrite_url:
- with repo.remotes.origin.config_writer as config_writer:
- config_writer.set('url', self.remote_url)
+ self._git_set_remote_url(repo, self.remote_url)
self._initialized = True
def isInitialized(self):
@@ -154,6 +155,10 @@
else:
raise
+ def _git_set_remote_url(self, repo, url):
+ with repo.remotes.origin.config_writer as config_writer:
+ config_writer.set('url', url)
+
def createRepoObject(self):
self._ensure_cloned()
repo = git.Repo(self.local_path)
@@ -350,6 +355,13 @@
repo = self.createRepoObject()
repo.delete_remote(repo.remotes[remote])
+ def setRemoteUrl(self, url):
+ if self.remote_url == url:
+ return
+ self.log.debug("Set remote url to %s" % url)
+ self.remote_url = url
+ self._git_set_remote_url(self.createRepoObject(), self.remote_url)
+
class Merger(object):
def __init__(self, working_root, connections, email, username,
@@ -397,7 +409,9 @@
url = source.getGitUrl(project)
key = '/'.join([hostname, project_name])
if key in self.repos:
- return self.repos[key]
+ repo = self.repos[key]
+ repo.setRemoteUrl(url)
+ return repo
sshkey = self.connections.connections.get(connection_name).\
connection_config.get('sshkey')
if not url:
diff --git a/zuul/model.py b/zuul/model.py
index 997b129..2ccaade 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -839,6 +839,7 @@
self.execution_attributes = dict(
parent=None,
timeout=None,
+ post_timeout=None,
variables={},
host_variables={},
group_variables={},
@@ -850,6 +851,7 @@
semaphore=None,
attempts=3,
final=False,
+ abstract=False,
protected=None,
roles=(),
required_projects={},
@@ -1055,7 +1057,7 @@
for k in self.execution_attributes:
if (other._get(k) is not None and
- k not in set(['final', 'protected'])):
+ k not in set(['final', 'abstract', 'protected'])):
if self.final:
raise Exception("Unable to modify final job %s attribute "
"%s=%s with variant %s" % (
@@ -1082,6 +1084,13 @@
if other.final != self.attributes['final']:
self.final = other.final
+        # Abstract may not be reset by a variant; it may only be
+        # cleared by inheriting.
+ if other.name != self.name:
+ self.abstract = other.abstract
+ elif other.abstract:
+ self.abstract = True
+
# Protected may only be set to true
if other.protected is not None:
# don't allow to reset protected flag
@@ -2848,6 +2857,10 @@
item.debug("No matching pipeline variants for {jobname}".
format(jobname=jobname), indent=2)
continue
+ if frozen_job.abstract:
+ raise Exception("Job %s is abstract and may not be "
+ "directly run" %
+ (frozen_job.name,))
if (frozen_job.allowed_projects is not None and
change.project.name not in frozen_job.allowed_projects):
raise Exception("Project %s is not allowed to run job %s" %
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 2bce43f..7a0e28c 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -231,6 +231,7 @@
self.statsd = get_statsd(config)
self.rpc = rpclistener.RPCListener(config, self)
self.stats_thread = threading.Thread(target=self.runStats)
+ self.stats_thread.daemon = True
self.stats_stop = threading.Event()
# TODO(jeblair): fix this
# Despite triggers being part of the pipeline, there is one trigger set
@@ -546,7 +547,7 @@
self.layout_lock.acquire()
self.config = event.config
try:
- self.log.debug("Full reconfiguration beginning")
+ self.log.info("Full reconfiguration beginning")
loader = configloader.ConfigLoader()
abide = loader.loadConfig(
self.config.get('scheduler', 'tenant_config'),
@@ -557,18 +558,19 @@
self.abide = abide
finally:
self.layout_lock.release()
- self.log.debug("Full reconfiguration complete")
+ self.log.info("Full reconfiguration complete")
def _doTenantReconfigureEvent(self, event):
# This is called in the scheduler loop after another thread submits
# a request
self.layout_lock.acquire()
try:
- self.log.debug("Tenant reconfiguration beginning")
+ self.log.info("Tenant reconfiguration beginning")
# If a change landed to a project, clear out the cached
# config before reconfiguring.
for project in event.projects:
project.unparsed_config = None
+ project.unparsed_branch_config = {}
old_tenant = self.abide.tenants[event.tenant_name]
loader = configloader.ConfigLoader()
abide = loader.reloadTenant(
@@ -581,7 +583,7 @@
self.abide = abide
finally:
self.layout_lock.release()
- self.log.debug("Tenant reconfiguration complete")
+ self.log.info("Tenant reconfiguration complete")
def _reenqueueGetProject(self, tenant, item):
project = item.change.project
diff --git a/zuul/web/__init__.py b/zuul/web/__init__.py
index adbafb5..e962738 100755
--- a/zuul/web/__init__.py
+++ b/zuul/web/__init__.py
@@ -101,8 +101,7 @@
)
except Exception as e:
self.log.exception("Finger client exception:")
- msg = "Failure from finger client: %s" % e
- await ws.send_str(msg.decode('utf8'))
+ await ws.send_str("Failure from finger client: %s" % e)
return (1000, "No more data")