Merge "Add job dependencies to status.json" into feature/zuulv3
diff --git a/README.rst b/README.rst
index 16e7385..52b89df 100644
--- a/README.rst
+++ b/README.rst
@@ -7,6 +7,9 @@
preparation for the third major version of Zuul. We call this effort
`Zuul v3`_ and it is described in more detail below.
+The latest documentation for Zuul v3 is published at:
+https://docs.openstack.org/infra/zuul/feature/zuulv3/
+
Contributing
------------
diff --git a/doc/source/admin/components.rst b/doc/source/admin/components.rst
index a24b833..26a85b2 100644
--- a/doc/source/admin/components.rst
+++ b/doc/source/admin/components.rst
@@ -323,3 +323,41 @@
To enable or disable running Ansible in verbose mode (with the '-vvv'
argument to ansible-playbook) run ``zuul-executor verbose`` and
``zuul-executor unverbose``.
+
+Web Server
+----------
+
+The Zuul web server currently provides a websocket interface for live
+log streaming. Eventually, it will serve as the single process handling
+all HTTP interactions with Zuul.
+
+Configuration
+~~~~~~~~~~~~~
+
+In addition to the ``gearman`` common configuration section, the following
+sections of **zuul.conf** are used by the web server:
+
+web
+"""
+
+**listen_address**
+ IP address or domain name on which to listen (default: 127.0.0.1).
+ ``listen_address=127.0.0.1``
+
+**log_config**
+ Path to log config file for the web server process.
+ ``log_config=/etc/zuul/logging.yaml``
+
+**pidfile**
+ Path to PID lock file for the web server process.
+ ``pidfile=/var/run/zuul-web/zuul-web.pid``
+
+**port**
+ Port to use for the web server process.
+ ``port=9000``
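+
+Assembled from the defaults and example values above, a minimal
+``[web]`` section might look like::
+
+ [web]
+ listen_address=127.0.0.1
+ port=9000
+ log_config=/etc/zuul/logging.yaml
+ pidfile=/var/run/zuul-web/zuul-web.pid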
+
+Operation
+~~~~~~~~~
+
+To start the web server, run ``zuul-web``. To stop it, kill the
+process whose PID was saved in the pidfile specified in the
+configuration.
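+
+For example, assuming the pidfile path shown in the configuration
+section above::
+
+ zuul-web
+ kill $(cat /var/run/zuul-web/zuul-web.pid)
+
+As a sketch of how a client can consume the live log stream (modeled
+on the websocket test client in ``tests/unit/test_log_streamer.py``;
+the build UUID and web server address are placeholders)::
+
+ import aiohttp
+ import asyncio
+ import json
+
+ async def stream_log(build_uuid):
+     # Connect to zuul-web's websocket endpoint and request the
+     # console log for a specific build.
+     session = aiohttp.ClientSession()
+     async with session.ws_connect(
+             'http://127.0.0.1:9000/console-stream') as ws:
+         ws.send_str(json.dumps({'uuid': build_uuid, 'logfile': None}))
+         async for msg in ws:
+             if msg.type == aiohttp.WSMsgType.TEXT:
+                 print(msg.data, end='')
+             else:
+                 break
+     session.close()
+
+ asyncio.get_event_loop().run_until_complete(stream_log('some-build-uuid'))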
diff --git a/doc/source/admin/quick-start.rst b/doc/source/admin/quick-start.rst
index 9993775..77dee4a 100644
--- a/doc/source/admin/quick-start.rst
+++ b/doc/source/admin/quick-start.rst
@@ -33,6 +33,9 @@
merger if dedicated mergers are not provided. One or more of
these must be run.
+ - **zuul-web**: A web server that currently provides websocket access to
+ live streaming of logs.
+
- **gearman**: optional builtin gearman daemon provided by zuul-scheduler
External components:
diff --git a/etc/zuul.conf-sample b/etc/zuul.conf-sample
index 48f23a5..9b8406c 100644
--- a/etc/zuul.conf-sample
+++ b/etc/zuul.conf-sample
@@ -33,6 +33,10 @@
trusted_ro_dirs=/opt/zuul-scripts:/var/cache
trusted_rw_dirs=/opt/zuul-logs
+[web]
+listen_address=127.0.0.1
+port=9000
+
[webapp]
listen_address=0.0.0.0
port=8001
diff --git a/requirements.txt b/requirements.txt
index 5caa1b5..69509d0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -24,3 +24,5 @@
cachecontrol
pyjwt
iso8601
+aiohttp
+uvloop;python_version>='3.5'
diff --git a/setup.cfg b/setup.cfg
index 0d22cb1..ce7a40e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -26,6 +26,7 @@
zuul-cloner = zuul.cmd.cloner:main
zuul-executor = zuul.cmd.executor:main
zuul-bwrap = zuul.driver.bubblewrap:main
+ zuul-web = zuul.cmd.web:main
[build_sphinx]
source-dir = doc/source
diff --git a/tests/base.py b/tests/base.py
index 921fcd1..9709bf7 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -1266,7 +1266,6 @@
self.build_history = []
self.fail_tests = {}
self.job_builds = {}
- self.hostname = 'zl.example.com'
def failJob(self, name, change):
"""Instruct the executor to report matching builds as failures.
@@ -2149,6 +2148,8 @@
# Make sure we set up an RSA key for the project so that we
# don't spend time generating one:
+ if isinstance(project, dict):
+ project = list(project.keys())[0]
key_root = os.path.join(self.state_root, 'keys')
if not os.path.isdir(key_root):
os.mkdir(key_root, 0o700)
@@ -2341,6 +2342,11 @@
if count is not None and i >= count:
break
+ def getSortedBuilds(self):
+ "Return the list of currently running builds sorted by name"
+
+ return sorted(self.builds, key=lambda x: x.name)
+
def release(self, job):
if isinstance(job, FakeBuild):
job.release()
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
index d528be1..36a22e4 100644
--- a/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/hello-post.yaml
@@ -10,3 +10,7 @@
that:
- st.stat.exists
- st.stat.isreg
+
+ - name: Simple shell task.
+ shell: |+
+ echo "Hello world"
diff --git a/tests/fixtures/config/in-repo/git/org_project1/.zuul.yaml b/tests/fixtures/config/in-repo/git/org_project1/.zuul.yaml
new file mode 100644
index 0000000..3fd423b
--- /dev/null
+++ b/tests/fixtures/config/in-repo/git/org_project1/.zuul.yaml
@@ -0,0 +1,5 @@
+- project:
+ name: org/project1
+ tenant-one-gate:
+ jobs:
+ - project-test1
diff --git a/tests/fixtures/config/shadow/git/local-config/playbooks/base.yaml b/tests/fixtures/config/shadow/git/local-config/playbooks/base.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/local-config/playbooks/base.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/shadow/git/local-config/playbooks/test2.yaml b/tests/fixtures/config/shadow/git/local-config/playbooks/test2.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/local-config/playbooks/test2.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/shadow/git/local-config/zuul.yaml b/tests/fixtures/config/shadow/git/local-config/zuul.yaml
new file mode 100644
index 0000000..756e843
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/local-config/zuul.yaml
@@ -0,0 +1,25 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- job:
+ name: base
+
+- job:
+ name: test2
+
+- project:
+ name: org/project
+ check:
+ jobs:
+ - test1
+ - test2
diff --git a/tests/fixtures/config/shadow/git/org_project/README b/tests/fixtures/config/shadow/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/shadow/git/stdlib/.zuul.yaml b/tests/fixtures/config/shadow/git/stdlib/.zuul.yaml
new file mode 100644
index 0000000..6a6f9c9
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/stdlib/.zuul.yaml
@@ -0,0 +1,10 @@
+- job:
+ name: base
+
+- job:
+ name: test1
+ parent: base
+
+- job:
+ name: test2
+ parent: base
diff --git a/tests/fixtures/config/shadow/git/stdlib/README b/tests/fixtures/config/shadow/git/stdlib/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/stdlib/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/shadow/git/stdlib/playbooks/base.yaml b/tests/fixtures/config/shadow/git/stdlib/playbooks/base.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/stdlib/playbooks/base.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/shadow/git/stdlib/playbooks/test1.yaml b/tests/fixtures/config/shadow/git/stdlib/playbooks/test1.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/stdlib/playbooks/test1.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/shadow/git/stdlib/playbooks/test2.yaml b/tests/fixtures/config/shadow/git/stdlib/playbooks/test2.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/shadow/git/stdlib/playbooks/test2.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/shadow/main.yaml b/tests/fixtures/config/shadow/main.yaml
new file mode 100644
index 0000000..f148a84
--- /dev/null
+++ b/tests/fixtures/config/shadow/main.yaml
@@ -0,0 +1,10 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - local-config
+ untrusted-projects:
+ - stdlib:
+ shadow: local-config
+ - org/project
diff --git a/tests/fixtures/config/split-config/git/common-config/playbooks/project-test1.yaml b/tests/fixtures/config/split-config/git/common-config/playbooks/project-test1.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/common-config/playbooks/project-test1.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/split-config/git/common-config/zuul.d/jobs.yaml b/tests/fixtures/config/split-config/git/common-config/zuul.d/jobs.yaml
new file mode 100644
index 0000000..280342c
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/common-config/zuul.d/jobs.yaml
@@ -0,0 +1,2 @@
+- job:
+ name: project-test1
diff --git a/tests/fixtures/config/split-config/git/common-config/zuul.d/org-project.yaml b/tests/fixtures/config/split-config/git/common-config/zuul.d/org-project.yaml
new file mode 100644
index 0000000..872e126
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/common-config/zuul.d/org-project.yaml
@@ -0,0 +1,5 @@
+- project:
+ name: org/project
+ check:
+ jobs:
+ - project-test1
diff --git a/tests/fixtures/config/split-config/git/common-config/zuul.d/pipelines.yaml b/tests/fixtures/config/split-config/git/common-config/zuul.d/pipelines.yaml
new file mode 100644
index 0000000..ba91fb5
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/common-config/zuul.d/pipelines.yaml
@@ -0,0 +1,12 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
diff --git a/tests/fixtures/config/split-config/git/org_project/README b/tests/fixtures/config/split-config/git/org_project/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/split-config/git/org_project1/.zuul.d/gate.yaml b/tests/fixtures/config/split-config/git/org_project1/.zuul.d/gate.yaml
new file mode 100644
index 0000000..4bc0d81
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/org_project1/.zuul.d/gate.yaml
@@ -0,0 +1,7 @@
+- project:
+ name: org/project1
+ check:
+ jobs:
+ - project-test1
+ - project1-project2-integration:
+ dependencies: project-test1
diff --git a/tests/fixtures/config/split-config/git/org_project1/.zuul.d/jobs.yaml b/tests/fixtures/config/split-config/git/org_project1/.zuul.d/jobs.yaml
new file mode 100644
index 0000000..33d74f3
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/org_project1/.zuul.d/jobs.yaml
@@ -0,0 +1,2 @@
+- job:
+ name: project1-project2-integration
diff --git a/tests/fixtures/config/split-config/git/org_project1/README b/tests/fixtures/config/split-config/git/org_project1/README
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/split-config/git/org_project1/playbooks/project1-project2-integration.yaml b/tests/fixtures/config/split-config/git/org_project1/playbooks/project1-project2-integration.yaml
new file mode 100644
index 0000000..f679dce
--- /dev/null
+++ b/tests/fixtures/config/split-config/git/org_project1/playbooks/project1-project2-integration.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/split-config/main.yaml b/tests/fixtures/config/split-config/main.yaml
new file mode 100644
index 0000000..5f57245
--- /dev/null
+++ b/tests/fixtures/config/split-config/main.yaml
@@ -0,0 +1,9 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project
+ - org/project1
diff --git a/tests/fixtures/config/templated-project/git/common-config/zuul.d/jobs.yaml b/tests/fixtures/config/templated-project/git/common-config/zuul.d/jobs.yaml
new file mode 100644
index 0000000..e051871
--- /dev/null
+++ b/tests/fixtures/config/templated-project/git/common-config/zuul.d/jobs.yaml
@@ -0,0 +1,17 @@
+- job:
+ name: project-test1
+
+- job:
+ name: project-test2
+
+- job:
+ name: layered-project-test3
+
+- job:
+ name: layered-project-test4
+
+- job:
+ name: layered-project-foo-test5
+
+- job:
+ name: project-test6
diff --git a/tests/fixtures/config/templated-project/git/common-config/zuul.d/pipelines.yaml b/tests/fixtures/config/templated-project/git/common-config/zuul.d/pipelines.yaml
new file mode 100644
index 0000000..4a19796
--- /dev/null
+++ b/tests/fixtures/config/templated-project/git/common-config/zuul.d/pipelines.yaml
@@ -0,0 +1,41 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - approved: 1
+ success:
+ gerrit:
+ verified: 2
+ submit: true
+ failure:
+ gerrit:
+ verified: -2
+ start:
+ gerrit:
+ verified: 0
+ precedence: high
+
+- pipeline:
+ name: post
+ manager: independent
+ trigger:
+ gerrit:
+ - event: ref-updated
+ ref: ^(?!refs/).*$
diff --git a/tests/fixtures/config/templated-project/git/common-config/zuul.d/projects.yaml b/tests/fixtures/config/templated-project/git/common-config/zuul.d/projects.yaml
new file mode 100644
index 0000000..891c863
--- /dev/null
+++ b/tests/fixtures/config/templated-project/git/common-config/zuul.d/projects.yaml
@@ -0,0 +1,14 @@
+- project:
+ name: org/templated-project
+ templates:
+ - test-one-and-two
+
+- project:
+ name: org/layered-project
+ templates:
+ - test-one-and-two
+ - test-three-and-four
+ - test-five
+ check:
+ jobs:
+ - project-test6
diff --git a/tests/fixtures/config/templated-project/git/common-config/zuul.d/templates.yaml b/tests/fixtures/config/templated-project/git/common-config/zuul.d/templates.yaml
new file mode 100644
index 0000000..27d2f16
--- /dev/null
+++ b/tests/fixtures/config/templated-project/git/common-config/zuul.d/templates.yaml
@@ -0,0 +1,19 @@
+- project-template:
+ name: test-one-and-two
+ check:
+ jobs:
+ - project-test1
+ - project-test2
+
+- project-template:
+ name: test-three-and-four
+ check:
+ jobs:
+ - layered-project-test3
+ - layered-project-test4
+
+- project-template:
+ name: test-five
+ check:
+ jobs:
+ - layered-project-foo-test5
diff --git a/tests/fixtures/config/templated-project/git/common-config/zuul.yaml b/tests/fixtures/config/templated-project/git/common-config/zuul.yaml
deleted file mode 100644
index 251a3cd..0000000
--- a/tests/fixtures/config/templated-project/git/common-config/zuul.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-- pipeline:
- name: check
- manager: independent
- trigger:
- gerrit:
- - event: patchset-created
- success:
- gerrit:
- verified: 1
- failure:
- gerrit:
- verified: -1
-
-- pipeline:
- name: gate
- manager: dependent
- success-message: Build succeeded (gate).
- trigger:
- gerrit:
- - event: comment-added
- approval:
- - approved: 1
- success:
- gerrit:
- verified: 2
- submit: true
- failure:
- gerrit:
- verified: -2
- start:
- gerrit:
- verified: 0
- precedence: high
-
-- pipeline:
- name: post
- manager: independent
- trigger:
- gerrit:
- - event: ref-updated
- ref: ^(?!refs/).*$
-
-- project-template:
- name: test-one-and-two
- check:
- jobs:
- - project-test1
- - project-test2
-
-- project-template:
- name: test-three-and-four
- check:
- jobs:
- - layered-project-test3
- - layered-project-test4
-
-- project-template:
- name: test-five
- check:
- jobs:
- - layered-project-foo-test5
-
-- job:
- name: project-test1
-
-- job:
- name: project-test2
-
-- job:
- name: layered-project-test3
-
-- job:
- name: layered-project-test4
-
-- job:
- name: layered-project-foo-test5
-
-- job:
- name: project-test6
-
-- project:
- name: org/templated-project
- templates:
- - test-one-and-two
-
-- project:
- name: org/layered-project
- templates:
- - test-one-and-two
- - test-three-and-four
- - test-five
- check:
- jobs:
- - project-test6
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index faa2f61..573ccbf 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import textwrap
from tests.base import ZuulTestCase
@@ -37,12 +38,16 @@
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
- self.assertEqual(self.CONFIG_SET,
- tenant.config_projects[0].load_classes)
- self.assertEqual(self.UNTRUSTED_SET,
- tenant.untrusted_projects[0].load_classes)
- self.assertEqual(self.UNTRUSTED_SET,
- tenant.untrusted_projects[1].load_classes)
+
+ project = tenant.config_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.CONFIG_SET, tpc.load_classes)
+ project = tenant.untrusted_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.UNTRUSTED_SET, tpc.load_classes)
+ project = tenant.untrusted_projects[1]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.UNTRUSTED_SET, tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
@@ -69,12 +74,16 @@
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
- self.assertEqual(self.CONFIG_SET,
- tenant.config_projects[0].load_classes)
+ project = tenant.config_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.CONFIG_SET, tpc.load_classes)
+ project = tenant.untrusted_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
- tenant.untrusted_projects[0].load_classes)
- self.assertEqual(set(['job']),
- tenant.untrusted_projects[1].load_classes)
+ tpc.load_classes)
+ project = tenant.untrusted_projects[1]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(set(['job']), tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
@@ -101,12 +110,17 @@
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
- self.assertEqual(self.CONFIG_SET,
- tenant.config_projects[0].load_classes)
+ project = tenant.config_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.CONFIG_SET, tpc.load_classes)
+ project = tenant.untrusted_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
- tenant.untrusted_projects[0].load_classes)
+ tpc.load_classes)
+ project = tenant.untrusted_projects[1]
+ tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
- tenant.untrusted_projects[1].load_classes)
+ tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
@@ -133,12 +147,17 @@
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
- self.assertEqual(self.CONFIG_SET,
- tenant.config_projects[0].load_classes)
+ project = tenant.config_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.CONFIG_SET, tpc.load_classes)
+ project = tenant.untrusted_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
- tenant.untrusted_projects[0].load_classes)
+ tpc.load_classes)
+ project = tenant.untrusted_projects[1]
+ tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project', 'job']),
- tenant.untrusted_projects[1].load_classes)
+ tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertFalse('project2-job' in tenant.layout.jobs)
@@ -165,12 +184,15 @@
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
- self.assertEqual(self.CONFIG_SET,
- tenant.config_projects[0].load_classes)
- self.assertEqual(set(['job']),
- tenant.untrusted_projects[0].load_classes)
- self.assertEqual(set(['project', 'job']),
- tenant.untrusted_projects[1].load_classes)
+ project = tenant.config_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(self.CONFIG_SET, tpc.load_classes)
+ project = tenant.untrusted_projects[0]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(set(['job']), tpc.load_classes)
+ project = tenant.untrusted_projects[1]
+ tpc = tenant.project_configs[project.canonical_name]
+ self.assertEqual(set(['project', 'job']), tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
@@ -186,3 +208,40 @@
project2_config.pipelines['check'].job_list.jobs)
self.assertTrue('project2-job' in
project2_config.pipelines['check'].job_list.jobs)
+
+
+class TestSplitConfig(ZuulTestCase):
+ tenant_config_file = 'config/split-config/main.yaml'
+
+ def setup_config(self):
+ super(TestSplitConfig, self).setup_config()
+
+ def test_split_config(self):
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ self.assertIn('project-test1', tenant.layout.jobs)
+ project_config = tenant.layout.project_configs.get(
+ 'review.example.com/org/project')
+ self.assertIn('project-test1',
+ project_config.pipelines['check'].job_list.jobs)
+ project1_config = tenant.layout.project_configs.get(
+ 'review.example.com/org/project1')
+ self.assertIn('project1-project2-integration',
+ project1_config.pipelines['check'].job_list.jobs)
+
+ def test_dynamic_split_config(self):
+ in_repo_conf = textwrap.dedent(
+ """
+ - project:
+ name: org/project1
+ check:
+ jobs:
+ - project-test1
+ """)
+ file_dict = {'.zuul.d/gate.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ # project1-project2-integration test removed, only want project-test1
+ self.assertHistory([
+ dict(name='project-test1', result='SUCCESS', changes='1,1')])
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index fcfaf5d..8d9d127 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -121,7 +121,8 @@
self.assertEqual('project-merge', buildset0_builds[0]['job_name'])
self.assertEqual("SUCCESS", buildset0_builds[0]['result'])
self.assertEqual(
- 'finger://zl.example.com/{uuid}'.format(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
uuid=buildset0_builds[0]['uuid']),
buildset0_builds[0]['log_url'])
self.assertEqual('check', buildset1['pipeline'])
@@ -144,7 +145,8 @@
self.assertEqual('project-test1', buildset1_builds[-2]['job_name'])
self.assertEqual("FAILURE", buildset1_builds[-2]['result'])
self.assertEqual(
- 'finger://zl.example.com/{uuid}'.format(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
uuid=buildset1_builds[-2]['uuid']),
buildset1_builds[-2]['log_url'])
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
old mode 100644
new mode 100755
index 39b6070..7b76802
--- a/tests/unit/test_executor.py
+++ b/tests/unit/test_executor.py
@@ -18,6 +18,9 @@
import logging
import time
+import zuul.executor.server
+import zuul.model
+
from tests.base import ZuulTestCase, simple_layout
@@ -305,3 +308,27 @@
]
self.assertBuildStates(states, projects)
+
+
+class TestAnsibleJob(ZuulTestCase):
+ tenant_config_file = 'config/ansible/main.yaml'
+
+ def setUp(self):
+ super(TestAnsibleJob, self).setUp()
+ job = zuul.model.Job('test')
+ job.unique = 'test'
+ self.test_job = zuul.executor.server.AnsibleJob(self.executor_server,
+ job)
+
+ def test_getHostList_host_keys(self):
+ # Test without ssh_port set
+ node = {'name': 'fake-host',
+ 'host_keys': ['fake-host-key'],
+ 'interface_ip': 'localhost'}
+ keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+ self.assertEqual(keys[0], 'localhost fake-host-key')
+
+ # Test with custom ssh_port set
+ node['ssh_port'] = 22022
+ keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+ self.assertEqual(keys[0], '[localhost]:22022 fake-host-key')
diff --git a/tests/unit/test_log_streamer.py b/tests/unit/test_log_streamer.py
index b0ef2c2..f47a8c8 100644
--- a/tests/unit/test_log_streamer.py
+++ b/tests/unit/test_log_streamer.py
@@ -14,6 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import aiohttp
+import asyncio
+import logging
+import json
import os
import os.path
import socket
@@ -21,6 +25,7 @@
import threading
import time
+import zuul.web
import zuul.lib.log_streamer
import tests.base
@@ -57,6 +62,7 @@
class TestStreaming(tests.base.AnsibleZuulTestCase):
tenant_config_file = 'config/streamer/main.yaml'
+ log = logging.getLogger("zuul.test.test_log_streamer.TestStreaming")
def setUp(self):
super(TestStreaming, self).setUp()
@@ -146,9 +152,116 @@
# job and deleted. However, we still have a file handle to it, so we
# can make sure that we read the entire contents at this point.
# Compact the returned lines into a single string for easy comparison.
- file_contents = ''.join(logfile.readlines())
+ file_contents = logfile.read()
logfile.close()
self.log.debug("\n\nFile contents: %s\n\n", file_contents)
self.log.debug("\n\nStreamed: %s\n\n", self.streaming_data)
self.assertEqual(file_contents, self.streaming_data)
+
+ def runWSClient(self, build_uuid, event):
+ async def client(loop, build_uuid, event):
+ uri = 'http://127.0.0.1:9000/console-stream'
+ try:
+ session = aiohttp.ClientSession(loop=loop)
+ async with session.ws_connect(uri) as ws:
+ req = {'uuid': build_uuid, 'logfile': None}
+ ws.send_str(json.dumps(req))
+ event.set() # notify we are connected and req sent
+ async for msg in ws:
+ if msg.type == aiohttp.WSMsgType.TEXT:
+ self.ws_client_results += msg.data
+ elif msg.type == aiohttp.WSMsgType.CLOSED:
+ break
+ elif msg.type == aiohttp.WSMsgType.ERROR:
+ break
+ session.close()
+ except Exception:
+ self.log.exception("client exception:")
+
+ loop = asyncio.new_event_loop()
+ loop.set_debug(True)
+ loop.run_until_complete(client(loop, build_uuid, event))
+ loop.close()
+
+ def test_websocket_streaming(self):
+ # Need to set the streaming port before submitting the job
+ finger_port = 7902
+ self.executor_server.log_streaming_port = finger_port
+
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+
+ # We don't have any real synchronization for the ansible jobs, so
+ # just wait until we get our running build.
+ while not len(self.builds):
+ time.sleep(0.1)
+ build = self.builds[0]
+ self.assertEqual(build.name, 'python27')
+
+ build_dir = os.path.join(self.executor_server.jobdir_root, build.uuid)
+ while not os.path.exists(build_dir):
+ time.sleep(0.1)
+
+ # Need to wait to make sure that jobdir gets set
+ while build.jobdir is None:
+ time.sleep(0.1)
+ build = self.builds[0]
+
+ # Wait for the job to begin running and create the ansible log file.
+ # The job waits to complete until the flag file exists, so we can
+ # safely access the log here. We only open it (to force a file handle
+ # to be kept open for it after the job finishes) but wait to read the
+ # contents until the job is done.
+ ansible_log = os.path.join(build.jobdir.log_root, 'job-output.txt')
+ while not os.path.exists(ansible_log):
+ time.sleep(0.1)
+ logfile = open(ansible_log, 'r')
+ self.addCleanup(logfile.close)
+
+ # Start the finger streamer daemon
+ streamer = zuul.lib.log_streamer.LogStreamer(
+ None, self.host, finger_port, self.executor_server.jobdir_root)
+ self.addCleanup(streamer.stop)
+
+ # Start the web server
+ web_server = zuul.web.ZuulWeb(
+ listen_address='127.0.0.1', listen_port=9000,
+ gear_server='127.0.0.1', gear_port=self.gearman_server.port)
+ loop = asyncio.new_event_loop()
+ loop.set_debug(True)
+ ws_thread = threading.Thread(target=web_server.run, args=(loop,))
+ ws_thread.start()
+ self.addCleanup(loop.close)
+ self.addCleanup(ws_thread.join)
+ self.addCleanup(web_server.stop)
+
+ # Wait until web server is started
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ while s.connect_ex((self.host, 9000)):
+ time.sleep(0.1)
+
+ # Start a thread with the websocket client
+ ws_client_event = threading.Event()
+ self.ws_client_results = ''
+ ws_client_thread = threading.Thread(
+ target=self.runWSClient, args=(build.uuid, ws_client_event)
+ )
+ ws_client_thread.start()
+ ws_client_event.wait()
+
+ # Allow the job to complete
+ flag_file = os.path.join(build_dir, 'test_wait')
+ open(flag_file, 'w').close()
+
+ # Wait for the websocket client to complete, which it should when
+ # it's received the full log.
+ ws_client_thread.join()
+
+ self.waitUntilSettled()
+
+ file_contents = logfile.read()
+ logfile.close()
+ self.log.debug("\n\nFile contents: %s\n\n", file_contents)
+ self.log.debug("\n\nStreamed: %s\n\n", self.ws_client_results)
+ self.assertEqual(file_contents, self.ws_client_results)
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index f4ca96f..7fe101e 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -40,9 +40,10 @@
self.source = Dummy(canonical_hostname='git.example.com',
connection=self.connection)
self.tenant = model.Tenant('tenant')
- self.layout = model.Layout()
+ self.layout = model.Layout(self.tenant)
self.project = model.Project('project', self.source)
- self.tenant.addUntrustedProject(self.project)
+ self.tpc = model.TenantProjectConfig(self.project)
+ self.tenant.addUntrustedProject(self.tpc)
self.pipeline = model.Pipeline('gate', self.layout)
self.layout.addPipeline(self.pipeline)
self.queue = model.ChangeQueue(self.pipeline)
@@ -58,7 +59,7 @@
@property
def job(self):
tenant = model.Tenant('tenant')
- layout = model.Layout()
+ layout = model.Layout(tenant)
job = configloader.JobParser.fromYaml(tenant, layout, {
'_source_context': self.context,
'_start_mark': self.start_mark,
@@ -169,13 +170,14 @@
def test_job_inheritance_configloader(self):
# TODO(jeblair): move this to a configloader test
tenant = model.Tenant('tenant')
- layout = model.Layout()
+ layout = model.Layout(tenant)
pipeline = model.Pipeline('gate', layout)
layout.addPipeline(pipeline)
queue = model.ChangeQueue(pipeline)
project = model.Project('project', self.source)
- tenant.addUntrustedProject(project)
+ tpc = model.TenantProjectConfig(project)
+ tenant.addUntrustedProject(tpc)
base = configloader.JobParser.fromYaml(tenant, layout, {
'_source_context': self.context,
@@ -331,8 +333,8 @@
'playbooks/base'])
def test_job_auth_inheritance(self):
- tenant = model.Tenant('tenant')
- layout = model.Layout()
+ tenant = self.tenant
+ layout = self.layout
conf = yaml.safe_load('''
- secret:
@@ -357,7 +359,7 @@
secret = configloader.SecretParser.fromYaml(layout, conf)
layout.addSecret(secret)
- base = configloader.JobParser.fromYaml(tenant, layout, {
+ base = configloader.JobParser.fromYaml(self.tenant, self.layout, {
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'base',
@@ -441,8 +443,9 @@
def test_job_inheritance_job_tree(self):
tenant = model.Tenant('tenant')
- layout = model.Layout()
- tenant.addUntrustedProject(self.project)
+ layout = model.Layout(tenant)
+ tpc = model.TenantProjectConfig(self.project)
+ tenant.addUntrustedProject(tpc)
pipeline = model.Pipeline('gate', layout)
layout.addPipeline(pipeline)
@@ -517,13 +520,14 @@
def test_inheritance_keeps_matchers(self):
tenant = model.Tenant('tenant')
- layout = model.Layout()
+ layout = model.Layout(tenant)
pipeline = model.Pipeline('gate', layout)
layout.addPipeline(pipeline)
queue = model.ChangeQueue(pipeline)
project = model.Project('project', self.source)
- tenant.addUntrustedProject(project)
+ tpc = model.TenantProjectConfig(project)
+ tenant.addUntrustedProject(tpc)
base = configloader.JobParser.fromYaml(tenant, layout, {
'_source_context': self.context,
@@ -567,11 +571,13 @@
self.assertEqual([], item.getJobs())
def test_job_source_project(self):
- tenant = model.Tenant('tenant')
- layout = model.Layout()
+ tenant = self.tenant
+ layout = self.layout
base_project = model.Project('base_project', self.source)
base_context = model.SourceContext(base_project, 'master',
'test', True)
+ tpc = model.TenantProjectConfig(base_project)
+ tenant.addUntrustedProject(tpc)
base = configloader.JobParser.fromYaml(tenant, layout, {
'_source_context': base_context,
@@ -583,6 +589,8 @@
other_project = model.Project('other_project', self.source)
other_context = model.SourceContext(other_project, 'master',
'test', True)
+ tpc = model.TenantProjectConfig(other_project)
+ tenant.addUntrustedProject(tpc)
base2 = configloader.JobParser.fromYaml(tenant, layout, {
'_source_context': other_context,
'_start_mark': self.start_mark,
@@ -604,7 +612,8 @@
self.layout.addJob(job)
project2 = model.Project('project2', self.source)
- self.tenant.addUntrustedProject(project2)
+ tpc2 = model.TenantProjectConfig(project2)
+ self.tenant.addUntrustedProject(tpc2)
context2 = model.SourceContext(project2, 'master',
'test', True)
@@ -805,7 +814,8 @@
connection=connection1)
source1_project1 = model.Project('project1', source1)
- tenant.addConfigProject(source1_project1)
+ source1_project1_tpc = model.TenantProjectConfig(source1_project1)
+ tenant.addConfigProject(source1_project1_tpc)
d = {'project1':
{'git1.example.com': source1_project1}}
self.assertEqual(d, tenant.projects)
@@ -815,7 +825,8 @@
tenant.getProject('git1.example.com/project1'))
source1_project2 = model.Project('project2', source1)
- tenant.addUntrustedProject(source1_project2)
+ tpc = model.TenantProjectConfig(source1_project2)
+ tenant.addUntrustedProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1},
'project2':
@@ -832,7 +843,8 @@
connection=connection2)
source2_project1 = model.Project('project1', source2)
- tenant.addUntrustedProject(source2_project1)
+ tpc = model.TenantProjectConfig(source2_project1)
+ tenant.addUntrustedProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
@@ -851,7 +863,8 @@
tenant.getProject('git2.example.com/project1'))
source2_project2 = model.Project('project2', source2)
- tenant.addConfigProject(source2_project2)
+ tpc = model.TenantProjectConfig(source2_project2)
+ tenant.addConfigProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
@@ -877,7 +890,8 @@
tenant.getProject('git2.example.com/project2'))
source1_project2b = model.Project('subpath/project2', source1)
- tenant.addConfigProject(source1_project2b)
+ tpc = model.TenantProjectConfig(source1_project2b)
+ tenant.addConfigProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
@@ -898,7 +912,8 @@
tenant.getProject('git1.example.com/subpath/project2'))
source2_project2b = model.Project('subpath/project2', source2)
- tenant.addConfigProject(source2_project2b)
+ tpc = model.TenantProjectConfig(source2_project2b)
+ tenant.addConfigProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
@@ -927,4 +942,4 @@
with testtools.ExpectedException(
Exception,
"Project project1 is already in project index"):
- tenant._addProject(source1_project1)
+ tenant._addProject(source1_project1_tpc)
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index a90b64d..61bf9f8 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -2289,22 +2289,40 @@
status_jobs.append(job)
self.assertEqual('project-merge', status_jobs[0]['name'])
# TODO(mordred) pull uuids from self.builds
- self.assertEqual('finger://zl.example.com/%s' % status_jobs[0]['uuid'],
- status_jobs[0]['url'])
+ self.assertEqual(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
+ uuid=status_jobs[0]['uuid']),
+ status_jobs[0]['url'])
# TODO(mordred) configure a success-url on the base job
- self.assertEqual('finger://zl.example.com/%s' % status_jobs[0]['uuid'],
- status_jobs[0]['report_url'])
+ self.assertEqual(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
+ uuid=status_jobs[0]['uuid']),
+ status_jobs[0]['report_url'])
self.assertEqual('project-test1', status_jobs[1]['name'])
- self.assertEqual('finger://zl.example.com/%s' % status_jobs[1]['uuid'],
- status_jobs[1]['url'])
- self.assertEqual('finger://zl.example.com/%s' % status_jobs[1]['uuid'],
- status_jobs[1]['report_url'])
+ self.assertEqual(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
+ uuid=status_jobs[1]['uuid']),
+ status_jobs[1]['url'])
+ self.assertEqual(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
+ uuid=status_jobs[1]['uuid']),
+ status_jobs[1]['report_url'])
self.assertEqual('project-test2', status_jobs[2]['name'])
- self.assertEqual('finger://zl.example.com/%s' % status_jobs[2]['uuid'],
- status_jobs[2]['url'])
- self.assertEqual('finger://zl.example.com/%s' % status_jobs[2]['uuid'],
- status_jobs[2]['report_url'])
+ self.assertEqual(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
+ uuid=status_jobs[2]['uuid']),
+ status_jobs[2]['url'])
+ self.assertEqual(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
+ uuid=status_jobs[2]['uuid']),
+ status_jobs[2]['report_url'])
# check job dependencies
self.assertIsNotNone(status_jobs[0]['dependencies'])
@@ -3359,8 +3377,9 @@
# Only C's test jobs are queued because window is still 1.
self.assertEqual(len(self.builds), 2)
- self.assertEqual(self.builds[0].name, 'project-test1')
- self.assertEqual(self.builds[1].name, 'project-test2')
+ builds = self.getSortedBuilds()
+ self.assertEqual(builds[0].name, 'project-test1')
+ self.assertEqual(builds[1].name, 'project-test2')
self.executor_server.release('project-.*')
self.waitUntilSettled()
@@ -3587,8 +3606,11 @@
self.assertEqual('project-merge', job['name'])
self.assertEqual('gate', job['pipeline'])
self.assertEqual(False, job['retry'])
- self.assertEqual('finger://zl.example.com/%s' % job['uuid'],
- job['url'])
+ self.assertEqual(
+ 'finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
+ uuid=job['uuid']),
+ job['url'])
self.assertEqual(2, len(job['worker']))
self.assertEqual(False, job['canceled'])
self.assertEqual(True, job['voting'])
@@ -4684,7 +4706,8 @@
# NOTE: This default URL is currently hard-coded in executor/server.py
self.assertIn(
- '- docs-draft-test2 finger://zl.example.com/{uuid}'.format(
+ '- docs-draft-test2 finger://{hostname}/{uuid}'.format(
+ hostname=self.executor_server.hostname,
uuid=uuid_test2),
body[3])
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 327f745..2b865cf 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -486,6 +486,62 @@
self.assertIn('appears multiple times', A.messages[0],
"A should have a syntax error reported")
+ def test_multi_repo(self):
+ downstream_repo_conf = textwrap.dedent(
+ """
+ - project:
+ name: org/project1
+ tenant-one-gate:
+ jobs:
+ - project-test1
+
+ - job:
+ name: project1-test1
+ parent: project-test1
+ """)
+
+ file_dict = {'.zuul.yaml': downstream_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+ files=file_dict)
+ A.addApproval('code-review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ upstream_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test1
+
+ - job:
+ name: project-test2
+
+ - project:
+ name: org/project
+ tenant-one-gate:
+ jobs:
+ - project-test1
+ """)
+
+ file_dict = {'.zuul.yaml': upstream_repo_conf}
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
+ files=file_dict)
+ B.addApproval('code-review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(B.data['status'], 'MERGED')
+ self.fake_gerrit.addEvent(B.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ tenant = self.sched.abide.tenants.get('tenant-one')
+ # Ensure the latest change is reflected in the config; if it
+ # isn't this will raise an exception.
+ tenant.layout.getJob('project-test2')
+
class TestAnsible(AnsibleZuulTestCase):
# A temporary class to hold new tests while others are disabled
@@ -630,3 +686,17 @@
self.assertHistory([
dict(name='project-test', result='SUCCESS', changes='1,1 2,1'),
])
+
+
+class TestShadow(ZuulTestCase):
+ tenant_config_file = 'config/shadow/main.yaml'
+
+ def test_shadow(self):
+ # Test that a repo is allowed to shadow another's job definitions.
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertHistory([
+ dict(name='test1', result='SUCCESS', changes='1,1'),
+ dict(name='test2', result='SUCCESS', changes='1,1'),
+ ])
diff --git a/zuul/ansible/library/command.py b/zuul/ansible/library/command.py
index 00020c7..f701b48 100644
--- a/zuul/ansible/library/command.py
+++ b/zuul/ansible/library/command.py
@@ -356,6 +356,10 @@
if umask:
old_umask = os.umask(umask)
+ t = None
+ fail_json_kwargs = None
+ ret = None
+
try:
if self._debug:
self.log('Executing: ' + clean_args)
@@ -394,11 +398,27 @@
except (OSError, IOError):
e = get_exception()
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
- self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
+ fail_json_kwargs = dict(rc=e.errno, msg=str(e), cmd=clean_args)
except Exception:
e = get_exception()
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
- self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
+ fail_json_kwargs = dict(rc=257, msg=str(e), exception=traceback.format_exc(), cmd=clean_args)
+ finally:
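+ # Regardless of how the command ended, record the task's exit
+ # status on the streaming console so log consumers can see it,
+ # then report any failure that was deferred above.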
+ if t:
+ with Console(zuul_log_id) as console:
+ if t.isAlive():
+ console.addLine("[Zuul] standard output/error still open "
+ "after child exited")
+ if ret is None and fail_json_kwargs:
+ ret = fail_json_kwargs['rc']
+ elif ret is None and not fail_json_kwargs:
+ ret = -1
+ console.addLine("[Zuul] Task exit code: %s\n" % ret)
+ if ret == -1 and not fail_json_kwargs:
+ self.fail_json(rc=ret, msg="Something went horribly wrong during task execution")
+
+ if fail_json_kwargs:
+ self.fail_json(**fail_json_kwargs)
# Restore env settings
for key, val in old_env_vals.items():
diff --git a/zuul/cmd/web.py b/zuul/cmd/web.py
new file mode 100755
index 0000000..9869a2c
--- /dev/null
+++ b/zuul/cmd/web.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# Copyright 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import asyncio
+import daemon
+import extras
+import logging
+import signal
+import sys
+import threading
+
+import zuul.cmd
+import zuul.web
+
+from zuul.lib.config import get_default
+
+# as of python-daemon 1.6 it doesn't bundle pidlockfile anymore
+# instead it depends on lockfile-0.9.1 which uses pidfile.
+pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile'])
+
+
+class WebServer(zuul.cmd.ZuulApp):
+
+ def parse_arguments(self):
+ parser = argparse.ArgumentParser(description='Zuul Web Server.')
+ parser.add_argument('-c', dest='config',
+ help='specify the config file')
+ parser.add_argument('-d', dest='nodaemon', action='store_true',
+ help='do not run as a daemon')
+ parser.add_argument('--version', dest='version', action='version',
+ version=self._get_version(),
+ help='show zuul version')
+ self.args = parser.parse_args()
+
+ def exit_handler(self, signum, frame):
+ self.web.stop()
+
+ def _main(self):
+ params = dict()
+
+ params['listen_address'] = get_default(self.config,
+ 'web', 'listen_address',
+ '127.0.0.1')
+ params['listen_port'] = get_default(self.config, 'web', 'port', 9000)
+ params['gear_server'] = get_default(self.config, 'gearman', 'server')
+ params['gear_port'] = get_default(self.config, 'gearman', 'port', 4730)
+ params['ssl_key'] = get_default(self.config, 'gearman', 'ssl_key')
+ params['ssl_cert'] = get_default(self.config, 'gearman', 'ssl_cert')
+ params['ssl_ca'] = get_default(self.config, 'gearman', 'ssl_ca')
+
+ try:
+ self.web = zuul.web.ZuulWeb(**params)
+ except Exception:
+ self.log.exception("Error creating ZuulWeb:")
+ sys.exit(1)
+
+ loop = asyncio.get_event_loop()
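+ # Stop the web server cleanly on SIGUSR1 or SIGTERM; the main
+ # thread blocks in signal.pause() below until a signal arrives.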
+ signal.signal(signal.SIGUSR1, self.exit_handler)
+ signal.signal(signal.SIGTERM, self.exit_handler)
+
+ self.log.info('Zuul Web Server starting')
+ self.thread = threading.Thread(target=self.web.run,
+ args=(loop,),
+ name='web')
+ self.thread.start()
+
+ try:
+ signal.pause()
+ except KeyboardInterrupt:
+ print("Ctrl + C: asking web server to exit nicely...\n")
+ self.exit_handler(signal.SIGINT, None)
+
+ self.thread.join()
+ loop.stop()
+ loop.close()
+ self.log.info("Zuul Web Server stopped")
+
+ def main(self):
+ self.setup_logging('web', 'log_config')
+ self.log = logging.getLogger("zuul.WebServer")
+
+ try:
+ self._main()
+ except Exception:
+ self.log.exception("Exception from WebServer:")
+
+
+def main():
+ server = WebServer()
+ server.parse_arguments()
+ server.read_config()
+
+ pid_fn = get_default(server.config, 'web', 'pidfile',
+ '/var/run/zuul-web/zuul-web.pid', expand_user=True)
+
+ pid = pid_file_module.TimeoutPIDLockFile(pid_fn, 10)
+
+ if server.args.nodaemon:
+ server.main()
+ else:
+ with daemon.DaemonContext(pidfile=pid):
+ server.main()
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 4246206..3c9ecf7 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -887,6 +887,7 @@
project_dict = {str: {
'include': to_list(classes),
'exclude': to_list(classes),
+ 'shadow': to_list(str),
}}
project = vs.Any(str, project_dict)
@@ -932,13 +933,18 @@
tenant = model.Tenant(conf['name'])
tenant.unparsed_config = conf
unparsed_config = model.UnparsedTenantConfig()
- config_projects, untrusted_projects = \
+ # tpcs = TenantProjectConfigs
+ config_tpcs, untrusted_tpcs = \
TenantParser._loadTenantProjects(
project_key_dir, connections, conf)
- for project in config_projects:
- tenant.addConfigProject(project)
- for project in untrusted_projects:
- tenant.addUntrustedProject(project)
+ for tpc in config_tpcs:
+ tenant.addConfigProject(tpc)
+ for tpc in untrusted_tpcs:
+ tenant.addUntrustedProject(tpc)
+
+ for tpc in config_tpcs + untrusted_tpcs:
+ TenantParser._resolveShadowProjects(tenant, tpc)
+
tenant.config_projects_config, tenant.untrusted_projects_config = \
TenantParser._loadTenantInRepoLayouts(merger, connections,
tenant.config_projects,
@@ -953,6 +959,13 @@
return tenant
@staticmethod
+ def _resolveShadowProjects(tenant, tpc):
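+ # Map the shadow project names from the tenant config to project
+ # objects, now that all of the tenant's projects are registered.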
+ shadow_projects = []
+ for sp in tpc.shadow_projects:
+ shadow_projects.append(tenant.getProject(sp)[1])
+ tpc.shadow_projects = frozenset(shadow_projects)
+
+ @staticmethod
def _loadProjectKeys(project_key_dir, connection_name, project):
project.private_key_file = (
os.path.join(project_key_dir, connection_name,
@@ -1007,9 +1020,11 @@
# Return a project object whether conf is a dict or a str
project = source.getProject(conf)
project_include = current_include
+ shadow_projects = []
else:
project_name = list(conf.keys())[0]
project = source.getProject(project_name)
+ shadow_projects = as_list(conf[project_name].get('shadow', []))
project_include = frozenset(
as_list(conf[project_name].get('include', [])))
@@ -1020,8 +1035,11 @@
if project_exclude:
project_include = frozenset(project_include - project_exclude)
- project.load_classes = frozenset(project_include)
- return project
+ tenant_project_config = model.TenantProjectConfig(project)
+ tenant_project_config.load_classes = frozenset(project_include)
+ tenant_project_config.shadow_projects = shadow_projects
+
+ return tenant_project_config
@staticmethod
def _getProjects(source, conf, current_include):
@@ -1065,21 +1083,22 @@
current_include = default_include
for conf_repo in conf_source.get('config-projects', []):
- projects = TenantParser._getProjects(source, conf_repo,
- current_include)
- for project in projects:
+ # tpcs = TenantProjectConfigs
+ tpcs = TenantParser._getProjects(source, conf_repo,
+ current_include)
+ for tpc in tpcs:
TenantParser._loadProjectKeys(
- project_key_dir, source_name, project)
- config_projects.append(project)
+ project_key_dir, source_name, tpc.project)
+ config_projects.append(tpc)
current_include = frozenset(default_include - set(['pipeline']))
for conf_repo in conf_source.get('untrusted-projects', []):
- projects = TenantParser._getProjects(source, conf_repo,
- current_include)
- for project in projects:
+ tpcs = TenantParser._getProjects(source, conf_repo,
+ current_include)
+ for tpc in tpcs:
TenantParser._loadProjectKeys(
- project_key_dir, source_name, project)
- untrusted_projects.append(project)
+ project_key_dir, source_name, tpc.project)
+ untrusted_projects.append(tpc)
return config_projects, untrusted_projects
@@ -1090,13 +1109,20 @@
untrusted_projects_config = model.UnparsedTenantConfig()
jobs = []
+ # In some cases, we can use cached data, but it's still
+ # important that we process it in the same order as any jobs
+ # that we run. This class holds the cached data and is inserted
+ # into the ordered jobs list for later processing.
+ class CachedDataJob(object):
+ def __init__(self, config_project, project):
+ self.config_project = config_project
+ self.project = project
+
for project in config_projects:
# If we have cached data (this is a reconfiguration) use it.
if cached and project.unparsed_config:
- TenantParser.log.info(
- "Loading previously parsed configuration from %s" %
- (project,))
- config_projects_config.extend(project.unparsed_config)
+ jobs.append(CachedDataJob(True, project))
continue
# Otherwise, prepare an empty unparsed config object to
# hold cached data later.
@@ -1106,7 +1132,8 @@
job = merger.getFiles(
project.source.connection.connection_name,
project.name, 'master',
- files=['zuul.yaml', '.zuul.yaml'])
+ files=['zuul.yaml', '.zuul.yaml'],
+ dirs=['zuul.d', '.zuul.d'])
job.source_context = model.SourceContext(project, 'master',
'', True)
jobs.append(job)
@@ -1114,10 +1141,7 @@
for project in untrusted_projects:
# If we have cached data (this is a reconfiguration) use it.
if cached and project.unparsed_config:
- TenantParser.log.info(
- "Loading previously parsed configuration from %s" %
- (project,))
- untrusted_projects_config.extend(project.unparsed_config)
+ jobs.append(CachedDataJob(False, project))
continue
# Otherwise, prepare an empty unparsed config object to
# hold cached data later.
@@ -1134,7 +1158,8 @@
job = merger.getFiles(
project.source.connection.connection_name,
project.name, branch,
- files=['zuul.yaml', '.zuul.yaml'])
+ files=['zuul.yaml', '.zuul.yaml'],
+ dirs=['zuul.d', '.zuul.d'])
job.source_context = model.SourceContext(
project, branch, '', False)
jobs.append(job)
@@ -1144,18 +1169,35 @@
# complete in the order they were executed which is the
# same order they were defined in the main config file.
# This is important for correct inheritance.
+ if isinstance(job, CachedDataJob):
+ TenantParser.log.info(
+ "Loading previously parsed configuration from %s" %
+ (job.project,))
+ if job.config_project:
+ config_projects_config.extend(
+ job.project.unparsed_config)
+ else:
+ untrusted_projects_config.extend(
+ job.project.unparsed_config)
+ continue
TenantParser.log.debug("Waiting for cat job %s" % (job,))
job.wait()
+ TenantParser.log.debug("Cat job %s got files %s" %
+ (job, job.files))
loaded = False
- for fn in ['zuul.yaml', '.zuul.yaml']:
- if job.files.get(fn):
- # Don't load from more than one file in a repo-branch
- if loaded:
+ files = sorted(job.files.keys())
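+ # Visit the configuration roots in precedence order; only the
+ # first root that actually supplies configuration is loaded for
+ # a given repo-branch (others trigger the warning below).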
+ for conf_root in ['zuul.yaml', '.zuul.yaml', 'zuul.d', '.zuul.d']:
+ for fn in files:
+ fn_root = fn.split('/')[0]
+ if fn_root != conf_root or not job.files.get(fn):
+ continue
+ # Don't load from more than one configuration root in a repo-branch
+ if loaded and loaded != conf_root:
TenantParser.log.warning(
"Multiple configuration files in %s" %
(job.source_context,))
continue
- loaded = True
+ loaded = conf_root
job.source_context.path = fn
TenantParser.log.info(
"Loading configuration from %s" %
@@ -1192,12 +1234,18 @@
return config
@staticmethod
+ def _getLoadClasses(tenant, conf_object):
+ project = conf_object['_source_context'].project
+ tpc = tenant.project_configs[project.canonical_name]
+ return tpc.load_classes
+
+ @staticmethod
def _parseLayoutItems(layout, tenant, data, scheduler, connections,
skip_pipelines=False, skip_semaphores=False):
if not skip_pipelines:
for config_pipeline in data.pipelines:
- classes = config_pipeline['_source_context'].\
- project.load_classes
+ classes = TenantParser._getLoadClasses(
+ tenant, config_pipeline)
if 'pipeline' not in classes:
continue
layout.addPipeline(PipelineParser.fromYaml(
@@ -1205,7 +1253,7 @@
scheduler, config_pipeline))
for config_nodeset in data.nodesets:
- classes = config_nodeset['_source_context'].project.load_classes
+ classes = TenantParser._getLoadClasses(tenant, config_nodeset)
if 'nodeset' not in classes:
continue
with configuration_exceptions('nodeset', config_nodeset):
@@ -1213,29 +1261,33 @@
layout, config_nodeset))
for config_secret in data.secrets:
- classes = config_secret['_source_context'].project.load_classes
+ classes = TenantParser._getLoadClasses(tenant, config_secret)
if 'secret' not in classes:
continue
layout.addSecret(SecretParser.fromYaml(layout, config_secret))
for config_job in data.jobs:
- classes = config_job['_source_context'].project.load_classes
+ classes = TenantParser._getLoadClasses(tenant, config_job)
if 'job' not in classes:
continue
with configuration_exceptions('job', config_job):
job = JobParser.fromYaml(tenant, layout, config_job)
- layout.addJob(job)
+ added = layout.addJob(job)
+ if not added:
+ TenantParser.log.debug(
+ "Skipped adding job %s which shadows an existing job" %
+ (job,))
if not skip_semaphores:
for config_semaphore in data.semaphores:
- classes = config_semaphore['_source_context'].\
- project.load_classes
+ classes = TenantParser._getLoadClasses(
+ tenant, config_semaphore)
if 'semaphore' not in classes:
continue
layout.addSemaphore(SemaphoreParser.fromYaml(config_semaphore))
for config_template in data.project_templates:
- classes = config_template['_source_context'].project.load_classes
+ classes = TenantParser._getLoadClasses(tenant, config_template)
if 'project-template' not in classes:
continue
layout.addProjectTemplate(ProjectTemplateParser.fromYaml(
@@ -1249,10 +1301,11 @@
# each of the project stanzas. Each one may be (should
# be!) from a different repo, so filter them according to
# the include/exclude rules before parsing them.
- filtered_projects = [
- p for p in config_projects if
- 'project' in p['_source_context'].project.load_classes
- ]
+ filtered_projects = []
+ for config_project in config_projects:
+ classes = TenantParser._getLoadClasses(tenant, config_project)
+ if 'project' in classes:
+ filtered_projects.append(config_project)
if not filtered_projects:
continue
@@ -1262,13 +1315,11 @@
@staticmethod
def _parseLayout(base, tenant, data, scheduler, connections):
- layout = model.Layout()
+ layout = model.Layout(tenant)
TenantParser._parseLayoutItems(layout, tenant, data,
scheduler, connections)
- layout.tenant = tenant
-
for pipeline in layout.pipelines.values():
pipeline.manager._postConfig(layout)
@@ -1328,28 +1379,50 @@
branches = project.source.getProjectBranches(project)
for branch in branches:
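+ # Gather config files under zuul.d/ and .zuul.d/ on this branch;
+ # they are loaded after zuul.yaml/.zuul.yaml in sorted order.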
+ fns1 = []
+ fns2 = []
+ files_list = files.connections.get(
+ project.source.connection.connection_name, {}).get(
+ project.name, {}).get(branch, {}).keys()
+ for fn in files_list:
+ if fn.startswith("zuul.d/"):
+ fns1.append(fn)
+ if fn.startswith(".zuul.d/"):
+ fns2.append(fn)
+
+ fns = ['zuul.yaml', '.zuul.yaml'] + sorted(fns1) + sorted(fns2)
incdata = None
- for fn in ['zuul.yaml', '.zuul.yaml']:
+ loaded = None
+ for fn in fns:
data = files.getFile(project.source.connection.connection_name,
project.name, branch, fn)
if data:
- break
- if data:
- source_context = model.SourceContext(project, branch,
- fn, trusted)
- if trusted:
- incdata = TenantParser._parseConfigProjectLayout(
- data, source_context)
- else:
- incdata = TenantParser._parseUntrustedProjectLayout(
- data, source_context)
- else:
+ source_context = model.SourceContext(project, branch,
+ fn, trusted)
+ # Prevent mixing configuration sources
+ conf_root = fn.split('/')[0]
+ if loaded and loaded != conf_root:
+ TenantParser.log.warning(
+ "Multiple configuration in %s" % source_context)
+ continue
+ loaded = conf_root
+
+ if trusted:
+ incdata = TenantParser._parseConfigProjectLayout(
+ data, source_context)
+ else:
+ incdata = TenantParser._parseUntrustedProjectLayout(
+ data, source_context)
+
+ config.extend(incdata)
+
+ if not loaded:
if trusted:
incdata = project.unparsed_config
else:
incdata = project.unparsed_branch_config.get(branch)
- if incdata:
- config.extend(incdata)
+ if incdata:
+ config.extend(incdata)
def createDynamicLayout(self, tenant, files,
include_config_projects=False):
@@ -1362,7 +1435,7 @@
for project in tenant.untrusted_projects:
self._loadDynamicProjectData(config, project, files, False)
- layout = model.Layout()
+ layout = model.Layout(tenant)
# NOTE: the actual pipeline objects (complete with queues and
# enqueued items) are copied by reference here. This allows
# our shadow dynamic configuration to continue to interact
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index 3612eae..d17e47e 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -135,40 +135,6 @@
self.gearman.shutdown()
self.log.debug("Stopped")
- def isJobRegistered(self, name):
- if self.function_cache_time:
- for connection in self.gearman.active_connections:
- if connection.connect_time > self.function_cache_time:
- self.function_cache = set()
- self.function_cache_time = 0
- break
- if name in self.function_cache:
- self.log.debug("Function %s is registered" % name)
- return True
- if ((time.time() - self.function_cache_time) <
- self.negative_function_cache_ttl):
- self.log.debug("Function %s is not registered "
- "(negative ttl in effect)" % name)
- return False
- self.function_cache_time = time.time()
- for connection in self.gearman.active_connections:
- try:
- req = gear.StatusAdminRequest()
- connection.sendAdminRequest(req, timeout=300)
- except Exception:
- self.log.exception("Exception while checking functions")
- continue
- for line in req.response.split('\n'):
- parts = [x.strip() for x in line.split()]
- if not parts or parts[0] == '.':
- continue
- self.function_cache.add(parts[0])
- if name in self.function_cache:
- self.log.debug("Function %s is registered" % name)
- return True
- self.log.debug("Function %s is not registered" % name)
- return False
-
def execute(self, job, item, pipeline, dependent_items=[],
merger_items=[]):
tenant = pipeline.layout.tenant
@@ -284,6 +250,7 @@
host_keys=node.host_keys,
provider=node.provider,
region=node.region,
+ ssh_port=node.ssh_port,
interface_ip=node.interface_ip,
public_ipv6=node.public_ipv6,
public_ipv4=node.public_ipv4))
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 6c390db..bc30386 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -118,12 +118,12 @@
env.update(self.env)
key_path = os.path.expanduser(key_path)
self.log.debug('Adding SSH Key {}'.format(key_path))
- output = ''
try:
- output = subprocess.check_output(['ssh-add', key_path], env=env,
- stderr=subprocess.PIPE)
- except subprocess.CalledProcessError:
- self.log.error('ssh-add failed: {}'.format(output))
+ subprocess.check_output(['ssh-add', key_path], env=env,
+ stderr=subprocess.PIPE)
+ except subprocess.CalledProcessError as e:
+ self.log.error('ssh-add failed. stdout: %s, stderr: %s',
+ e.output, e.stderr)
raise
self.log.info('Added SSH Key {}'.format(key_path))
@@ -641,7 +641,8 @@
task.wait()
with self.merger_lock:
files = self.merger.getFiles(args['connection'], args['project'],
- args['branch'], args['files'])
+ args['branch'], args['files'],
+ args.get('dirs', []))
result = dict(updated=True,
files=files,
zuul_url=self.zuul_url)
@@ -651,6 +652,7 @@
args = json.loads(job.arguments)
with self.merger_lock:
ret = self.merger.mergeChanges(args['items'], args.get('files'),
+ args.get('dirs', []),
args.get('repo_state'))
result = dict(merged=(ret is not None),
zuul_url=self.zuul_url)
@@ -693,16 +695,12 @@
self.running = False
self.aborted = False
self.thread = None
- self.ssh_agent = None
-
self.private_key_file = get_default(self.executor_server.config,
'executor', 'private_key_file',
'~/.ssh/id_rsa')
self.ssh_agent = SshAgent()
def run(self):
- self.ssh_agent.start()
- self.ssh_agent.add(self.private_key_file)
self.running = True
self.thread = threading.Thread(target=self.execute)
self.thread.start()
@@ -715,6 +713,8 @@
def execute(self):
try:
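+ # Start the SSH agent inside the try block so a failure here
+ # is reported and the agent is stopped in the finally clause.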
+ self.ssh_agent.start()
+ self.ssh_agent.add(self.private_key_file)
self.jobdir = JobDir(self.executor_server.jobdir_root,
self.executor_server.keep_jobdir,
str(self.job.unique))
@@ -724,19 +724,20 @@
self.job.sendWorkException(traceback.format_exc())
finally:
self.running = False
- try:
- self.jobdir.cleanup()
- except Exception:
- self.log.exception("Error cleaning up jobdir:")
- try:
- self.executor_server.finishJob(self.job.unique)
- except Exception:
- self.log.exception("Error finalizing job thread:")
+ if self.jobdir:
+ try:
+ self.jobdir.cleanup()
+ except Exception:
+ self.log.exception("Error cleaning up jobdir:")
if self.ssh_agent:
try:
self.ssh_agent.stop()
except Exception:
self.log.exception("Error stopping SSH agent:")
+ try:
+ self.executor_server.finishJob(self.job.unique)
+ except Exception:
+ self.log.exception("Error finalizing job thread:")
def _execute(self):
args = json.loads(self.job.arguments)
@@ -882,10 +883,10 @@
result = None
pre_failed = False
- for playbook in self.jobdir.pre_playbooks:
+ for count, playbook in enumerate(self.jobdir.pre_playbooks):
# TODOv3(pabelanger): Implement pre-run timeout setting.
pre_status, pre_code = self.runAnsiblePlaybook(
- playbook, args['timeout'])
+ playbook, args['timeout'], phase='pre', count=count)
if pre_status != self.RESULT_NORMAL or pre_code != 0:
# These should really never fail, so return None and have
# zuul try again
@@ -895,7 +896,7 @@
if not pre_failed:
job_status, job_code = self.runAnsiblePlaybook(
- self.jobdir.playbook, args['timeout'])
+ self.jobdir.playbook, args['timeout'], phase='run')
if job_status == self.RESULT_TIMED_OUT:
return 'TIMED_OUT'
if job_status == self.RESULT_ABORTED:
@@ -911,10 +912,10 @@
else:
result = 'FAILURE'
- for playbook in self.jobdir.post_playbooks:
+ for count, playbook in enumerate(self.jobdir.post_playbooks):
# TODOv3(pabelanger): Implement post-run timeout setting.
post_status, post_code = self.runAnsiblePlaybook(
- playbook, args['timeout'], success)
+ playbook, args['timeout'], success, phase='post', count=count)
if post_status != self.RESULT_NORMAL or post_code != 0:
# If we encountered a pre-failure, that takes
# precedence over the post result.
@@ -934,9 +935,11 @@
# results in the wrong thing being in interface_ip
# TODO(jeblair): Move this notice to the docs.
ip = node.get('interface_ip')
+ port = node.get('ssh_port', 22)
host_vars = dict(
ansible_host=ip,
ansible_user=self.executor_server.default_username,
+ ansible_port=port,
nodepool=dict(
az=node.get('az'),
provider=node.get('provider'),
@@ -944,7 +947,10 @@
host_keys = []
for key in node.get('host_keys'):
- host_keys.append("%s %s" % (ip, key))
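+ # Use the known_hosts "[host]:port" form for non-standard ports.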
+ if port != 22:
+ host_keys.append("[%s]:%s %s" % (ip, port, key))
+ else:
+ host_keys.append("%s %s" % (ip, key))
hosts.append(dict(
name=node['name'],
@@ -1077,11 +1083,8 @@
for entry in os.listdir(d):
self._blockPluginDirs(os.path.join(d, entry))
return d
- # We assume the repository itself is a collection of roles
- if not trusted:
- for entry in os.listdir(path):
- self._blockPluginDirs(os.path.join(path, entry))
- return path
+ # It is neither a bare role, nor a collection of roles
+ raise Exception("Unable to find role in %s" % (path,))
def prepareZuulRole(self, args, role, root, trusted, untrusted):
self.log.debug("Prepare zuul role for %s" % (role,))
@@ -1369,7 +1372,8 @@
return (self.RESULT_NORMAL, ret)
- def runAnsiblePlaybook(self, playbook, timeout, success=None):
+ def runAnsiblePlaybook(self, playbook, timeout, success=None,
+ phase=None, count=None):
env_copy = os.environ.copy()
env_copy['LOGNAME'] = 'zuul'
@@ -1383,6 +1387,12 @@
if success is not None:
cmd.extend(['-e', 'success=%s' % str(bool(success))])
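+ # Expose the execution phase (pre/run/post) and the playbook's
+ # index within that phase as Ansible variables.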
+ if phase:
+ cmd.extend(['-e', 'zuul_execution_phase=%s' % phase])
+
+ if count is not None:
+ cmd.extend(['-e', 'zuul_execution_phase_count=%s' % count])
+
result, code = self.runAnsible(
cmd=cmd, timeout=timeout, trusted=playbook.trusted)
self.log.debug("Ansible complete, result %s code %s" % (
diff --git a/zuul/lib/log_streamer.py b/zuul/lib/log_streamer.py
index 67c733e..57afef9 100644
--- a/zuul/lib/log_streamer.py
+++ b/zuul/lib/log_streamer.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import os
import os.path
import pwd
@@ -212,6 +213,8 @@
'''
def __init__(self, user, host, port, jobdir_root):
+ self.log = logging.getLogger('zuul.lib.LogStreamer')
+ self.log.debug("LogStreamer starting on port %s", port)
self.server = CustomForkingTCPServer((host, port),
RequestHandler,
user=user,
@@ -227,3 +230,4 @@
if self.thd.isAlive():
self.server.shutdown()
self.server.server_close()
+ self.log.debug("LogStreamer stopped")
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 01429ce..09b09d7 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -480,7 +480,7 @@
self.log.debug("Preparing dynamic layout for: %s" % item.change)
return self._loadDynamicLayout(item)
- def scheduleMerge(self, item, files=None):
+ def scheduleMerge(self, item, files=None, dirs=None):
build_set = item.current_build_set
if not hasattr(item.change, 'branch'):
@@ -490,12 +490,12 @@
build_set.merge_state = build_set.COMPLETE
return True
- self.log.debug("Scheduling merge for item %s (files: %s)" %
- (item, files))
+ self.log.debug("Scheduling merge for item %s (files: %s, dirs: %s)" %
+ (item, files, dirs))
build_set = item.current_build_set
build_set.merge_state = build_set.PENDING
self.sched.merger.mergeChanges(build_set.merger_items,
- item.current_build_set, files,
+ item.current_build_set, files, dirs,
precedence=self.pipeline.precedence)
return False
@@ -506,7 +506,9 @@
if not build_set.ref:
build_set.setConfiguration()
if build_set.merge_state == build_set.NEW:
- return self.scheduleMerge(item, ['zuul.yaml', '.zuul.yaml'])
+ return self.scheduleMerge(item,
+ files=['zuul.yaml', '.zuul.yaml'],
+ dirs=['zuul.d', '.zuul.d'])
if build_set.merge_state == build_set.PENDING:
return False
if build_set.unable_to_merge:
diff --git a/zuul/merger/client.py b/zuul/merger/client.py
index e92d9fd..e354d5d 100644
--- a/zuul/merger/client.py
+++ b/zuul/merger/client.py
@@ -108,19 +108,21 @@
timeout=300)
return job
- def mergeChanges(self, items, build_set, files=None, repo_state=None,
- precedence=zuul.model.PRECEDENCE_NORMAL):
+ def mergeChanges(self, items, build_set, files=None, dirs=None,
+ repo_state=None, precedence=zuul.model.PRECEDENCE_NORMAL):
data = dict(items=items,
files=files,
+ dirs=dirs,
repo_state=repo_state)
self.submitJob('merger:merge', data, build_set, precedence)
- def getFiles(self, connection_name, project_name, branch, files,
+ def getFiles(self, connection_name, project_name, branch, files, dirs=[],
precedence=zuul.model.PRECEDENCE_HIGH):
data = dict(connection=connection_name,
project=project_name,
branch=branch,
- files=files)
+ files=files,
+ dirs=dirs)
job = self.submitJob('merger:cat', data, None, precedence)
return job
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index 2ac0de8..93340fa 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -254,7 +254,7 @@
origin.fetch()
origin.fetch(tags=True)
- def getFiles(self, files, branch=None, commit=None):
+ def getFiles(self, files, dirs=[], branch=None, commit=None):
ret = {}
repo = self.createRepoObject()
if branch:
@@ -266,6 +266,14 @@
ret[fn] = tree[fn].data_stream.read().decode('utf8')
else:
ret[fn] = None
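+ # Also return the contents of any YAML files found under the
+ # requested directories.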
+ if dirs:
+ for dn in dirs:
+ if dn not in tree:
+ continue
+ for blob in tree[dn].traverse():
+ if blob.path.endswith(".yaml"):
+ ret[blob.path] = blob.data_stream.read().decode(
+ 'utf-8')
return ret
def deleteRemote(self, remote):
@@ -452,7 +460,7 @@
return None
return commit
- def mergeChanges(self, items, files=None, repo_state=None):
+ def mergeChanges(self, items, files=None, dirs=None, repo_state=None):
# connection+project+branch -> commit
recent = {}
commit = None
@@ -470,9 +478,9 @@
commit = self._mergeItem(item, recent, repo_state)
if not commit:
return None
- if files:
+ if files or dirs:
repo = self.getRepo(item['connection'], item['project'])
- repo_files = repo.getFiles(files, commit=commit)
+ repo_files = repo.getFiles(files, dirs, commit=commit)
read_files.append(dict(
connection=item['connection'],
project=item['project'],
@@ -483,6 +491,6 @@
ret_recent[k] = v.hexsha
return commit.hexsha, read_files, repo_state, ret_recent
- def getFiles(self, connection_name, project_name, branch, files):
+ def getFiles(self, connection_name, project_name, branch, files, dirs=[]):
repo = self.getRepo(connection_name, project_name)
- return repo.getFiles(files, branch=branch)
+ return repo.getFiles(files, dirs, branch=branch)
diff --git a/zuul/merger/server.py b/zuul/merger/server.py
index cbc4cb8..555a4bc 100644
--- a/zuul/merger/server.py
+++ b/zuul/merger/server.py
@@ -94,8 +94,9 @@
def merge(self, job):
args = json.loads(job.arguments)
- ret = self.merger.mergeChanges(args['items'], args.get('files'),
- args.get('repo_state'))
+ ret = self.merger.mergeChanges(
+ args['items'], args.get('files'),
+ args.get('dirs'), args.get('repo_state'))
result = dict(merged=(ret is not None),
zuul_url=self.zuul_url)
if ret is None:
@@ -109,7 +110,8 @@
args = json.loads(job.arguments)
self.merger.updateRepo(args['connection'], args['project'])
files = self.merger.getFiles(args['connection'], args['project'],
- args['branch'], args['files'])
+ args['branch'], args['files'],
+ args.get('dirs'))
result = dict(updated=True,
files=files,
zuul_url=self.zuul_url)
diff --git a/zuul/model.py b/zuul/model.py
index ad0e8d1..9d39a0c 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -331,9 +331,6 @@
self.foreign = foreign
self.unparsed_config = None
self.unparsed_branch_config = {} # branch -> UnparsedTenantConfig
- # Configuration object classes to include or exclude when
- # loading zuul config files.
- self.load_classes = frozenset()
def __str__(self):
return self.name
@@ -361,6 +358,7 @@
self.public_ipv4 = None
self.private_ipv4 = None
self.public_ipv6 = None
+ self.ssh_port = 22
self._keys = []
self.az = None
self.provider = None
@@ -1849,7 +1847,9 @@
return set()
def updatesConfig(self):
- if 'zuul.yaml' in self.files or '.zuul.yaml' in self.files:
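+ # Files under zuul.d/ and .zuul.d/ also carry Zuul configuration.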
+ if 'zuul.yaml' in self.files or '.zuul.yaml' in self.files or \
+ any(fn.startswith("zuul.d/") or fn.startswith(".zuul.d/")
+ for fn in self.files):
return True
return False
@@ -1999,6 +1999,20 @@
self.merge_mode = None
+class TenantProjectConfig(object):
+ """A project in the context of a tenant.
+
+ A Project is globally unique in the system; however, when used in
+ a tenant, some metadata about the project local to the tenant is
+ stored in a TenantProjectConfig.
+ """
+
+ def __init__(self, project):
+ self.project = project
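+ # Config object classes (pipeline, job, etc.) to load from this
+ # project within the tenant.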
+ self.load_classes = set()
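+ # Projects whose jobs this project is permitted to shadow.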
+ self.shadow_projects = set()
+
+
class ProjectConfig(object):
# Represents a project configuration
def __init__(self, name):
@@ -2010,6 +2024,7 @@
class UnparsedAbideConfig(object):
+
"""A collection of yaml lists that has not yet been parsed into objects.
An Abide is a collection of tenants.
@@ -2119,8 +2134,8 @@
class Layout(object):
"""Holds all of the Pipelines."""
- def __init__(self):
- self.tenant = None
+ def __init__(self, tenant):
+ self.tenant = tenant
self.project_configs = {}
self.project_templates = {}
self.pipelines = OrderedDict()
@@ -2149,6 +2164,18 @@
prior_jobs = [j for j in self.getJobs(job.name) if
j.source_context.project !=
job.source_context.project]
+ # Unless the repo is permitted to shadow another: if the job we
+ # are adding comes from a repo permitted to shadow the one with
+ # the older jobs, skip adding this job.
+ job_project = job.source_context.project
+ job_tpc = self.tenant.project_configs[job_project.canonical_name]
+ skip_add = False
+ for prior_job in prior_jobs[:]:
+ prior_project = prior_job.source_context.project
+ if prior_project in job_tpc.shadow_projects:
+ prior_jobs.remove(prior_job)
+ skip_add = True
+
if prior_jobs:
raise Exception("Job %s in %s is not permitted to shadow "
"job %s in %s" % (
@@ -2156,11 +2183,13 @@
job.source_context.project,
prior_jobs[0],
prior_jobs[0].source_context.project))
-
+ if skip_add:
+ return False
if job.name in self.jobs:
self.jobs[job.name].append(job)
else:
self.jobs[job.name] = [job]
+ return True
def addNodeSet(self, nodeset):
if nodeset.name in self.nodesets:
@@ -2357,6 +2386,9 @@
# The unparsed config from those projects.
self.untrusted_projects_config = None
self.semaphore_handler = SemaphoreHandler()
+ # Metadata about projects for this tenant
+ # canonical project name -> TenantProjectConfig
+ self.project_configs = {}
# A mapping of project names to projects. project_name ->
# VALUE where VALUE is a further dictionary of
@@ -2364,17 +2396,21 @@
self.projects = {}
self.canonical_hostnames = set()
- def _addProject(self, project):
+ def _addProject(self, tpc):
"""Add a project to the project index
- :arg Project project: The project to add.
+ :arg TenantProjectConfig tpc: The TenantProjectConfig (with
+ associated project) to add.
+
"""
+ project = tpc.project
self.canonical_hostnames.add(project.canonical_hostname)
hostname_dict = self.projects.setdefault(project.name, {})
if project.canonical_hostname in hostname_dict:
raise Exception("Project %s is already in project index" %
(project,))
hostname_dict[project.canonical_hostname] = project
+ self.project_configs[project.canonical_name] = tpc
def getProject(self, name):
"""Return a project given its name.
@@ -2421,13 +2457,13 @@
raise Exception("Project %s is neither trusted nor untrusted" %
(project,))
- def addConfigProject(self, project):
- self.config_projects.append(project)
- self._addProject(project)
+ def addConfigProject(self, tpc):
+ self.config_projects.append(tpc.project)
+ self._addProject(tpc)
- def addUntrustedProject(self, project):
- self.untrusted_projects.append(project)
- self._addProject(project)
+ def addUntrustedProject(self, tpc):
+ self.untrusted_projects.append(tpc.project)
+ self._addProject(tpc)
def getSafeAttributes(self):
return Attributes(name=self.name)
diff --git a/zuul/rpcclient.py b/zuul/rpcclient.py
index 6f0d34b..fd3517f 100644
--- a/zuul/rpcclient.py
+++ b/zuul/rpcclient.py
@@ -86,3 +86,11 @@
def shutdown(self):
self.gearman.shutdown()
+
+ def get_job_log_stream_address(self, uuid, logfile='console.log'):
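+ # Ask the scheduler for the executor host and port that are
+ # streaming logs for this build.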
+ data = {'uuid': uuid, 'logfile': logfile}
+ job = self.submitJob('zuul:get_job_log_stream_address', data)
+ if job.failure:
+ return False
+ else:
+ return json.loads(job.data[0])
diff --git a/zuul/rpclistener.py b/zuul/rpclistener.py
index be3b7d1..6543c91 100644
--- a/zuul/rpclistener.py
+++ b/zuul/rpclistener.py
@@ -53,6 +53,7 @@
self.worker.registerFunction("zuul:enqueue_ref")
self.worker.registerFunction("zuul:promote")
self.worker.registerFunction("zuul:get_running_jobs")
+ self.worker.registerFunction("zuul:get_job_log_stream_address")
def stop(self):
self.log.debug("Stopping")
@@ -173,3 +174,29 @@
running_items.append(item.formatJSON())
job.sendWorkComplete(json.dumps(running_items))
+
+ def handle_get_job_log_stream_address(self, job):
+ # TODO: map log files to ports. Currently there is only one
+ # log stream for a given job. But many jobs produce many
+ # log files, so this is forward compatible with a future
+ # where there are more logs to potentially request than
+ # "console.log"
+ def find_build(uuid):
+ for tenant in self.sched.abide.tenants.values():
+ for pipeline in tenant.layout.pipelines.values():
+ for queue in pipeline.queues:
+ for item in queue.queue:
+ for bld in item.current_build_set.getBuilds():
+ if bld.uuid == uuid:
+ return bld
+ return None
+
+ args = json.loads(job.arguments)
+ uuid = args['uuid']
+ # TODO: logfile = args['logfile']
+ job_log_stream_address = {}
+ build = find_build(uuid)
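+ # If the build is not found, send back an empty dict to signal
+ # that the log stream is unavailable.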
+ if build:
+ job_log_stream_address['server'] = build.worker.hostname
+ job_log_stream_address['port'] = build.worker.log_port
+ job.sendWorkComplete(json.dumps(job_log_stream_address))
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index fe6a673..dd0846d 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -779,6 +779,7 @@
self.log.error("Unable to handle event %s" % event)
event.done()
except Exception:
+ self.log.exception("Exception in management event:")
event.exception(sys.exc_info())
self.management_event_queue.task_done()
diff --git a/zuul/web.py b/zuul/web.py
new file mode 100644
index 0000000..ab16e11
--- /dev/null
+++ b/zuul/web.py
@@ -0,0 +1,236 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import asyncio
+import json
+import logging
+import uvloop
+
+import aiohttp
+from aiohttp import web
+
+import zuul.rpcclient
+
+
+class LogStreamingHandler(object):
+ log = logging.getLogger("zuul.web.LogStreamingHandler")
+
+ def __init__(self, loop, gear_server, gear_port,
+ ssl_key=None, ssl_cert=None, ssl_ca=None):
+ self.event_loop = loop
+ self.gear_server = gear_server
+ self.gear_port = gear_port
+ self.ssl_key = ssl_key
+ self.ssl_cert = ssl_cert
+ self.ssl_ca = ssl_ca
+
+ def _getPortLocation(self, job_uuid):
+ '''
+ Query Gearman for the executor running the given job.
+
+ :param str job_uuid: The job UUID we want to stream.
+ '''
+ # TODO: Fetch the entire list of uuid/file/server/ports once and
+ # share that, and fetch a new list on cache misses perhaps?
+ # TODO: Avoid recreating a client for each request.
+ rpc = zuul.rpcclient.RPCClient(self.gear_server, self.gear_port,
+ self.ssl_key, self.ssl_cert,
+ self.ssl_ca)
+ ret = rpc.get_job_log_stream_address(job_uuid)
+ rpc.shutdown()
+ return ret
+
+ async def _fingerClient(self, ws, server, port, job_uuid):
+ '''
+ Create a client to connect to the finger streamer and pull results.
+
+ :param aiohttp.web.WebSocketResponse ws: The websocket response object.
+ :param str server: The executor server running the job.
+ :param str port: The executor server port.
+ :param str job_uuid: The job UUID to stream.
+ '''
+ self.log.debug("Connecting to finger server %s:%s", server, port)
+ reader, writer = await asyncio.open_connection(host=server, port=port,
+ loop=self.event_loop)
+
+ self.log.debug("Sending finger request for %s", job_uuid)
+ msg = "%s\n" % job_uuid # Must have a trailing newline!
+
+ writer.write(msg.encode('utf8'))
+ await writer.drain()
+
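+ # Relay the finger stream to the websocket until the server
+ # closes the connection.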
+ while True:
+ data = await reader.read(1024)
+ if data:
+ await ws.send_str(data.decode('utf8'))
+ else:
+ writer.close()
+ return
+
+ async def _streamLog(self, ws, request):
+ '''
+ Stream the log for the requested job back to the client.
+
+ :param aiohttp.web.WebSocketResponse ws: The websocket response object.
+ :param dict request: The client request parameters.
+ '''
+ for key in ('uuid', 'logfile'):
+ if key not in request:
+ return (4000, "'{key}' missing from request payload".format(
+ key=key))
+
+ # Schedule the blocking gearman work in an Executor
+ gear_task = self.event_loop.run_in_executor(
+ None, self._getPortLocation, request['uuid'])
+
+ try:
+ port_location = await asyncio.wait_for(gear_task, 10)
+ except asyncio.TimeoutError:
+ return (4010, "Gearman timeout")
+
+ if not port_location:
+ return (4011, "Error with Gearman")
+
+ await self._fingerClient(
+ ws, port_location['server'], port_location['port'], request['uuid']
+ )
+
+ return (1000, "No more data")
+
+ async def processRequest(self, request):
+ '''
+ Handle a client websocket request for log streaming.
+
+ :param aiohttp.web.Request request: The client request.
+ '''
+ try:
+ ws = web.WebSocketResponse()
+ await ws.prepare(request)
+ async for msg in ws:
+ if msg.type == aiohttp.WSMsgType.TEXT:
+ req = json.loads(msg.data)
+ self.log.debug("Websocket request: %s", req)
+ code, msg = await self._streamLog(ws, req)
+
+ # We expect to process only a single message. I.e., we
+ # can stream only a single file at a time.
+ await ws.close(code=code, message=msg)
+ break
+ elif msg.type == aiohttp.WSMsgType.ERROR:
+ self.log.error(
+ "Websocket connection closed with exception %s",
+ ws.exception()
+ )
+ break
+ elif msg.type == aiohttp.WSMsgType.CLOSED:
+ break
+ except asyncio.CancelledError:
+ self.log.debug("Websocket request handling cancelled")
+ except Exception as e:
+ self.log.exception("Websocket exception:")
+ await ws.close(code=4009, message=str(e).encode('utf-8'))
+ return ws
+
+
+class ZuulWeb(object):
+
+ log = logging.getLogger("zuul.web.ZuulWeb")
+
+ def __init__(self, listen_address, listen_port,
+ gear_server, gear_port,
+ ssl_key=None, ssl_cert=None, ssl_ca=None):
+ self.listen_address = listen_address
+ self.listen_port = listen_port
+ self.gear_server = gear_server
+ self.gear_port = gear_port
+ self.ssl_key = ssl_key
+ self.ssl_cert = ssl_cert
+ self.ssl_ca = ssl_ca
+
+ async def _handleWebsocket(self, request):
+ handler = LogStreamingHandler(self.event_loop,
+ self.gear_server, self.gear_port,
+ self.ssl_key, self.ssl_cert, self.ssl_ca)
+ return await handler.processRequest(request)
+
+ def run(self, loop=None):
+ '''
+ Run the websocket daemon.
+
+ Because this method can be the target of a new thread, we need to
+ set the thread event loop here, rather than in __init__().
+
+ :param loop: The event loop to use. If not supplied, the default main
+ thread event loop is used. This should be supplied if ZuulWeb
+ is run within a separate (non-main) thread.
+ '''
+ routes = [
+ ('GET', '/console-stream', self._handleWebsocket)
+ ]
+
+ self.log.debug("ZuulWeb starting")
+ asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+ user_supplied_loop = loop is not None
+ if not loop:
+ loop = asyncio.get_event_loop()
+ asyncio.set_event_loop(loop)
+
+ self.event_loop = loop
+
+ app = web.Application()
+ for method, path, handler in routes:
+ app.router.add_route(method, path, handler)
+ handler = app.make_handler(loop=self.event_loop)
+
+ # create the server
+ coro = self.event_loop.create_server(handler,
+ self.listen_address,
+ self.listen_port)
+ self.server = self.event_loop.run_until_complete(coro)
+
+ self.term = asyncio.Future()
+
+ # start the server
+ self.event_loop.run_until_complete(self.term)
+
+ # cleanup
+ self.log.debug("ZuulWeb stopping")
+ self.server.close()
+ self.event_loop.run_until_complete(self.server.wait_closed())
+ self.event_loop.run_until_complete(app.shutdown())
+ self.event_loop.run_until_complete(handler.shutdown(60.0))
+ self.event_loop.run_until_complete(app.cleanup())
+ self.log.debug("ZuulWeb stopped")
+
+ # Only run these if we are controlling the loop - they need to be
+ # run from the main thread
+ if not user_supplied_loop:
+ loop.stop()
+ loop.close()
+
+ def stop(self):
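+ # Resolving the 'term' future unblocks run(), which then
+ # proceeds with server shutdown and cleanup.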
+ self.event_loop.call_soon_threadsafe(self.term.set_result, True)
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.DEBUG)
+ loop = asyncio.get_event_loop()
+ loop.set_debug(True)
+ z = ZuulWeb(listen_address="127.0.0.1", listen_port=9000,
+ gear_server="127.0.0.1", gear_port=4730)
+ z.run(loop)