Reload tenant configuration when a config change merges
Whenever a change that updates the Zuul config merges, Zuul should
immediately update its configuration. This should generally be a
no-op for changes to project repos in a dependent pipeline; however,
it may affect independent pipelines of project repos, or anything
related to config repos (for which the config is not updated
dynamically).
To do this efficiently, try to use as much cached data as possible,
but, to ensure correctness, still perform the re-enqueue operations
that are normal for a reconfiguration. Limit the reconfiguration to
just the affected tenant. NOTE: this may be a mistake -- tenants may
share common config repos, so if a change lands in such a repo, more
than one tenant may need to be updated.
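In rough outline, the caching behavior described above boils down to
the pattern in the sketch below. The names are placeholders chosen for
illustration, not Zuul's actual classes or methods; see the diff for
the real implementation:

    # Minimal, self-contained sketch of the per-project config cache.
    class FakeProject(object):
        def __init__(self, name):
            self.name = name
            self.unparsed_config = None  # cached parsed config, or None

    def load_project_config(project, cached):
        # On a tenant-only reload (cached=True), reuse the cache if set.
        if cached and project.unparsed_config is not None:
            return project.unparsed_config
        # Otherwise fetch and parse the repo's config (stubbed out here).
        project.unparsed_config = {'jobs': ['%s-test' % project.name]}
        return project.unparsed_config

    def reload_tenant(projects, cached):
        return [load_project_config(p, cached) for p in projects]

    def on_change_merged(project, projects):
        # A config-updating change landed in this project: clear its
        # cache so only this repo is re-read, then reload the tenant.
        project.unparsed_config = None
        reload_tenant(projects, cached=True)

    projects = [FakeProject('org/project'), FakeProject('org/other')]
    reload_tenant(projects, cached=False)    # full load: parse everything
    on_change_merged(projects[0], projects)  # re-parse only org/project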
Change-Id: Ib35b0b663423b1a27f9e3bcdec7480345e3bdff1
diff --git a/tests/base.py b/tests/base.py
index 59cc9ae..66fd85a 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -1258,7 +1258,8 @@
self.event_queues = [
self.sched.result_event_queue,
- self.sched.trigger_event_queue
+ self.sched.trigger_event_queue,
+ self.sched.management_event_queue
]
self.configure_connections()
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 74d69c9..dd812a8 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -107,13 +107,25 @@
A.addApproval('code-review', 2)
self.fake_gerrit.addEvent(A.addApproval('approved', 1))
self.waitUntilSettled()
- self.assertEqual(self.getJobFromHistory('project-test2').result,
- 'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertIn('tenant-one-gate', A.messages[1],
"A should transit tenant-one gate")
+ self.assertHistory([
+ dict(name='project-test2', result='SUCCESS', changes='1,1')])
+
+ # Now that the config change is landed, it should be live for
+ # subsequent changes.
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.addApproval('code-review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('approved', 1))
+ self.waitUntilSettled()
+ self.assertEqual(self.getJobFromHistory('project-test2').result,
+ 'SUCCESS')
+ self.assertHistory([
+ dict(name='project-test2', result='SUCCESS', changes='1,1'),
+ dict(name='project-test2', result='SUCCESS', changes='2,1')])
class TestAnsible(AnsibleZuulTestCase):
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 7a07956..4a97205 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import os
import logging
import six
@@ -230,6 +231,8 @@
@staticmethod
def fromYaml(layout, conf):
ProjectTemplateParser.getSchema(layout)(conf)
+ # Make a copy since we modify this later via pop
+ conf = copy.deepcopy(conf)
project_template = model.ProjectConfig(conf['name'])
source_context = conf['_source_context']
for pipeline in layout.pipelines.values():
@@ -300,6 +303,8 @@
# TODOv3(jeblair): This may need some branch-specific
# configuration for in-repo configs.
ProjectParser.getSchema(layout)(conf)
+ # Make a copy since we modify this later via pop
+ conf = copy.deepcopy(conf)
conf_templates = conf.pop('templates', [])
# The way we construct a project definition is by parsing the
# definition as a template, then applying all of the
@@ -539,15 +544,18 @@
return vs.Schema(tenant)
@staticmethod
- def fromYaml(base, connections, scheduler, merger, conf):
+ def fromYaml(base, connections, scheduler, merger, conf, cached):
TenantParser.getSchema(connections)(conf)
tenant = model.Tenant(conf['name'])
+ tenant.unparsed_config = conf
unparsed_config = model.UnparsedTenantConfig()
tenant.config_repos, tenant.project_repos = \
TenantParser._loadTenantConfigRepos(connections, conf)
tenant.config_repos_config, tenant.project_repos_config = \
- TenantParser._loadTenantInRepoLayouts(
- merger, connections, tenant.config_repos, tenant.project_repos)
+ TenantParser._loadTenantInRepoLayouts(merger, connections,
+ tenant.config_repos,
+ tenant.project_repos,
+ cached)
unparsed_config.extend(tenant.config_repos_config)
unparsed_config.extend(tenant.project_repos_config)
tenant.layout = TenantParser._parseLayout(base, unparsed_config,
@@ -575,12 +583,22 @@
@staticmethod
def _loadTenantInRepoLayouts(merger, connections, config_repos,
- project_repos):
+ project_repos, cached):
config_repos_config = model.UnparsedTenantConfig()
project_repos_config = model.UnparsedTenantConfig()
jobs = []
for (source, project) in config_repos:
+ # If we have cached data (this is a reconfiguration) use it.
+ if cached and project.unparsed_config:
+ TenantParser.log.info(
+ "Loading previously parsed configuration from %s" %
+ (project,))
+ config_repos_config.extend(project.unparsed_config)
+ continue
+ # Otherwise, prepare an empty unparsed config object to
+ # hold cached data later.
+ project.unparsed_config = model.UnparsedTenantConfig()
# Get main config files. These files are permitted the
# full range of configuration.
url = source.getGitUrl(project)
@@ -590,6 +608,16 @@
jobs.append(job)
for (source, project) in project_repos:
+ # If we have cached data (this is a reconfiguration) use it.
+ if cached and project.unparsed_config:
+ TenantParser.log.info(
+ "Loading previously parsed configuration from %s" %
+ (project,))
+ project_repos_config.extend(project.unparsed_config)
+ continue
+ # Otherwise, prepare an empty unparsed config object to
+ # hold cached data later.
+ project.unparsed_config = model.UnparsedTenantConfig()
# Get in-project-repo config files which have a restricted
# set of options.
url = source.getGitUrl(project)
@@ -624,7 +652,7 @@
incdata = TenantParser._parseProjectRepoLayout(
job.files[fn], job.source_context)
project_repos_config.extend(incdata)
- job.source_context.project.unparsed_config = incdata
+ job.source_context.project.unparsed_config.extend(incdata)
return config_repos_config, project_repos_config
@staticmethod
@@ -695,11 +723,27 @@
base = os.path.dirname(os.path.realpath(config_path))
for conf_tenant in config.tenants:
+ # When performing a full reload, do not use cached data.
tenant = TenantParser.fromYaml(base, connections, scheduler,
- merger, conf_tenant)
+ merger, conf_tenant, cached=False)
abide.tenants[tenant.name] = tenant
return abide
+ def reloadTenant(self, config_path, scheduler, merger, connections,
+ abide, tenant):
+ new_abide = model.Abide()
+ new_abide.tenants = abide.tenants.copy()
+
+ config_path = self.expandConfigPath(config_path)
+ base = os.path.dirname(os.path.realpath(config_path))
+
+ # When reloading a tenant only, use cached data if available.
+ new_tenant = TenantParser.fromYaml(base, connections, scheduler,
+ merger, tenant.unparsed_config,
+ cached=True)
+ new_abide.tenants[tenant.name] = new_tenant
+ return new_abide
+
def createDynamicLayout(self, tenant, files):
config = tenant.config_repos_config.copy()
for source, project in tenant.project_repos:
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 18cf11b..71d8c19 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -689,6 +689,12 @@
tenant = self.pipeline.layout.tenant
zuul_driver.onChangeMerged(tenant, item.change,
self.pipeline.source)
+ if item.change.updatesConfig():
+ # The change that just landed updates the config.
+ # Clear out cached data for this project and
+ # perform a reconfiguration.
+ item.change.project.unparsed_config = None
+ self.sched.reconfigureTenant(tenant)
def _reportItem(self, item):
self.log.debug("Reporting change %s" % item.change)
diff --git a/zuul/model.py b/zuul/model.py
index 5a9e367..a2f5ebd 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -2133,6 +2133,9 @@
def __init__(self, name):
self.name = name
self.layout = None
+ # The unparsed configuration from the main zuul config for
+ # this tenant.
+ self.unparsed_config = None
# The list of repos from which we will read main
# configuration. (source, project)
self.config_repos = []
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 1162f51..6fbac9b 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -125,6 +125,17 @@
self.config = config
+class TenantReconfigureEvent(ManagementEvent):
+ """Reconfigure the given tenant. The layout will be (re-)loaded from
+ the path specified in the configuration.
+
+ :arg Tenant tenant: the tenant to reconfigure
+ """
+ def __init__(self, tenant):
+ super(TenantReconfigureEvent, self).__init__()
+ self.tenant = tenant
+
+
class PromoteEvent(ManagementEvent):
"""Promote one or more changes to the head of the queue.
@@ -385,6 +396,12 @@
self.result_event_queue.put(event)
self.wake_event.set()
+ def reconfigureTenant(self, tenant):
+ self.log.debug("Prepare to reconfigure")
+ event = TenantReconfigureEvent(tenant)
+ self.management_event_queue.put(event)
+ self.wake_event.set()
+
def reconfigure(self, config):
self.log.debug("Prepare to reconfigure")
event = ReconfigureEvent(config)
@@ -394,6 +411,7 @@
event.wait()
self.log.debug("Reconfiguration complete")
self.last_reconfigured = int(time.time())
+ # TODOv3(jeblair): reconfigure time should be per-tenant
def promote(self, tenant_name, pipeline_name, change_ids):
event = PromoteEvent(tenant_name, pipeline_name, change_ids)
@@ -499,6 +517,23 @@
finally:
self.layout_lock.release()
+ def _doTenantReconfigureEvent(self, event):
+ # This is called in the scheduler loop after another thread submits
+ # a request
+ self.layout_lock.acquire()
+ try:
+ self.log.debug("Performing tenant reconfiguration")
+ loader = configloader.ConfigLoader()
+ abide = loader.reloadTenant(
+ self.config.get('zuul', 'tenant_config'),
+ self, self.merger, self.connections,
+ self.abide, event.tenant)
+ tenant = abide.tenants[event.tenant.name]
+ self._reconfigureTenant(tenant)
+ self.abide = abide
+ finally:
+ self.layout_lock.release()
+
def _reenqueueTenant(self, old_tenant, tenant):
for name, new_pipeline in tenant.layout.pipelines.items():
old_pipeline = old_tenant.layout.pipelines.get(name)
@@ -738,6 +773,8 @@
try:
if isinstance(event, ReconfigureEvent):
self._doReconfigureEvent(event)
+ elif isinstance(event, TenantReconfigureEvent):
+ self._doTenantReconfigureEvent(event)
elif isinstance(event, PromoteEvent):
self._doPromoteEvent(event)
elif isinstance(event, EnqueueEvent):