Add host/group vars
This adds job-level definitions of Ansible host variables and group
variables.
Change-Id: I259144a8560f8fa5ff5990620854fed2002c8951
diff --git a/doc/source/user/config.rst b/doc/source/user/config.rst
index 597062e..18d28c4 100644
--- a/doc/source/user/config.rst
+++ b/doc/source/user/config.rst
@@ -856,6 +856,48 @@
same name will override a previously defined variable, but new
variable names will be added to the set of defined variables.
+ .. attr:: host_vars
+
+ A dictionary of host variables to supply to Ansible. The keys
+ of this dictionary are node names as defined in a
+ :ref:`nodeset`, and the values are dictionaries of variables,
+ just as in :attr:`job.vars`.
+
+ .. attr:: group_vars
+
+ A dictionary of group variables to supply to Ansible. The keys
+ of this dictionary are node groups as defined in a
+ :ref:`nodeset`, and the values are dictionaries of variables,
+ just as in :attr:`job.vars`.
+
+ An example of three kinds of variables:
+
+ .. code-block:: yaml
+
+ - job:
+ name: variable-example
+ nodeset:
+ nodes:
+ - name: controller
+ label: fedora-27
+ - name: api1
+ label: centos-7
+ - name: api2
+ label: centos-7
+ groups:
+ - name: api
+ nodes:
+ - api1
+ - api2
+ vars:
+ foo: "this variable is visible to all nodes"
+ host_vars:
+ controller:
+ bar: "this variable is visible only on the controller node"
+ group_vars:
+ api:
+ baz: "this variable is visible on api1 and api2"
+
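+      Because all three kinds of variables are written to the Ansible
+      inventory, normal Ansible precedence should apply: a host
+      variable overrides a group variable of the same name, which in
+      turn overrides an entry in :attr:`job.vars`.
+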
.. attr:: dependencies
A list of other jobs upon which this job depends. Zuul will not
diff --git a/tests/base.py b/tests/base.py
index 70889bb..96491f9 100755
--- a/tests/base.py
+++ b/tests/base.py
@@ -1325,7 +1325,7 @@
host['host_vars']['ansible_connection'] = 'local'
hosts.append(dict(
- name=['localhost'],
+ name='localhost',
host_vars=dict(ansible_connection='local'),
host_keys=[]))
return hosts
diff --git a/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml b/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml
new file mode 100644
index 0000000..36e0eca
--- /dev/null
+++ b/tests/fixtures/config/ansible/git/common-config/playbooks/check-hostvars.yaml
@@ -0,0 +1,26 @@
+- hosts: host1
+ tasks:
+ - name: Assert hostvar is present.
+ assert:
+ that:
+ - allvar == 'all'
+ - hostvar == 'host'
+ - groupvar is not defined
+
+- hosts: host2
+ tasks:
+ - name: Assert groupvar is present.
+ assert:
+ that:
+ - allvar == 'all'
+ - hostvar is not defined
+ - groupvar == 'group'
+
+- hosts: host3
+ tasks:
+ - name: Assert groupvar is present.
+ assert:
+ that:
+ - allvar == 'all'
+ - hostvar is not defined
+ - groupvar == 'group'
diff --git a/tests/fixtures/config/ansible/git/common-config/zuul.yaml b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
index d0a8f7b..7637a08 100644
--- a/tests/fixtures/config/ansible/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/ansible/git/common-config/zuul.yaml
@@ -115,6 +115,32 @@
- job:
parent: python27
+ name: check-hostvars
+ run: playbooks/check-hostvars.yaml
+ nodeset:
+ nodes:
+ - name: host1
+ label: ubuntu-xenial
+ - name: host2
+ label: ubuntu-xenial
+ - name: host3
+ label: ubuntu-xenial
+ groups:
+ - name: group1
+ nodes:
+ - host2
+ - host3
+ vars:
+ allvar: all
+ host_vars:
+ host1:
+ hostvar: host
+ group_vars:
+ group1:
+ groupvar: group
+
+- job:
+ parent: python27
name: check-secret-names
run: playbooks/check-secret-names.yaml
nodeset:
diff --git a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
index 447f6cd..142625f 100644
--- a/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
+++ b/tests/fixtures/config/ansible/git/org_project/.zuul.yaml
@@ -15,6 +15,7 @@
- python27
- faillocal
- check-vars
+ - check-hostvars
- check-secret-names
- timeout
- hello-world
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
index 46e1d99..c67eb55 100755
--- a/tests/unit/test_executor.py
+++ b/tests/unit/test_executor.py
@@ -425,12 +425,14 @@
node = {'name': 'fake-host',
'host_keys': ['fake-host-key'],
'interface_ip': 'localhost'}
- keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+        keys = self.test_job.getHostList({'nodes': [node],
+                                          'host_vars': {}})[0]['host_keys']
self.assertEqual(keys[0], 'localhost fake-host-key')
# Test with custom connection_port set
node['connection_port'] = 22022
- keys = self.test_job.getHostList({'nodes': [node]})[0]['host_keys']
+        keys = self.test_job.getHostList({'nodes': [node],
+                                          'host_vars': {}})[0]['host_keys']
self.assertEqual(keys[0], '[localhost]:22022 fake-host-key')
diff --git a/zuul/configloader.py b/zuul/configloader.py
index be1bd63..0b78993 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -499,6 +499,8 @@
'roles': to_list(role),
'required-projects': to_list(vs.Any(job_project, str)),
'vars': dict,
+ 'host_vars': {str: dict},
+ 'group_vars': {str: dict},
'dependencies': to_list(str),
'allowed-projects': to_list(str),
'override-branch': str,
@@ -600,8 +602,9 @@
secret = layout.secrets.get(secret_config['secret'])
if secret is None:
raise SecretNotFoundError(secret_name)
- if secret_name == 'zuul':
- raise Exception("Secrets named 'zuul' are not allowed.")
+ if secret_name == 'zuul' or secret_name == 'nodepool':
+ raise Exception("Secrets named 'zuul' or 'nodepool' "
+ "are not allowed.")
if not secret.source_context.isSameProject(job.source_context):
raise Exception(
"Unable to use secret %s. Secrets must be "
@@ -714,9 +717,24 @@
variables = conf.get('vars', None)
if variables:
- if 'zuul' in variables:
- raise Exception("Variables named 'zuul' are not allowed.")
+ if 'zuul' in variables or 'nodepool' in variables:
+ raise Exception("Variables named 'zuul' or 'nodepool' "
+ "are not allowed.")
job.variables = variables
+ host_variables = conf.get('host_vars', None)
+ if host_variables:
+ for host, hvars in host_variables.items():
+ if 'zuul' in hvars or 'nodepool' in hvars:
+ raise Exception("Variables named 'zuul' or 'nodepool' "
+ "are not allowed.")
+ job.host_variables = host_variables
+ group_variables = conf.get('group_vars', None)
+ if group_variables:
+ for group, gvars in group_variables.items():
+                if 'zuul' in gvars or 'nodepool' in gvars:
+ raise Exception("Variables named 'zuul' or 'nodepool' "
+ "are not allowed.")
+ job.group_variables = group_variables
allowed_projects = conf.get('allowed-projects', None)
if allowed_projects:
diff --git a/zuul/executor/client.py b/zuul/executor/client.py
index d561232..35f3199 100644
--- a/zuul/executor/client.py
+++ b/zuul/executor/client.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import copy
import gear
import json
import logging
@@ -208,7 +207,9 @@
nodes.append(n)
params['nodes'] = nodes
params['groups'] = [group.toDict() for group in nodeset.getGroups()]
- params['vars'] = copy.deepcopy(job.variables)
+ params['vars'] = job.variables
+ params['host_vars'] = job.host_variables
+ params['group_vars'] = job.group_variables
params['zuul'] = zuul_params
projects = set()
required_projects = set()
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 7384aa5..de85647 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -497,16 +497,22 @@
shutil.copy(os.path.join(library_path, fn), target_dir)
-def make_setup_inventory_dict(nodes):
+def check_varnames(var):
+    # We block these in configloader, but block them here too to make
+    # sure that a job doesn't pass variables named zuul or nodepool.
+ if 'zuul' in var:
+ raise Exception("Defining variables named 'zuul' is not allowed")
+ if 'nodepool' in var:
+ raise Exception("Defining variables named 'nodepool' is not allowed")
+
+
+def make_setup_inventory_dict(nodes):
hosts = {}
for node in nodes:
if (node['host_vars']['ansible_connection'] in
BLACKLISTED_ANSIBLE_CONNECTION_TYPES):
continue
-
- for name in node['name']:
- hosts[name] = node['host_vars']
+ hosts[node['name']] = node['host_vars']
inventory = {
'all': {
@@ -517,12 +523,10 @@
return inventory
-def make_inventory_dict(nodes, groups, all_vars):
-
+def make_inventory_dict(nodes, args, all_vars):
hosts = {}
for node in nodes:
- for name in node['name']:
- hosts[name] = node['host_vars']
+ hosts[node['name']] = node['host_vars']
inventory = {
'all': {
@@ -531,14 +535,16 @@
}
}
- for group in groups:
+ for group in args['groups']:
group_hosts = {}
for node_name in group['nodes']:
- # children is a dict with None as values because we don't have
- # and per-group variables. If we did, None would be a dict
- # with the per-group variables
group_hosts[node_name] = None
- inventory[group['name']] = {'hosts': group_hosts}
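+        # Seed the group with any job-supplied group_vars; copy them
+        # so the inventory never shares state with the job arguments.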
+ group_vars = args['group_vars'].get(group['name'], {}).copy()
+ check_varnames(group_vars)
+ inventory[group['name']] = {
+ 'hosts': group_hosts,
+ 'vars': group_vars,
+ }
return inventory
@@ -959,42 +965,45 @@
# set to True in the clouds.yaml for a cloud if this
# results in the wrong thing being in interface_ip
# TODO(jeblair): Move this notice to the docs.
- ip = node.get('interface_ip')
- port = node.get('connection_port', node.get('ssh_port', 22))
- host_vars = dict(
- ansible_host=ip,
- ansible_user=self.executor_server.default_username,
- ansible_port=port,
- nodepool=dict(
- label=node.get('label'),
- az=node.get('az'),
- cloud=node.get('cloud'),
- provider=node.get('provider'),
- region=node.get('region'),
- interface_ip=node.get('interface_ip'),
- public_ipv4=node.get('public_ipv4'),
- private_ipv4=node.get('private_ipv4'),
- public_ipv6=node.get('public_ipv6')))
+ for name in node['name']:
+ ip = node.get('interface_ip')
+ port = node.get('connection_port', node.get('ssh_port', 22))
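+                # Start from any job-supplied host_vars for this name,
+                # then overlay Zuul's connection details and nodepool
+                # metadata so a job cannot override them.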
+ host_vars = args['host_vars'].get(name, {}).copy()
+ check_varnames(host_vars)
+ host_vars.update(dict(
+ ansible_host=ip,
+ ansible_user=self.executor_server.default_username,
+ ansible_port=port,
+ nodepool=dict(
+ label=node.get('label'),
+ az=node.get('az'),
+ cloud=node.get('cloud'),
+ provider=node.get('provider'),
+ region=node.get('region'),
+ interface_ip=node.get('interface_ip'),
+ public_ipv4=node.get('public_ipv4'),
+ private_ipv4=node.get('private_ipv4'),
+ public_ipv6=node.get('public_ipv6'))))
- username = node.get('username')
- if username:
- host_vars['ansible_user'] = username
+ username = node.get('username')
+ if username:
+ host_vars['ansible_user'] = username
- connection_type = node.get('connection_type')
- if connection_type:
- host_vars['ansible_connection'] = connection_type
+ connection_type = node.get('connection_type')
+ if connection_type:
+ host_vars['ansible_connection'] = connection_type
- host_keys = []
- for key in node.get('host_keys'):
- if port != 22:
- host_keys.append("[%s]:%s %s" % (ip, port, key))
- else:
- host_keys.append("%s %s" % (ip, key))
+ host_keys = []
+ for key in node.get('host_keys'):
+ if port != 22:
+ host_keys.append("[%s]:%s %s" % (ip, port, key))
+ else:
+ host_keys.append("%s %s" % (ip, key))
- hosts.append(dict(
- name=node['name'],
- host_vars=host_vars,
- host_keys=host_keys))
+ hosts.append(dict(
+ name=name,
+ host_vars=host_vars,
+ host_keys=host_keys))
return hosts
def _blockPluginDirs(self, path):
@@ -1087,10 +1096,7 @@
secrets = playbook['secrets']
if secrets:
- if 'zuul' in secrets:
- # We block this in configloader, but block it here too to make
- # sure that a job doesn't pass secrets named zuul.
- raise Exception("Defining secrets named 'zuul' is not allowed")
+ check_varnames(secrets)
jobdir_playbook.secrets_content = yaml.safe_dump(
secrets, default_flow_style=False)
@@ -1191,12 +1197,9 @@
def prepareAnsibleFiles(self, args):
all_vars = args['vars'].copy()
+ check_varnames(all_vars)
# TODO(mordred) Hack to work around running things with python3
all_vars['ansible_python_interpreter'] = '/usr/bin/python2'
- if 'zuul' in all_vars:
- # We block this in configloader, but block it here too to make
- # sure that a job doesn't pass variables named zuul.
- raise Exception("Defining vars named 'zuul' is not allowed")
all_vars['zuul'] = args['zuul'].copy()
all_vars['zuul']['executor'] = dict(
hostname=self.executor_server.hostname,
@@ -1207,7 +1210,7 @@
nodes = self.getHostList(args)
setup_inventory = make_setup_inventory_dict(nodes)
- inventory = make_inventory_dict(nodes, args['groups'], all_vars)
+ inventory = make_inventory_dict(nodes, args, all_vars)
with open(self.jobdir.setup_inventory, 'w') as setup_inventory_yaml:
setup_inventory_yaml.write(
diff --git a/zuul/model.py b/zuul/model.py
index 38f2d6b..997b129 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -840,6 +840,8 @@
parent=None,
timeout=None,
variables={},
+ host_variables={},
+ group_variables={},
nodeset=NodeSet(),
workspace=None,
pre_run=(),
@@ -980,10 +982,19 @@
matchers.append(self.branch_matcher)
self.branch_matcher = change_matcher.MatchAll(matchers)
- def updateVariables(self, other_vars):
- v = copy.deepcopy(self.variables)
- Job._deepUpdate(v, other_vars)
- self.variables = v
+ def updateVariables(self, other_vars, other_host_vars, other_group_vars):
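+        # A None argument means the other job definition did not set
+        # that kind of variable, so the current value is kept as-is.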
+ if other_vars is not None:
+ v = copy.deepcopy(self.variables)
+ Job._deepUpdate(v, other_vars)
+ self.variables = v
+ if other_host_vars is not None:
+ v = copy.deepcopy(self.host_variables)
+ Job._deepUpdate(v, other_host_vars)
+ self.host_variables = v
+ if other_group_vars is not None:
+ v = copy.deepcopy(self.group_variables)
+ Job._deepUpdate(v, other_group_vars)
+ self.group_variables = v
def updateParentData(self, other_vars):
# Update variables, but give the current values priority (used
@@ -1060,7 +1071,8 @@
"from other projects."
% (repr(self), this_origin))
if k not in set(['pre_run', 'run', 'post_run', 'roles',
- 'variables', 'required_projects',
+ 'variables', 'host_variables',
+ 'group_variables', 'required_projects',
'allowed_projects']):
# TODO(jeblair): determine if deepcopy is required
setattr(self, k, copy.deepcopy(other._get(k)))
@@ -1094,8 +1106,8 @@
if other._get('post_run') is not None:
other_post_run = self.freezePlaybooks(other.post_run)
self.post_run = other_post_run + self.post_run
- if other._get('variables') is not None:
- self.updateVariables(other.variables)
+ self.updateVariables(other.variables, other.host_variables,
+ other.group_variables)
if other._get('required_projects') is not None:
self.updateProjects(other.required_projects)
if (other._get('allowed_projects') is not None and