Add support for shared ansible_host in inventory

Today it is possible to create the following ansible inventory file:

  [foo]
  foo01 ansible_host=192.168.1.1

  [bar]
  bar01 ansible_host=192.168.1.1

This allows a user to create multiple host aliases for a single
connection. The same result could be achieved with ansible groups;
however, there are some functional differences in how ansible runs in
that configuration.
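
For comparison, the group-based alternative mentioned above would look
something like this (a sketch; the node01 host name is only
illustrative). Because both groups reference the same inventory host,
ansible collapses it into a single host object, which is roughly the
functional difference compared to two distinct aliases:

  [foo]
  node01 ansible_host=192.168.1.1

  [bar]
  node01 ansible_host=192.168.1.1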

We could also request 2 nodes from nodepool; however, in that case it
would be a waste of CI resources because every alias would need its own
node from nodepool.

Now, a user is able to alias multiple host names to a single node from
nodepool by doing the following:

  nodeset:
    nodes:
      - name:
          - foo
          - bar
        label: ubuntu-xenial

This results in a single node request to nodepool, but creates an
inventory file with 2 aliases sharing a single ansible_host variable.
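
For illustration, the generated inventory would then contain entries
along these lines, with both aliases sharing the one address assigned
by nodepool (the address below is made up):

  foo ansible_host=203.0.113.10
  bar ansible_host=203.0.113.10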

Change-Id: I674d6baac26852ee1503feb1ed16c279bf773688
Signed-off-by: Paul Belanger <pabelanger@redhat.com>
diff --git a/tests/unit/test_inventory.py b/tests/unit/test_inventory.py
index 2835d30..71cb05e 100644
--- a/tests/unit/test_inventory.py
+++ b/tests/unit/test_inventory.py
@@ -57,6 +57,26 @@
         self.executor_server.release()
         self.waitUntilSettled()
 
+    def test_single_inventory_list(self):
+
+        inventory = self._get_build_inventory('single-inventory-list')
+
+        all_nodes = ('compute', 'controller')
+        self.assertIn('all', inventory)
+        self.assertIn('hosts', inventory['all'])
+        self.assertIn('vars', inventory['all'])
+        for node_name in all_nodes:
+            self.assertIn(node_name, inventory['all']['hosts'])
+        self.assertIn('zuul', inventory['all']['vars'])
+        z_vars = inventory['all']['vars']['zuul']
+        self.assertIn('executor', z_vars)
+        self.assertIn('src_root', z_vars['executor'])
+        self.assertIn('job', z_vars)
+        self.assertEqual(z_vars['job'], 'single-inventory-list')
+
+        self.executor_server.release()
+        self.waitUntilSettled()
+
     def test_group_inventory(self):
 
         inventory = self._get_build_inventory('group-inventory')
diff --git a/tests/unit/test_nodepool.py b/tests/unit/test_nodepool.py
index d3f9ddb..aa0f082 100644
--- a/tests/unit/test_nodepool.py
+++ b/tests/unit/test_nodepool.py
@@ -67,8 +67,8 @@
         # Test a simple node request
 
         nodeset = model.NodeSet()
-        nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
-        nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['controller', 'foo'], 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
         job = model.Job('testjob')
         job.nodeset = nodeset
         request = self.nodepool.requestNodes(None, job)
@@ -99,8 +99,8 @@
         # Test that node requests are re-submitted after disconnect
 
         nodeset = model.NodeSet()
-        nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
-        nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
         job = model.Job('testjob')
         job.nodeset = nodeset
         self.fake_nodepool.paused = True
@@ -116,8 +116,8 @@
         # Test that node requests can be canceled
 
         nodeset = model.NodeSet()
-        nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
-        nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
         job = model.Job('testjob')
         job.nodeset = nodeset
         self.fake_nodepool.paused = True
@@ -131,8 +131,8 @@
         # Test that a resubmitted request would not lock nodes
 
         nodeset = model.NodeSet()
-        nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
-        nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
         job = model.Job('testjob')
         job.nodeset = nodeset
         request = self.nodepool.requestNodes(None, job)
@@ -152,8 +152,8 @@
         # Test that a lost request would not lock nodes
 
         nodeset = model.NodeSet()
-        nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
-        nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
+        nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
         job = model.Job('testjob')
         job.nodeset = nodeset
         request = self.nodepool.requestNodes(None, job)