Merge "Fix create_branch in tests" into feature/zuulv3
diff --git a/.zuul.yaml b/.zuul.yaml
index 27f2ca1..b0fe135 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -43,7 +43,8 @@
     name: openstack-infra/zuul
     check:
       jobs:
-        - tox-docs
+        - build-openstack-infra-sphinx-docs:
+            success-url: 'html/feature/zuulv3/'
         - tox-cover:
             voting: false
         - tox-pep8
@@ -55,7 +56,8 @@
               - playbooks/zuul-migrate.yaml
     gate:
       jobs:
-        - tox-docs
+        - build-openstack-infra-sphinx-docs:
+            success-url: 'html/feature/zuulv3/'
         - tox-pep8
         - tox-py35
         - zuul-stream-functional
diff --git a/zuul/cmd/migrate.py b/zuul/cmd/migrate.py
index 05278aa..1b2e44e 100644
--- a/zuul/cmd/migrate.py
+++ b/zuul/cmd/migrate.py
@@ -43,7 +43,7 @@
 
 JOBS_BY_ORIG_TEMPLATE = {}  # type: ignore
 SUFFIXES = []  # type: ignore
-ENVIRONMENT = '{{ host_vars[inventory_hostname] | zuul_legacy_vars }}'
+ENVIRONMENT = '{{ zuul | zuul_legacy_vars }}'
 DESCRIPTION = """Migrate zuul v2 and Jenkins Job Builder to Zuul v3.
 
 This program takes a zuul v2 layout.yaml and a collection of Jenkins Job
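
The ENVIRONMENT template now feeds the zuul job variable through the
zuul_legacy_vars filter instead of reading per-host vars. A hypothetical
sketch of what such a filter could do, assuming it maps a few zuul.* fields
onto the v2-era ZUUL_* names (the real filter lives in the zuul-jobs filter
plugins and its exact mapping may differ):

    # Hypothetical sketch only; not the actual filter implementation.
    def zuul_legacy_vars(zuul):
        # Translate selected zuul.* fields into legacy ZUUL_* names.
        return {
            'ZUUL_PROJECT': zuul.get('project', {}).get('name', ''),
            'ZUUL_BRANCH': zuul.get('branch', ''),
            'ZUUL_PIPELINE': zuul.get('pipeline', ''),
        }
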
@@ -507,15 +507,19 @@
             self.jjb_job = jobs[self.orig]
 
     def getTimeout(self):
+        timeout = None
         if self.jjb_job:
             for wrapper in self.jjb_job.get('wrappers', []):
                 if isinstance(wrapper, dict):
-                    build_timeout = wrapper.get('timeout')
+                    build_timeout = wrapper.get(
+                        'build-timeout', wrapper.get('timeout'))
                     if isinstance(build_timeout, dict):
                         timeout = build_timeout.get('timeout')
                         if timeout is not None:
                             timeout = int(timeout) * 60
 
+        return timeout
+
     @property
     def short_name(self):
         return self.name.replace('legacy-', '')
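
Two fixes land in getTimeout: the method previously fell off the end without
returning anything, and JJB wrappers may carry the value under either
'build-timeout' or the older 'timeout' key. A minimal standalone
illustration of the corrected lookup (the sample wrapper data is invented):

    # Mirrors the corrected getTimeout logic above; illustration only.
    def get_timeout(wrappers):
        timeout = None
        for wrapper in wrappers:
            if isinstance(wrapper, dict):
                # JJB uses 'build-timeout'; fall back to legacy 'timeout'.
                build_timeout = wrapper.get(
                    'build-timeout', wrapper.get('timeout'))
                if isinstance(build_timeout, dict):
                    timeout = build_timeout.get('timeout')
                    if timeout is not None:
                        timeout = int(timeout) * 60  # minutes -> seconds
        return timeout

    print(get_timeout([{'build-timeout': {'timeout': 40}}]))  # 2400
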
@@ -550,7 +554,7 @@
         # html dir inside of logs such that if the node's contents have an
         # index.html in them, setting the success-url to html/ will render
         # things as expected. Existing builder macros look like:
-        # 
+        #
         #   - publisher:
         #     name: upload-sphinx-draft
         #     publishers:
@@ -567,7 +571,7 @@
         # docs-draft/$LOG_PATH.
         #
         # Then there is a success-pattern in layout.yaml that looks like:
-        # 
+        #
         #     http://{url}/{log_path}/doc/build/html/
         #
         # Which gets reported. There are many variations on that URL. So rather
@@ -592,7 +596,7 @@
             if scpfile.get('copy-console'):
                 continue
             else:
-                src = "{{ ansible_user_dir }}"
+                src = "{{ ansible_user_dir }}/"
                 rsync_opts = self._getRsyncOptions(scpfile['source'])
 
             target = scpfile['target']
@@ -610,6 +614,10 @@
                     src = scpfile['source'].replace('**', '')
                     rsync_opts = None
                     draft = True
+                else:
+                    target = target.replace(
+                        'logs/$LOG_PATH',
+                        "{{ zuul.executor.log_root }}")
             elif site == 'tarballs.openstack.org':
                 if not target.startswith('tarballs'):
                     self.log.error(
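
The trailing slash added to src matters for rsync semantics: "dir/" copies
the directory's contents, while "dir" would copy the directory itself into
the destination. For the logs site, v2-style targets are also rewritten
onto the executor's log root. A sketch of the mapping, using an invented
target value:

    # Illustration only: how one scpfile target is rewritten.
    src = "{{ ansible_user_dir }}/"            # trailing slash: contents only
    target = 'logs/$LOG_PATH/doc/build/html'   # hypothetical v2 target
    target = target.replace(
        'logs/$LOG_PATH', "{{ zuul.executor.log_root }}")
    # -> "{{ zuul.executor.log_root }}/doc/build/html"
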
@@ -629,13 +637,13 @@
             syncargs = collections.OrderedDict()
             syncargs['src'] = src
             syncargs['dest'] = target
-            syncargs['copy_links'] = 'yes'
             syncargs['mode'] = 'pull'
+            syncargs['copy_links'] = True
             syncargs['verify_host'] = True
             if rsync_opts:
                 syncargs['rsync_opts'] = rsync_opts
             task = collections.OrderedDict()
-            task['name'] = 'copy files from {src} on node to'.format(src=src)
+            task['name'] = 'Copy files from {src} on node'.format(src=src)
             task['synchronize'] = syncargs
             # We don't use retry_args here because there is a bug in
             # the synchronize module that breaks subsequent attempts at
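
Making copy_links a native boolean means the dumped YAML reads
"copy_links: true" instead of the quoted string 'yes'. Roughly what the
generated synchronize task looks like once serialized (a sketch, not the
tool's exact output):

    import yaml

    # Plain dicts preserve insertion order on Python 3.7+.
    task = {
        'name': 'Copy files from {{ ansible_user_dir }}/ on node',
        'synchronize': {
            'src': "{{ ansible_user_dir }}/",
            'dest': "{{ zuul.executor.log_root }}",
            'mode': 'pull',
            'copy_links': True,   # native boolean -> emitted as 'true'
            'verify_host': True,
        },
    }
    print(yaml.dump(task, default_flow_style=False, sort_keys=False))
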
@@ -657,9 +665,11 @@
     def _emitShellTask(self, data, syntax_check):
         shell, data = deal_with_shebang(data)
         task = collections.OrderedDict()
-        task['shell'] = data
+        # Putting the data directly into shell: causes here docs to break.
+        task['shell'] = collections.OrderedDict()
+        task['shell']['cmd'] = data
         if shell:
-            task['args'] = dict(executable=shell)
+            task['shell']['executable'] = shell
 
         if syntax_check:
             # Emit a test playbook with this shell task in it then run
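
Nesting the script body under shell.cmd keeps YAML serialization from
mangling here documents embedded in the script, and folds the interpreter
into the same mapping instead of a separate args: block. A sketch of the
two emitted forms (script content invented):

    data = 'cat <<EOF\nhello\nEOF\n'
    # Before: scalar value; a here doc inside data could break the YAML.
    old_task = {'shell': data, 'args': {'executable': '/bin/bash'}}
    # After: structured form matching the change above.
    new_task = {'shell': {'cmd': data, 'executable': '/bin/bash'}}
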
@@ -707,6 +717,9 @@
             return
         if 'echo "Detailed logs:' in builder['shell']:
             return
+        if ('cat /etc/dib-builddate.txt' in builder['shell'] and
+                'echo "Network configuration data..."' in builder['shell']):
+            return
 
         task = self._emitShellTask(builder['shell'], syntax_check)
         if not task:
@@ -819,6 +832,7 @@
             expanded_projects.extend(expand_project_names(
                 self.name, project_names))
 
+        output['parent'] = 'legacy-base'
         if 'dsvm' in self.name:
             output['parent'] = 'legacy-dsvm-base'
         elif 'puppet-openstack-integration' in self.name:
@@ -835,13 +849,18 @@
 
         if self.vars:
             output['vars'] = self.vars.copy()
+
         timeout = self.getTimeout()
         if timeout:
             output['timeout'] = timeout
+            output.setdefault('vars', {})
             output['vars']['BUILD_TIMEOUT'] = str(timeout * 1000)
 
         if self.nodes:
-            output['nodes'] = self.getNodes()
+            if len(self.nodes) == 1:
+                output['nodeset'] = self.getNodes()[0]
+            else:
+                output['nodeset'] = dict(nodes=self.getNodes())
 
         if expanded_projects:
             output['required-projects'] = list(set(expanded_projects))
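
Taken together, every migrated job now gets an explicit parent (defaulting
to legacy-base), a BUILD_TIMEOUT var in milliseconds alongside the job
timeout, and a nodeset that is a bare label for single-node jobs or a
nodes: list otherwise. A rough sketch of the assembled output for a
hypothetical single-node job with a 30-minute timeout:

    # Values are invented; the real tool derives them from the JJB config.
    output = {'parent': 'legacy-base'}
    timeout = 30 * 60
    output['timeout'] = timeout
    output.setdefault('vars', {})
    output['vars']['BUILD_TIMEOUT'] = str(timeout * 1000)  # milliseconds
    nodes = ['ubuntu-xenial']
    output['nodeset'] = nodes[0] if len(nodes) == 1 else dict(nodes=nodes)
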
@@ -1294,36 +1313,41 @@
         '''
 
         def processPipeline(pipeline_jobs, job_name_regex, files):
+            new_jobs = []
             for job in pipeline_jobs:
                 if isinstance(job, str):
                     old_job_name = self.getOldJobName(job)
-                    if not old_job_name:
-                        continue
-                    if re.search(job_name_regex, old_job_name):
+                    if old_job_name and re.search(
+                            job_name_regex, old_job_name):
                         self.log.debug(
                             "Applied irrelevant-files to job %s in project %s",
                             job, project['name'])
-                        job = dict(job={'irrelevant-files': files})
+                        job = {job: {'irrelevant-files': list(set(files))}}
                 elif isinstance(job, dict):
-                    # should really only be one key (job name)
-                    job_name = list(job.keys())[0]
+                    job = job.copy()
+                    job_name = get_single_key(job)
                     extras = job[job_name]
                     old_job_name = self.getOldJobName(job_name)
-                    if not old_job_name:
-                        continue
-                    if re.search(job_name_regex, old_job_name):
+                    if old_job_name and re.search(
+                            job_name_regex, old_job_name):
                         self.log.debug(
                             "Applied irrelevant-files to complex job "
                             "%s in project %s", job_name, project['name'])
                         if 'irrelevant-files' not in extras:
                             extras['irrelevant-files'] = []
                         extras['irrelevant-files'].extend(files)
+                        extras['irrelevant-files'] = list(
+                            set(extras['irrelevant-files']))
+                    job[job_name] = extras
+                new_jobs.append(job)
+            return new_jobs
 
         def applyIrrelevantFiles(job_name_regex, files):
             for k, v in project.items():
                 if k in ('template', 'name'):
                     continue
-                processPipeline(project[k]['jobs'], job_name_regex, files)
+                project[k]['jobs'] = processPipeline(
+                    project[k]['jobs'], job_name_regex, files)
 
         for matcher in matchers:
             # find the project-specific section
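
processPipeline now returns a rebuilt job list instead of mutating entries
in place, so string entries that gain irrelevant-files are actually written
back into the project's pipeline, and set() deduplicates the file patterns.
A simplified illustration with invented job entries; unlike the real code,
it matches the regex against the new job name directly rather than looking
up the old name first:

    import re

    def process_pipeline(pipeline_jobs, job_name_regex, files):
        # Rebuild the list so converted string entries are preserved.
        new_jobs = []
        for job in pipeline_jobs:
            if isinstance(job, str) and re.search(job_name_regex, job):
                job = {job: {'irrelevant-files': sorted(set(files))}}
            new_jobs.append(job)
        return new_jobs

    jobs = ['legacy-docs-job', 'tox-py35']
    print(process_pipeline(jobs, 'docs', ['README.rst', 'README.rst']))
    # [{'legacy-docs-job': {'irrelevant-files': ['README.rst']}}, 'tox-py35']
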