Don't shrink windows on reconfiguration

The window size supplied by the user on a pipeline is really
the starting window size.  It can grow without bound, and so
there is no reason to shrink a window on reconfiguration.
For that matter, it's not necessary to increase it either,
unless the user has raised the floor higher than the current
value.

Therefore, use the existing window value on static (i.e., dependent)
change queues during reconfiguration, or the new floor if it is
higher.

Dynamic queues will get new values from the pipeline (whose value
is also conditionally updated in the same manner).
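
For the persistent shared queues, the rule boils down to something
like the following rough Python sketch (the names mirror the
scheduler change below, but this is illustrative, not the exact
implementation):

    # Never shrink: keep the larger of the window the queue has
    # already grown to and the newly configured floor.
    new_queue.window = max(old_queue.window, new_queue.window_floor)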

In review, we discovered that some folks may be using the exponential
window type to create a static window.  To make sure this change
doesn't break that case, add a test for it.  Since that configuration
can be used to intentionally reduce the window size on
reconfiguration, the new test mirrors the test for window shrinkage
added in the previous change, which must itself be altered now that
we don't auto-shrink on reconfiguration.

Change-Id: Iaba25788ebe51ced919dc896aa20aa90675d7773
diff --git a/tests/fixtures/layouts/reconfigure-window-fixed.yaml b/tests/fixtures/layouts/reconfigure-window-fixed.yaml
new file mode 100644
index 0000000..9aa1a97
--- /dev/null
+++ b/tests/fixtures/layouts/reconfigure-window-fixed.yaml
@@ -0,0 +1,46 @@
+- pipeline:
+    name: gate
+    manager: dependent
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - Approved: 1
+    start:
+      gerrit:
+        Verified: 0
+    success:
+      gerrit:
+        Verified: 2
+        submit: true
+    failure:
+      gerrit:
+        Verified: -2
+    window: 2
+    window-increase-type: exponential
+    window-increase-factor: 1
+    window-decrease-type: exponential
+    window-decrease-factor: 1
+
+- job:
+    name: base
+    parent: null
+    nodeset:
+      nodes:
+        - label: ubuntu-xenial
+          name: controller
+
+- job:
+    name: job1
+    run: playbooks/job1.yaml
+
+- job:
+    name: job2
+    run: playbooks/job2.yaml
+
+- project:
+    name: org/project
+    gate:
+      jobs:
+        - job1
+        - job2
diff --git a/tests/fixtures/layouts/reconfigure-window-fixed2.yaml b/tests/fixtures/layouts/reconfigure-window-fixed2.yaml
new file mode 100644
index 0000000..13382c5
--- /dev/null
+++ b/tests/fixtures/layouts/reconfigure-window-fixed2.yaml
@@ -0,0 +1,46 @@
+- pipeline:
+    name: gate
+    manager: dependent
+    trigger:
+      gerrit:
+        - event: comment-added
+          approval:
+            - Approved: 1
+    start:
+      gerrit:
+        Verified: 0
+    success:
+      gerrit:
+        Verified: 2
+        submit: true
+    failure:
+      gerrit:
+        Verified: -2
+    window: 1
+    window-increase-type: exponential
+    window-increase-factor: 1
+    window-decrease-type: exponential
+    window-decrease-factor: 1
+
+- job:
+    name: base
+    parent: null
+    nodeset:
+      nodes:
+        - label: ubuntu-xenial
+          name: controller
+
+- job:
+    name: job1
+    run: playbooks/job1.yaml
+
+- job:
+    name: job2
+    run: playbooks/job2.yaml
+
+- project:
+    name: org/project
+    gate:
+      jobs:
+        - job1
+        - job2
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index 264883c..cad557e 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -3879,8 +3879,60 @@
         self.sched.reconfigure(self.config)
         tenant = self.sched.abide.tenants.get('tenant-one')
         queue = tenant.layout.pipelines['gate'].queues[0]
+        # Even though we have configured a smaller window, the value
+        # on the existing shared queue should be used.
+        self.assertEqual(queue.window, 20)
+        self.assertEqual(len(self.builds), 4)
+
+        self.sched.reconfigure(self.config)
+        tenant = self.sched.abide.tenants.get('tenant-one')
+        queue = tenant.layout.pipelines['gate'].queues[0]
+        self.assertEqual(queue.window, 20)
+        self.assertEqual(len(self.builds), 4)
+
+        self.executor_server.hold_jobs_in_build = False
+        self.executor_server.release()
+
+        self.waitUntilSettled()
+        self.assertHistory([
+            dict(name='job1', result='SUCCESS', changes='1,1'),
+            dict(name='job1', result='SUCCESS', changes='1,1 2,1'),
+            dict(name='job2', result='SUCCESS', changes='1,1'),
+            dict(name='job2', result='SUCCESS', changes='1,1 2,1'),
+        ], ordered=False)
+
+    @simple_layout('layouts/reconfigure-window-fixed.yaml')
+    def test_reconfigure_window_fixed(self):
+        # Test that a static window is allowed to shrink on reconfiguration
+        self.executor_server.hold_jobs_in_build = True
+
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        A.addApproval('Code-Review', 2)
+        self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+        self.waitUntilSettled()
+        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+        B.addApproval('Code-Review', 2)
+        self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+        self.waitUntilSettled()
+
+        tenant = self.sched.abide.tenants.get('tenant-one')
+        queue = tenant.layout.pipelines['gate'].queues[0]
+        self.assertEqual(queue.window, 2)
+        self.assertEqual(len(self.builds), 4)
+
+        self.executor_server.release('job1')
+        self.waitUntilSettled()
+        self.commitConfigUpdate('org/common-config',
+                                'layouts/reconfigure-window-fixed2.yaml')
+        self.sched.reconfigure(self.config)
+        tenant = self.sched.abide.tenants.get('tenant-one')
+        queue = tenant.layout.pipelines['gate'].queues[0]
+        # Because we have configured a static window, it should
+        # be allowed to shrink on reconfiguration.
         self.assertEqual(queue.window, 1)
-        # B is now outside the window, but builds haven't been canceled
+        # B is outside the window, but still marked active until the
+        # next pass through the queue processor, so its builds haven't
+        # been canceled.
+        self.assertEqual(len(self.builds), 4)
 
         self.sched.reconfigure(self.config)
@@ -3892,7 +3944,9 @@
 
         self.executor_server.hold_jobs_in_build = False
         self.executor_server.release()
-        # This will run new builds for B
+
+        # B's builds will be restarted and will show up in our history
+        # twice.
         self.waitUntilSettled()
         self.assertHistory([
             dict(name='job1', result='SUCCESS', changes='1,1'),
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 846242c..7dee00d 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -595,10 +595,32 @@
                                  "when reconfiguring" % name)
                 continue
             self.log.debug("Re-enqueueing changes for pipeline %s" % name)
+            # TODO(jeblair): This supports an undocumented and
+            # unanticipated hack to create a static window.  If we
+            # really want to support this, maybe we should have a
+            # 'static' type?  But it may be in use in the wild, so we
+            # should allow this at least until there's an upgrade
+            # path.
+            if (new_pipeline.window and
+                new_pipeline.window_increase_type == 'exponential' and
+                new_pipeline.window_decrease_type == 'exponential' and
+                new_pipeline.window_increase_factor == 1 and
+                new_pipeline.window_decrease_factor == 1):
+                static_window = True
+            else:
+                static_window = False
+            if old_pipeline.window and (not static_window):
+                new_pipeline.window = max(old_pipeline.window,
+                                          new_pipeline.window_floor)
             items_to_remove = []
             builds_to_cancel = []
             last_head = None
             for shared_queue in old_pipeline.queues:
+                # Attempt to keep window sizes from shrinking where possible
+                new_queue = new_pipeline.getQueue(shared_queue.projects[0])
+                if new_queue and shared_queue.window and (not static_window):
+                    new_queue.window = max(shared_queue.window,
+                                           new_queue.window_floor)
                 for item in shared_queue.queue:
                     if not item.item_ahead:
                         last_head = item