Merge "Add required reason for hold" into feature/zuulv3
diff --git a/doc/source/admin/client.rst b/doc/source/admin/client.rst
index 5cdf919..961b205 100644
--- a/doc/source/admin/client.rst
+++ b/doc/source/admin/client.rst
@@ -28,7 +28,7 @@
 
 Example::
 
-  zuul autohold --tenant openstack --project example_project --job example_job --count 1
+  zuul autohold --tenant openstack --project example_project --job example_job --reason "reason text" --count 1
 
 Enqueue
 ^^^^^^^
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index e38dd84..16d82af 100755
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -1441,7 +1441,8 @@
         client = zuul.rpcclient.RPCClient('127.0.0.1',
                                           self.gearman_server.port)
         self.addCleanup(client.shutdown)
-        r = client.autohold('tenant-one', 'org/project', 'project-test2', 1)
+        r = client.autohold('tenant-one', 'org/project', 'project-test2',
+                            "reason text", 1)
         self.assertTrue(r)
 
         self.executor_server.failJob('project-test2', A)
@@ -1469,6 +1470,7 @@
                       'review.example.com/org/project',
                       'project-test2'])
         )
+        self.assertEqual(held_node['hold_reason'], "reason text")
 
         # Another failed change should not hold any more nodes
         B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
diff --git a/zuul/cmd/client.py b/zuul/cmd/client.py
index 03eabce..00af538 100755
--- a/zuul/cmd/client.py
+++ b/zuul/cmd/client.py
@@ -54,6 +54,8 @@
                                   required=True)
         cmd_autohold.add_argument('--job', help='job name',
                                   required=True)
+        cmd_autohold.add_argument('--reason', help='reason for the hold',
+                                  required=True)
         cmd_autohold.add_argument('--count',
                                   help='number of job runs (default: 1)',
                                   required=False, type=int, default=1)
@@ -156,6 +158,7 @@
         r = client.autohold(tenant_name=self.args.tenant,
                             project_name=self.args.project,
                             job_name=self.args.job,
+                            reason=self.args.reason,
                             count=self.args.count)
         return r
 
diff --git a/zuul/model.py b/zuul/model.py
index e7043f3..90cc81d 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -357,6 +357,7 @@
         self.id = None
         self.lock = None
         self.hold_job = None
+        self.hold_reason = None
         # Attributes from Nodepool
         self._state = 'unknown'
         self.state_time = time.time()
@@ -398,6 +399,7 @@
         d = {}
         d['state'] = self.state
         d['hold_job'] = self.hold_job
+        d['hold_reason'] = self.hold_reason
         for k in self._keys:
             d[k] = getattr(self, k)
         return d
diff --git a/zuul/nodepool.py b/zuul/nodepool.py
index f677810..6b3632b 100644
--- a/zuul/nodepool.py
+++ b/zuul/nodepool.py
@@ -55,12 +55,13 @@
         if autohold_key not in self.sched.autohold_requests:
             return
 
-        hold_iterations = self.sched.autohold_requests[autohold_key]
+        (hold_iterations, reason) = self.sched.autohold_requests[autohold_key]
         nodes = nodeset.getNodes()
 
         for node in nodes:
             node.state = model.STATE_HOLD
             node.hold_job = " ".join(autohold_key)
+            node.hold_reason = reason
             self.sched.zk.storeNode(node)
 
         # We remove the autohold when the number of nodes in hold
diff --git a/zuul/rpcclient.py b/zuul/rpcclient.py
index ee0c7d2..a2901bd 100644
--- a/zuul/rpcclient.py
+++ b/zuul/rpcclient.py
@@ -48,10 +48,11 @@
         self.log.debug("Job complete, success: %s" % (not job.failure))
         return job
 
-    def autohold(self, tenant_name, project_name, job_name, count):
+    def autohold(self, tenant_name, project_name, job_name, reason, count):
         data = {'tenant_name': tenant_name,
                 'project_name': project_name,
                 'job_name': job_name,
+                'reason': reason,
                 'count': count}
         return not self.submitJob('zuul:autohold', data).failure
 
diff --git a/zuul/rpclistener.py b/zuul/rpclistener.py
index ae948eb..0e4736c 100644
--- a/zuul/rpclistener.py
+++ b/zuul/rpclistener.py
@@ -111,6 +111,7 @@
             return
 
         params['job_name'] = args['job_name']
+        params['reason'] = args['reason']
 
         if args['count'] < 0:
             error = "Invalid count: %d" % args['count']
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 7eb8a69..0a33b00 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -350,14 +350,14 @@
         self.last_reconfigured = int(time.time())
         # TODOv3(jeblair): reconfigure time should be per-tenant
 
-    def autohold(self, tenant_name, project_name, job_name, count):
+    def autohold(self, tenant_name, project_name, job_name, reason, count):
         key = (tenant_name, project_name, job_name)
         if count == 0 and key in self.autohold_requests:
             self.log.debug("Removing autohold for %s", key)
             del self.autohold_requests[key]
         else:
             self.log.debug("Autohold requested for %s", key)
-            self.autohold_requests[key] = count
+            self.autohold_requests[key] = (count, reason)
 
     def promote(self, tenant_name, pipeline_name, change_ids):
         event = PromoteEvent(tenant_name, pipeline_name, change_ids)
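
A minimal end-to-end sketch of how the reason flows once this change is in
place; it assumes a scheduler whose Gearman server listens on the default
port (4730), and the tenant/project/job names are illustrative:

  import zuul.rpcclient

  # Submit an autohold request; the reason argument is now required.
  client = zuul.rpcclient.RPCClient('127.0.0.1', 4730)
  client.autohold(tenant_name='openstack',
                  project_name='example_project',
                  job_name='example_job',
                  reason='reason text',
                  count=1)

  # The scheduler keys the request by (tenant, project, job) and now stores
  # a (count, reason) tuple rather than a bare count:
  #   autohold_requests[('openstack', 'example_project', 'example_job')] = (1, 'reason text')
  #
  # When a matching job fails, nodepool holds each node and copies the
  # reason onto it before storing the node in ZooKeeper:
  #   node.state = model.STATE_HOLD
  #   node.hold_reason = 'reason text'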