# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import tempfile
import time

from tests.base import BaseTestCase

from zuul.executor.server import DiskAccountant


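# Test double for the executor server: records the job dirs the
# DiskAccountant asks to stop and the per-directory usage it reports.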
class FakeExecutor(object):
    def __init__(self):
        self.stopped_jobs = set()
        self.used = {}

    def stopJobByJobDir(self, jobdir):
        self.stopped_jobs.add(jobdir)

    def usage(self, dirname, used):
        self.used[dirname] = used


class TestDiskAccountant(BaseTestCase):
    def test_disk_accountant(self):
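        # Writing 2 MiB into a job dir while the accountant is configured
        # with a limit of 1 should trigger the stop callback for that dir.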
        jobs_dir = tempfile.mkdtemp()
        cache_dir = tempfile.mkdtemp()
        executor_server = FakeExecutor()
        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
                            cache_dir)
        da.start()

        jobdir = os.path.join(jobs_dir, '012345')
        os.mkdir(jobdir)
        testfile = os.path.join(jobdir, 'tfile')
        with open(testfile, 'w') as tf:
            tf.write(2 * 1024 * 1024 * '.')

        # The DiskAccountant should catch the over-limit dir within 5 seconds.
        for i in range(0, 50):
            if jobdir in executor_server.stopped_jobs:
                break
            time.sleep(0.1)
        try:
            self.assertEqual(set([jobdir]), executor_server.stopped_jobs)
        finally:
            da.stop()
        self.assertFalse(da.thread.is_alive())

    def test_cache_hard_links(self):
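        # A file hard-linked from the cache dir should not count against the
        # job dir's usage, so the accountant must not stop the job.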
        root_dir = tempfile.mkdtemp()
        jobs_dir = os.path.join(root_dir, 'jobs')
        os.mkdir(jobs_dir)
        cache_dir = os.path.join(root_dir, 'cache')
        os.mkdir(cache_dir)

        executor_server = FakeExecutor()
        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
                            cache_dir, executor_server.usage)
        da.start()

        jobdir = os.path.join(jobs_dir, '012345')
        os.mkdir(jobdir)

        repo_dir = os.path.join(cache_dir, 'a.repo')
        os.mkdir(repo_dir)
        source_file = os.path.join(repo_dir, 'big_file')
        with open(source_file, 'w') as tf:
            tf.write(2 * 1024 * 1024 * '.')
        dest_link = os.path.join(jobdir, 'big_file')
        os.link(source_file, dest_link)

        # The DiskAccountant should _not_ count the hard-linked file; wait up
        # to 5 seconds for the usage callback to report the job dir.
        for i in range(0, 50):
            if jobdir in executor_server.used:
                break
            time.sleep(0.1)
        try:
            self.assertEqual(set(), executor_server.stopped_jobs)
            self.assertIn(jobdir, executor_server.used)
            self.assertEqual(1, executor_server.used[jobdir])
        finally:
            da.stop()