# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import shutil
import tempfile
import time

from tests.base import BaseTestCase

from zuul.executor.server import DiskAccountant

class FakeExecutor(object):
    """Stand-in for the executor server that records callbacks.

    The DiskAccountant under test is handed bound methods of this object;
    instead of acting on jobs, they simply record what was requested so the
    tests can assert on it.
    """

    def __init__(self):
        # Job directories for which a stop was requested.
        self.stopped_jobs = set()
        # Last reported disk usage, keyed by directory name.
        self.used = {}

    def stopJobByJobDir(self, jobdir):
        """Record that the job running in *jobdir* was asked to stop."""
        self.stopped_jobs.add(jobdir)

    def usage(self, dirname, used):
        """Record the disk usage reported for *dirname*."""
        self.used[dirname] = used


class TestDiskAccountant(BaseTestCase):
    """Exercise the executor's DiskAccountant background thread."""

    def test_disk_accountant(self):
        """An over-limit job dir is detected and its job is stopped."""
        jobs_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, jobs_dir, ignore_errors=True)
        cache_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, cache_dir, ignore_errors=True)
        executor_server = FakeExecutor()
        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
                            cache_dir)
        da.start()
        try:
            jobdir = os.path.join(jobs_dir, '012345')
            os.mkdir(jobdir)
            testfile = os.path.join(jobdir, 'tfile')
            # Write 2MiB into a dir governed by a 1MB limit.
            with open(testfile, 'w') as tf:
                tf.write(2 * 1024 * 1024 * '.')

            # da should catch the over-limit dir within 5 seconds; poll
            # instead of sleeping the full interval so the test finishes
            # as soon as the accountant reacts.
            for _ in range(50):
                if jobdir in executor_server.stopped_jobs:
                    break
                time.sleep(0.1)
            self.assertEqual(set([jobdir]), executor_server.stopped_jobs)
        finally:
            # Stop the accountant thread even if an assertion above failed,
            # so a failing test does not leak a background thread.
            da.stop()
        self.assertFalse(da.thread.is_alive())

    def test_cache_hard_links(self):
        """Files hard-linked from the cache dir are not charged to a job."""
        root_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, root_dir, ignore_errors=True)
        jobs_dir = os.path.join(root_dir, 'jobs')
        os.mkdir(jobs_dir)
        cache_dir = os.path.join(root_dir, 'cache')
        os.mkdir(cache_dir)

        executor_server = FakeExecutor()
        da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
                            cache_dir, executor_server.usage)
        da.start()
        try:
            jobdir = os.path.join(jobs_dir, '012345')
            os.mkdir(jobdir)

            # A 2MiB file in the cache, hard-linked into the job dir.
            repo_dir = os.path.join(cache_dir, 'a.repo')
            os.mkdir(repo_dir)
            source_file = os.path.join(repo_dir, 'big_file')
            with open(source_file, 'w') as tf:
                tf.write(2 * 1024 * 1024 * '.')
            dest_link = os.path.join(jobdir, 'big_file')
            os.link(source_file, dest_link)

            # da should _not_ count this file. Wait up to 5 seconds for
            # the job dir's usage to be reported.
            for _ in range(50):
                if jobdir in executor_server.used:
                    break
                time.sleep(0.1)
            self.assertEqual(set(), executor_server.stopped_jobs)
            self.assertIn(jobdir, executor_server.used)
            # Only the near-empty directory itself is charged
            # (presumably 1KB unit — confirm against DiskAccountant).
            self.assertEqual(1, executor_server.used[jobdir])
        finally:
            da.stop()
        # Mirror test_disk_accountant: stop() must terminate the thread.
        self.assertFalse(da.thread.is_alive())
89 da.stop()