from __future__ import annotations

import re
from dataclasses import InitVar, dataclass, field
from pathlib import Path

from ..exceptions import BuildFailedError, ExecutionFailedError, RunFailedError, StylecheckFailedError, TestsFailedError
from ..utils.files import check_folder_contains_regexp, copy_files
from ..utils.print import print_info
from .tester import Tester

IGNORE_FILE_PATTERNS = ['*.md', 'build', '__pycache__', '.pytest_cache', '.mypy_cache', '.tester.json']
COVER_IGNORE_FILES = ['setup.py']


class PythonTester(Tester):
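    """Tester for Python tasks: builds the submission, then runs codestyle, typing and pytest checks."""
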
    SOURCE_FILES_EXTENSIONS: list[str] = ['.py']

    @dataclass
    class TaskTestConfig(Tester.TaskTestConfig):
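        """Per-task configuration: scoring mode, module install, coverage, timeout and test file lists."""
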
        partially_scored: bool = False
        verbose_tests_output: bool = False
        module_test: bool = False
        build_wheel: bool = False
        run_mypy: bool = True

        forbidden_regexp: list[re.Pattern[str]] = field(default_factory=list)
        public_test_files: list[str] = field(default_factory=list)
        private_test_files: list[str] = field(default_factory=list)

        test_timeout: int = 60  # seconds
        coverage: bool | int = False

        # Created on init
        test_files: list[str] = field(init=False, default_factory=list)

        # Init only
        explicit_public_tests: InitVar[list[str]] = None
        explicit_private_tests: InitVar[list[str]] = None
        def __post_init__(
                self,
                explicit_public_tests: list[str] | None,
                explicit_private_tests: list[str] | None,
        ) -> None:
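            """Validate the forbidden regexps and assemble the final test-file lists."""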
            self.forbidden_regexp += [r'exit\(0\)']  # type: ignore  # exit(0) is forbidden in every task
            # Compile every forbidden regexp to fail fast on invalid patterns
            for regexp in self.forbidden_regexp:
                re.compile(regexp)

            self.public_test_files = ['test_public.py'] + (explicit_public_tests or [])
            self.private_test_files = ['test_private.py'] + (explicit_private_tests or [])
            self.test_files = self.public_test_files + self.private_test_files
    def _gen_build(  # type: ignore[override]
            self,
            test_config: TaskTestConfig,
            build_dir: Path,
            source_dir: Path,
            public_tests_dir: Path | None,
            private_tests_dir: Path | None,
            sandbox: bool = True,
            verbose: bool = False,
            normalize_output: bool = False,
    ) -> None:
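        """Prepare the build directory: copy the submission without its test files, reject forbidden
        patterns, optionally install the code as a module, then copy the public and private tests in."""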
        # Copy submitted code (ignore tests)
        self._executor(
            copy_files,
            source=source_dir,
            target=build_dir,
            ignore_patterns=test_config.test_files + IGNORE_FILE_PATTERNS,
            verbose=verbose,
        )

        # Check submitted code against the forbidden regexps
        self._executor(
            check_folder_contains_regexp,
            folder=build_dir,
            extensions=self.SOURCE_FILES_EXTENSIONS,
            regexps=test_config.forbidden_regexp,
            raise_on_found=True,
            verbose=verbose,
        )

        # Install submitted code as a module if needed
        if test_config.module_test:
            # Assert that packaging files exist
            setup_files = {i.name for i in build_dir.glob(r'setup.*')} | \
                          {i.name for i in build_dir.glob(r'pyproject.*')}
            if 'setup.py' not in setup_files and 'setup.cfg' not in setup_files and 'pyproject.toml' not in setup_files:
                raise BuildFailedError(
                    'This task is in editable `module` mode. You have to provide pyproject.toml/setup.cfg/setup.py file'
                )
            # The install commands below additionally require setup.py itself
            if 'setup.py' not in setup_files:
                raise BuildFailedError('This task is in editable `module` mode. You have to provide setup.py file')

            if test_config.build_wheel:
                task_build_dir_dist = build_dir / 'dist'
                output = self._executor(
                    ['pip3', 'wheel', '--wheel-dir', str(task_build_dir_dist), str(build_dir)],
                    verbose=verbose,
                    env_sandbox=sandbox,
                    capture_output=normalize_output,
                )
                if normalize_output:
                    print_info(output or '', end='')
                output = self._executor(
                    ['pip3', 'install', '--prefer-binary', '--force-reinstall', '--find-links',
                     str(task_build_dir_dist), str(build_dir)],
                    verbose=verbose,
                    env_sandbox=sandbox,
                    capture_output=normalize_output,
                )
                if normalize_output:
                    print_info(output or '', end='')
                if (build_dir / 'build').exists():
                    output = self._executor(
                        ['rm', '-rf', str(build_dir / 'build')],
                        verbose=verbose,
                        env_sandbox=sandbox,
                        capture_output=normalize_output,
                    )
                    if normalize_output:
                        print_info(output or '', end='')
            else:
                output = self._executor(
                    ['pip3', 'install', '-e', str(build_dir), '--force'],
                    verbose=verbose,
                    env_sandbox=sandbox,
                    capture_output=normalize_output,
                )
                if normalize_output:
                    print_info(output or '', end='')

        # Copy public test files
        if public_tests_dir is not None:
            self._executor(
                copy_files,
                source=public_tests_dir,
                target=build_dir,
                patterns=test_config.public_test_files,
                verbose=verbose,
            )

        # Copy private test files
        if private_tests_dir is not None:
            self._executor(
                copy_files,
                source=private_tests_dir,
                target=build_dir,
                patterns=test_config.private_test_files,
                verbose=verbose,
            )
    def _clean_build(  # type: ignore[override]
            self,
            test_config: TaskTestConfig,
            build_dir: Path,
            verbose: bool = False,
    ) -> None:
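        """Remove the build directory, ignoring failures (check=False)."""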
        self._executor(
            ['rm', '-rf', str(build_dir)],
            check=False,
            verbose=verbose,
        )
    @staticmethod
    def _parse_summary_score(
            output: str,
    ) -> float:
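        """Extract the score from the first 'Summary score percentage is: <value>' line, or 0.0 if absent."""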
        score = 0.0
        for line in output.splitlines():
            if 'Summary score percentage is: ' in line:
                score += float(line.strip().split('Summary score percentage is: ')[1])
                break
        return score
    def _run_tests(  # type: ignore[override]
            self,
            test_config: TaskTestConfig,
            build_dir: Path,
            sandbox: bool = False,
            verbose: bool = False,
            normalize_output: bool = False,
    ) -> float:
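        """Run the codestyle, typing, test-collection and pytest stages, deferring failures so every
        stage is reported; raise deferred errors in precedence order (import, tests, style, typing),
        otherwise return the task score."""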
        # TODO: replace with preserved setup.cfg
        codestyle_cmd = [
            'flake8',
            '--exclude', ','.join(test_config.private_test_files),
            '--max-line-length', '120',
            str(build_dir)
        ]
        # codestyle_cmd = [
        #     'ruff',
        #     '--exclude', ','.join(test_config.private_test_files),
        #     '--line-length', '120',
        #     '--no-fix',
        #     str(build_dir)
        # ]
        mypy_cmd = [
            'mypy',
            '--no-incremental',
            '--cache-dir', '/dev/null',
            '--ignore-missing-imports',
            '--disallow-untyped-defs',
            '--disallow-incomplete-defs',
            '--disallow-subclassing-any',
            '--disallow-any-generics',
            '--no-implicit-optional',
            '--warn-redundant-casts',
            '--warn-unused-ignores',
            '--warn-unreachable',
            '--allow-untyped-decorators',
            str(build_dir)
        ]
        tests_collection_cmd = [
            'pytest',
            '-p', 'no:cacheprovider',
            '-p', 'no:requests_mock',
            '-p', 'no:cov',
            '-p', 'no:mock',
            '-p', 'no:socket',
            '-qq',
            '--collect-only',
            str(build_dir)
        ]
        tests_cmd = [
            'pytest',
            '-p', 'no:cacheprovider',
            '-p', 'no:requests_mock',
            '-p', 'no:timeout',
            '-p', 'no:socket',
            # '--timeout=60',
            str(build_dir)
        ]
        if not verbose:
            tests_cmd += ['--no-header']
        if not verbose and not test_config.verbose_tests_output:
            tests_cmd += ['--tb=no']
        # if test_config.partially_scored:
        #     tests_cmd += ['-s']

        if test_config.coverage:
            tests_cmd += ['--cov-report', 'term-missing']
            # Cover only the submitted code, excluding test files
            dirs_to_cover = {
                i.relative_to(build_dir) for i in build_dir.iterdir()
                if i.suffix in ['', '.py'] and i.name not in test_config.test_files
                and i.name not in COVER_IGNORE_FILES
            }
            if dirs_to_cover:
                for _dir in dirs_to_cover:
                    tests_cmd += ['--cov', str(_dir).replace(r'.', r'\.')]
            else:
                tests_cmd += ['--cov', str(build_dir)]
            # tests_cmd += ['--cov-config', '.coveragerc']
            if test_config.coverage is not True:
                tests_cmd += ['--cov-fail-under', str(test_config.coverage)]
        else:
            tests_cmd += ['-p', 'no:cov']
        # Check style
        styles_err = None
        try:
            print_info('Running codestyle checks...', color='orange')
            output = self._executor(
                codestyle_cmd,
                sandbox=sandbox,
                cwd=str(build_dir),
                verbose=verbose,
                capture_output=normalize_output,
            )
            if normalize_output:
                print_info(output or '', end='')
                print_info('[No issues]')
            print_info('OK', color='green')
        except ExecutionFailedError as e:
            # Style errors are always raised, but only after all other checks have run
            styles_err = e
            if normalize_output:
                print_info(e.output, end='')
                e.output = ''
            output = ''
            print_info('ERROR', color='red')
        # Check typing
        typing_err = None
        try:
            if test_config.run_mypy:
                print_info('Running mypy checks...', color='orange')
                output = self._executor(
                    mypy_cmd,
                    sandbox=sandbox,
                    cwd=str(build_dir.parent),  # mypy does not work correctly when run from build_dir itself
                    verbose=verbose,
                    capture_output=normalize_output,
                )
                if normalize_output:
                    print_info(output, end='')
                print_info('OK', color='green')
            else:
                print_info('Type check is skipped for this task!', color='orange')
        except ExecutionFailedError as e:
            # Typing errors are always raised, but only after all other checks have run
            typing_err = e
            if normalize_output:
                print_info(e.output, end='')
                e.output = ''
            output = ''
            print_info('ERROR', color='red')
        # Check imports by collecting tests
        import_err = None
        try:
            print_info('Collecting tests...', color='orange')
            output = self._executor(
                tests_collection_cmd,
                sandbox=sandbox,
                cwd=str(build_dir),
                verbose=verbose,
                capture_output=normalize_output,
            )
            if normalize_output:
                print_info(output, end='')
                output = ''
            print_info('OK', color='green')
        except ExecutionFailedError as e:
            # Import errors are always raised, but only after all other checks have run
            import_err = e
            if normalize_output:
                print_info(e.output, end='')
                e.output = ''
            output = ''
            print_info('ERROR', color='red')
        # Run tests
        tests_err = None
        try:
            print_info('Running tests...', color='orange')
            output = self._executor(
                tests_cmd,
                sandbox=sandbox,
                cwd=str(build_dir),
                timeout=test_config.test_timeout,
                verbose=verbose,
                capture_output=test_config.partially_scored or normalize_output,
            )
            if normalize_output or test_config.partially_scored:
                print_info(output, end='')
            print_info('OK', color='green')
        except ExecutionFailedError as e:
            if not test_config.partially_scored:
                # Defer the error only if all tests have to pass
                tests_err = e
            output = e.output
            if normalize_output or test_config.partially_scored:
                print_info(output, end='')
                e.output = ''
                output = ''
            if test_config.partially_scored:
                print_info('ERROR? (Some tests failed, but this is a partially_scored task)', color='orange')
            else:
                print_info('ERROR', color='red')

        # Raise deferred errors in precedence order
        if import_err is not None:
            raise RunFailedError('Import error', output=import_err.output) from import_err
        if tests_err is not None:
            raise TestsFailedError('Public or private tests error', output=tests_err.output) from tests_err
        if styles_err is not None:
            raise StylecheckFailedError('Style error', output=styles_err.output) from styles_err
        if typing_err is not None:
            raise StylecheckFailedError('Typing error', output=typing_err.output) from typing_err

        if test_config.partially_scored:
            output = output or ''  # for mypy only
            return self._parse_summary_score(output)
        else:
            return 1.