"""Logic to find all tests, their progress and details of individual test."""
import os
from typing import Any, Dict, List
from flask import (Blueprint, Response, abort, g, jsonify, redirect, request,
url_for)
from sqlalchemy import and_
from decorators import template_renderer
from exceptions import TestNotFoundException
from mod_auth.controllers import check_access_rights, login_required
from mod_auth.models import Role
from mod_customized.models import TestFork
from mod_home.models import CCExtractorVersion, GeneralData
from mod_regression.models import (Category, RegressionTestOutput,
regressionTestLinkTable)
from mod_test.models import (Fork, Test, TestPlatform, TestProgress,
TestResult, TestResultFile, TestStatus, TestType)
from utility import serve_file_download
mod_test = Blueprint('test', __name__)


@mod_test.before_app_request
def before_app_request() -> None:
    """Curate menu items before app request."""
    g.menu_entries['tests'] = {
        'title': 'Test results',
        'icon': 'flask',
        'route': 'test.index'
    }


@mod_test.errorhandler(TestNotFoundException)
@template_renderer('test/test_not_found.html', 404)
def not_found(error):
    """Show the error page when a test is not found."""
    return {
        'message': error.message
    }


@mod_test.route('/')
@template_renderer()
def index():
    """Show the index page for tests."""
    fork = Fork.query.filter(Fork.github.like(f"%/{g.github['repository_owner']}/{g.github['repository']}.git")).first()
    return {
        'tests': Test.query.order_by(Test.id.desc()).limit(50).all(),
        'TestType': TestType,
        'fork': fork
    }


def get_test_results(test) -> List[Dict[str, Any]]:
    """
    Get test results for each category.

    :param test: The test to retrieve the data for.
    :type test: Test
    """
    populated_categories = g.db.query(regressionTestLinkTable.c.category_id).subquery()
    categories = Category.query.filter(Category.id.in_(populated_categories)).order_by(Category.name.asc()).all()
    results = [{
        'category': category,
        'tests': [{
            'test': rt,
            'result': next((r for r in test.results if r.regression_test_id == rt.id), None),
            'files': TestResultFile.query.filter(
                and_(TestResultFile.test_id == test.id, TestResultFile.regression_test_id == rt.id)
            ).all()
        } for rt in category.regression_tests if rt.id in test.get_customized_regressiontests()]
    } for category in categories]
    # Run through the categories to see if they should be marked as failed or passed. A category fails if one or
    # more tests in that category failed.
    for category in results:
        error = False
        for category_test in category['tests']:
            test_error = False
            # A test fails if:
            # - The exit code is not what we expected
            # - There are result files, but one of the files is [not identical
            #   and not one of the multiple correct output files]
            # - There are no result files but there should have been
            result = category_test['result']
            if result is not None and result.exit_code != result.expected_rc:
                test_error = True
            if len(category_test['files']) > 0:
                for result_file in category_test['files']:
                    # Guard on result as well: there may be files without a stored result row
                    if result is not None and result_file.got is not None and result.exit_code == 0:
                        file_error = True
                        for file in result_file.regression_test_output.multiple_files:
                            if file.file_hashes == result_file.got:
                                file_error = False
                                break
                        test_error = file_error or test_error
            else:
                # We need to check if the regression test had any file that shouldn't have been ignored.
                outputs = RegressionTestOutput.query.filter(and_(
                    RegressionTestOutput.regression_id == category_test['test'].id,
                    RegressionTestOutput.ignore.is_(False)
                )).all()
                got = None
                if len(outputs) > 0:
                    test_error = True
                    got = 'error'
                # Add a dummy entry for pass/fail display
                category_test['files'] = [TestResultFile(-1, -1, -1, '', got)]
            # Store the test status in the error field
            category_test['error'] = test_error
            # Update the category error
            error = error or test_error
        category['error'] = error
    results.sort(key=lambda entry: entry['category'].name)
    return results
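
# Illustrative shape of the value returned by get_test_results() (a minimal
# sketch; the keys come from the code above, the sample objects and values
# are invented):
#
# [
#     {
#         'category': <Category name='Broadcast'>,
#         'error': True,
#         'tests': [
#             {
#                 'test': <RegressionTest id=12>,
#                 'result': <TestResult exit_code=0 expected_rc=0>,
#                 'files': [<TestResultFile ...>],
#                 'error': False,
#             },
#         ],
#     },
# ]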


def get_data_for_test(test, title=None) -> Dict[str, Any]:
    """
    Retrieve the data for a single test, with an optional title.

    :param test: The test to retrieve the data for.
    :type test: Test
    :param title: The title to use in the result. If empty, it's set to 'test {id}'
    :type title: str
    :return: A dictionary with the appropriate values.
    :rtype: dict
    """
    if title is None:
        title = f"test {test.id}"
    # Calculate average runtime for this platform (used when the test hasn't started yet)
    avg_minutes = 0
    if len(test.progress) == 0:
        try:
            avg_time_key = 'average_time_' + test.platform.value
            prep_time_key = 'avg_prep_time_' + test.platform.value
            avg_time_record = GeneralData.query.filter(GeneralData.key == avg_time_key).first()
            prep_time_record = GeneralData.query.filter(GeneralData.key == prep_time_key).first()
            avg_duration = float(avg_time_record.value) if avg_time_record else 0
            avg_prep = float(prep_time_record.value) if prep_time_record else 0
            # Total average time in minutes
            avg_minutes = int((avg_duration + avg_prep) / 60)
        except (ValueError, AttributeError):
            avg_minutes = 0
    results = get_test_results(test)
    # Calculate sample progress for the initial page load
    completed_samples = len(test.results)
    total_samples = len(test.get_customized_regressiontests())
    progress_percentage = 0
    if total_samples > 0:
        progress_percentage = int((completed_samples / total_samples) * 100)
    return {
        'test': test,
        'TestType': TestType,
        'results': results,
        'title': title,
        'avg_minutes': avg_minutes,
        'sample_progress': {
            'current': completed_samples,
            'total': total_samples,
            'percentage': progress_percentage
        }
    }
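
# Worked example of the arithmetic above (all values are invented): with an
# average duration of 540 s and a prep time of 60 s,
# avg_minutes = int((540 + 60) / 60) = 10; with completed_samples = 12 and
# total_samples = 48, progress_percentage = int((12 / 48) * 100) = 25.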


@mod_test.route('/get_json_data/<test_id>')
def get_json_data(test_id):
    """
    Retrieve the status of a test and return it in JSON format.

    :param test_id: The id of the test to retrieve data for.
    :type test_id: int
    :return: A JSON structure that holds the data about this test.
    :rtype: JSON dict
    """
    test = Test.query.filter(Test.id == test_id).first()
    if test is None:
        g.log.error(f'test with id: {test_id} not found!')
        return jsonify({'status': 'failure', 'error': 'Test not found'})
    pr_data = test.progress_data()
    progress_array = []
    for entry in test.progress:
        progress_array.append({
            'timestamp': entry.timestamp.strftime('%Y-%m-%d %H:%M:%S (%Z)'),
            'status': entry.status.description,
            'message': entry.message
        })
    # Calculate sample progress from existing TestResult data
    completed_samples = len(test.results)
    total_samples = len(test.get_customized_regressiontests())
    progress_percentage = 0
    if total_samples > 0:
        progress_percentage = int((completed_samples / total_samples) * 100)
    return jsonify({
        'status': 'success',
        'details': pr_data["progress"],
        'complete': test.finished,
        'progress_array': progress_array,
        'sample_progress': {
            'current': completed_samples,
            'total': total_samples,
            'percentage': progress_percentage
        }
    })
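
# Illustrative response from get_json_data (field names match the jsonify()
# call above; the values, including the shape of "details", are invented):
#
# {
#     "status": "success",
#     "details": "...",
#     "complete": false,
#     "progress_array": [
#         {"timestamp": "2024-01-01 12:00:00 (UTC)",
#          "status": "Testing",
#          "message": "Running regression tests"}
#     ],
#     "sample_progress": {"current": 12, "total": 48, "percentage": 25}
# }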


@mod_test.route('/<test_id>')
@template_renderer()
def by_id(test_id):
    """
    Show a specific test.

    :param test_id: id of the test
    :type test_id: int
    :raises TestNotFoundException: when the test id is not found
    :return: data for the given test id
    :rtype: dict
    """
    test = Test.query.filter(Test.id == test_id).first()
    if test is None:
        g.log.error(f"test with id: {test_id} not found!")
        raise TestNotFoundException(f"Test with id {test_id} does not exist")
    return get_data_for_test(test)


@mod_test.route('/ccextractor/<ccx_version>')
@template_renderer('test/by_id.html')
def ccextractor_version(ccx_version):
    """
    Provide tests for a particular version of CCExtractor.

    Look up the version, find a test for its commit and show the results.

    :param ccx_version: version of CCExtractor
    :type ccx_version: str
    :raises TestNotFoundException: when no test is found for the version
    :raises TestNotFoundException: when an unknown version is provided
    :return: test data
    :rtype: dict
    """
    version = CCExtractorVersion.query.filter(CCExtractorVersion.version == ccx_version).first()
    if version is not None:
        test = Test.query.filter(Test.commit == version.commit).first()
        if test is None:
            g.log.error(f"test with commit {version.commit} not found!")
            raise TestNotFoundException(f"There are no tests available for CCExtractor version {version.version}")
        return get_data_for_test(test, f"CCExtractor {version.version}")
    raise TestNotFoundException(f"There is no CCExtractor version known as {ccx_version}")


@mod_test.route('/commit/<commit_hash>')
@template_renderer('test/by_id.html')
def by_commit(commit_hash):
    """
    Provide tests for a particular commit of CCExtractor.

    Look up the commit hash, find a test for it and show the results.

    :param commit_hash: commit hash
    :type commit_hash: str
    :raises TestNotFoundException: when no test is found for the commit
    :return: test data
    :rtype: dict
    """
    test = Test.query.filter(Test.commit == commit_hash).first()
    if test is None:
        g.log.error(f"test with commit hash {commit_hash} not found!")
        raise TestNotFoundException(f"There is no test available for commit {commit_hash}")
    return get_data_for_test(test, f"commit {commit_hash}")


@mod_test.route('/master/<platform>')
@template_renderer('test/by_id.html')
def latest_commit_info(platform):
    """
    Provide tests for the latest commit of CCExtractor on a particular platform.

    :param platform: platform
    :type platform: enum, ["windows", "linux"]
    :raises TestNotFoundException: when no test is found for the latest commit
    :return: test data
    :rtype: dict
    """
    try:
        platform = TestPlatform.from_string(platform)
    except ValueError:
        g.log.critical(f"platform {platform} is not supported!")
        abort(404)
    # Look up the hash of the latest commit
    commit_hash = GeneralData.query.filter(GeneralData.key == 'fetch_commit_' + platform.value).first().value
    test = Test.query.filter(Test.commit == commit_hash, Test.platform == platform).first()
    if test is None:
        g.log.error(f"test with commit hash {commit_hash} not found in {str(platform)}!")
        raise TestNotFoundException(f"There is no test available for commit {commit_hash}")
    return get_data_for_test(test, f"master {commit_hash}")


@mod_test.route('/diff/<test_id>/<regression_test_id>/<output_id>', defaults={'to_view': 1})
@mod_test.route('/diff/<test_id>/<regression_test_id>/<output_id>/<int:to_view>')
def generate_diff(test_id: int, regression_test_id: int, output_id: int, to_view: int = 1):
    """
    Generate a diff between the output and the expected result.

    The function is invoked in two modes: to view and to download. In 'to view' mode a maximum of 50 diffs is
    returned, while in 'to download' mode all diffs are returned as a downloadable HTML file.
    We check for XHR when the request is to simply view the diff.

    :param test_id: id of the test
    :type test_id: int
    :param regression_test_id: id of the regression test
    :type regression_test_id: int
    :param output_id: id of the generated output
    :type output_id: int
    :param to_view: 1 (default) if the diff is to be shown in the browser, 0 if it is to be downloaded
    :type to_view: int
    :return: html diff
    :rtype: html
    """
    from run import config
    result = TestResultFile.query.filter(and_(
        TestResultFile.test_id == test_id,
        TestResultFile.regression_test_id == regression_test_id,
        TestResultFile.regression_test_output_id == output_id
    )).first()
    if result is not None:
        path = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'TestResults')
        request_xhr_key = request.headers.get('X-Requested-With')
        if (request_xhr_key == 'XMLHttpRequest' or request.accept_mimetypes['application/json']) and to_view == 1:
            return result.generate_html_diff(path)
        elif to_view == 0:
            diff_html_text = result.generate_html_diff(path, to_view=False)
            return Response(
                diff_html_text,
                mimetype='text/html',
                headers={
                    'Content-disposition':
                        f"attachment; filename=test{test_id}_regression{regression_test_id}_output{output_id}.html"
                }
            )
        abort(403, 'generate_diff')
    abort(404)
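
# Illustrative usage of the two modes above (host and ids are invented, and
# the paths assume the URL prefix this blueprint is registered under):
#
#   # View mode: an XHR request returns the inline HTML diff (max 50 diffs)
#   curl -H "X-Requested-With: XMLHttpRequest" \
#       https://example.org/test/diff/1234/56/7
#
#   # Download mode: to_view=0 returns the full diff as an attachment
#   curl -o diff.html https://example.org/test/diff/1234/56/7/0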


@mod_test.route('/log-files/<test_id>')
def download_build_log_file(test_id):
    """
    Serve the build log for download.

    :param test_id: id of the test
    :type test_id: int
    :raises TestNotFoundException: when the build log is not found
    :raises TestNotFoundException: when the test id is not found
    :return: build log text file
    :rtype: Flask response
    """
    from flask import send_from_directory
    from run import config

    test = Test.query.filter(Test.id == test_id).first()
    if test is not None:
        file_name = f"{test_id}.txt"
        log_dir = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'LogFiles')
        log_file_path = os.path.join(log_dir, file_name)
        if os.path.isfile(log_file_path):
            return send_from_directory(log_dir, file_name, as_attachment=True)
        raise TestNotFoundException(f"Build log for Test {test_id} not found")
    raise TestNotFoundException(f"Test with id {test_id} not found")


@mod_test.route('/restart_test/<test_id>', methods=['GET', 'POST'])
@login_required
@check_access_rights([Role.admin, Role.tester, Role.contributor])
@template_renderer()
def restart_test(test_id):
    """
    Allow an admin or the test's owner to restart a running or finished test.

    :param test_id: Test ID of the test which the user wants to restart
    :type test_id: int
    """
    test = Test.query.filter(Test.id == test_id).first()
    if test is None:
        abort(404)
    test_fork = TestFork.query.filter(TestFork.user_id == g.user.id, TestFork.test_id == test_id).first()
    if not g.user.is_admin and test_fork is None:
        g.log.warning(f"user with id: {g.user.id} tried to access restricted endpoint")
        abort(403)
    # Remove all previous results and progress so the test runs from scratch
    TestResultFile.query.filter(TestResultFile.test_id == test.id).delete()
    TestResult.query.filter(TestResult.test_id == test.id).delete()
    TestProgress.query.filter(TestProgress.test_id == test.id).delete()
    g.db.commit()
    g.log.info(f"test with id: {test_id} restarted")
    return redirect(url_for('.by_id', test_id=test.id))


@mod_test.route('/stop_test/<test_id>', methods=['GET', 'POST'])
@login_required
@check_access_rights([Role.admin, Role.tester, Role.contributor])
@template_renderer()
def stop_test(test_id):
    """
    Allow an admin or the test's owner to stop a running test.

    :param test_id: Test ID of the test which the user wants to stop
    :type test_id: int
    """
    test = Test.query.filter(Test.id == test_id).first()
    if test is None:
        abort(404)
    test_fork = TestFork.query.filter(TestFork.user_id == g.user.id, TestFork.test_id == test_id).first()
    if not g.user.is_admin and test_fork is None:
        g.log.warning(f"user with id: {g.user.id} tried to access restricted endpoint")
        abort(403)
    message = "Canceled by user"
    if g.user.is_admin:
        message = "Canceled by admin"
    test_progress = TestProgress(test.id, TestStatus.canceled, message)
    g.db.add(test_progress)
    g.db.commit()
    g.log.info(f"test with id: {test_id} stopped")
    return redirect(url_for('.by_id', test_id=test.id))


def _artifact_redirect(test_id, blob_path, filename='artifact'):
    """Generate a signed URL for a GCS artifact and redirect to it, or 404 if the blob does not exist."""
    from datetime import timedelta
    from run import config, storage_client_bucket

    blob = storage_client_bucket.blob(blob_path)
    if not blob.exists():
        abort(404)
    url = blob.generate_signed_url(
        version="v4",
        expiration=timedelta(minutes=config.get('GCS_SIGNED_URL_EXPIRY_LIMIT', 30)),
        method="GET",
        response_disposition=f'attachment; filename="{filename}"'
    )
    return redirect(url)
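
# Minimal usage sketch for the helper above (the blob path and id are invented):
#
#   return _artifact_redirect(42, 'test_artifacts/42/coredump', filename='coredump-42')
#
# The V4 signed URL expires after GCS_SIGNED_URL_EXPIRY_LIMIT minutes
# (30 by default, per the config lookup above).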


@mod_test.route('/<int:test_id>/binary')
def download_binary(test_id):
    """Download the ccextractor binary used in a test (linux or windows)."""
    from run import storage_client_bucket

    # Try the linux name first, then the windows one
    for name in ['ccextractor', 'ccextractor.exe']:
        blob_path = f'test_artifacts/{test_id}/{name}'
        if storage_client_bucket.blob(blob_path).exists():
            return _artifact_redirect(test_id, blob_path, filename=name)
    abort(404)


@mod_test.route('/<int:test_id>/coredump')
def download_coredump(test_id):
    """Download the coredump from a test, if one was produced."""
    return _artifact_redirect(
        test_id,
        f'test_artifacts/{test_id}/coredump',
        filename=f'coredump-{test_id}'
    )


@mod_test.route('/<int:test_id>/combined-stdout')
def download_combined_stdout(test_id):
    """Download the combined stdout/stderr log from all test invocations."""
    return _artifact_redirect(
        test_id,
        f'test_artifacts/{test_id}/combined_stdout.log',
        filename=f'combined_stdout-{test_id}.log'
    )


@mod_test.route('/<int:test_id>/regression/<int:regression_test_id>/<int:output_id>/output-got')
def download_output_got(test_id, regression_test_id, output_id):
    """Download the actual output file from TestResults using the hash stored in the DB."""
    rf = TestResultFile.query.filter(and_(
        TestResultFile.test_id == test_id,
        TestResultFile.regression_test_id == regression_test_id,
        TestResultFile.regression_test_output_id == output_id
    )).first()
    if rf is None or rf.got is None:
        abort(404)
    ext = os.path.splitext(rf.regression_test_output.filename_correct)[1]
    return _artifact_redirect(
        test_id,
        f'TestResults/{rf.got}{ext}',
        filename=f'output_got_{regression_test_id}_{output_id}{ext}'
    )


@mod_test.route('/<int:test_id>/regression/<int:regression_test_id>/<int:output_id>/output-expected')
def download_output_expected(test_id, regression_test_id, output_id):
    """Download the expected output file from TestResults using the hash stored in the DB."""
    rf = TestResultFile.query.filter(and_(
        TestResultFile.test_id == test_id,
        TestResultFile.regression_test_id == regression_test_id,
        TestResultFile.regression_test_output_id == output_id
    )).first()
    if rf is None:
        abort(404)
    ext = os.path.splitext(rf.regression_test_output.filename_correct)[1]
    return _artifact_redirect(
        test_id,
        f'TestResults/{rf.expected}{ext}',
        filename=f'output_expected_{regression_test_id}_{output_id}{ext}'
    )


@mod_test.route('/<int:test_id>/sample/<int:sample_id>')
def download_sample_ai(test_id, sample_id):
    """Download the sample file for a regression test (no auth required for the AI workflow)."""
    from mod_sample.models import Sample

    sample = Sample.query.filter(Sample.id == sample_id).first()
    if sample is None:
        abort(404)
    return _artifact_redirect(
        test_id,
        f'TestFiles/{sample.filename}',
        filename=sample.original_name
    )


@mod_test.route('/<int:test_id>/ai.json')
def ai_json_endpoint(test_id):
    """Return structured JSON with download URLs for all artifacts, intended for AI agents."""
    from run import storage_client_bucket

    test = Test.query.filter(Test.id == test_id).first()
    if test is None:
        return jsonify({'error': f'Test {test_id} not found'}), 404

    def blob_exists(path):
        return storage_client_bucket.blob(path).exists()

    has_binary = (
        blob_exists(f'test_artifacts/{test_id}/ccextractor') or
        blob_exists(f'test_artifacts/{test_id}/ccextractor.exe')
    )
    has_coredump = blob_exists(f'test_artifacts/{test_id}/coredump')
    has_combined_stdout = blob_exists(f'test_artifacts/{test_id}/combined_stdout.log')
    results = get_test_results(test)
    test_cases = []
    total = 0
    passed = 0
    failed = 0
    for category in results:
        for t_data in category['tests']:
            total += 1
            rt = t_data['test']
            result = t_data['result']
            is_error = t_data.get('error', False)
            result_files = t_data['files']
            if is_error:
                failed += 1
            else:
                passed += 1
            outputs = []
            for expected_output in rt.output_files:
                if expected_output.ignore:
                    continue
                matched_rf = None
                for rf in result_files:
                    if rf.test_id != -1 and rf.regression_test_output_id == expected_output.id:
                        matched_rf = rf
                        break
                got_url = None
                diff_url = None
                if matched_rf and matched_rf.got is not None:
                    got_url = url_for(
                        '.download_output_got',
                        test_id=test_id,
                        regression_test_id=rt.id,
                        output_id=expected_output.id,
                        _external=True
                    )
                    diff_url = url_for(
                        '.generate_diff',
                        test_id=test_id,
                        regression_test_id=rt.id,
                        output_id=expected_output.id,
                        to_view=0,
                        _external=True
                    )
                else:
                    # If the test passed, got and expected match exactly.
                    got_url = url_for(
                        '.download_output_expected',
                        test_id=test_id,
                        regression_test_id=rt.id,
                        output_id=expected_output.id,
                        _external=True
                    )
                output_entry = {
                    'output_id': expected_output.id,
                    'correct_extension': expected_output.correct_extension,
                    'expected_url': url_for(
                        '.download_output_expected',
                        test_id=test_id,
                        regression_test_id=rt.id,
                        output_id=expected_output.id,
                        _external=True
                    ),
                    'got_url': got_url,
                    'diff_url': diff_url,
                }
                outputs.append(output_entry)
            test_cases.append({
                'regression_test_id': rt.id,
                'category': category['category'].name,
                'sample_filename': rt.sample.original_name,
                'sample_url': url_for(
                    '.download_sample_ai',
                    test_id=test_id,
                    sample_id=rt.sample.id,
                    _external=True
                ),
                'arguments': rt.command,
                'result': 'Fail' if is_error else 'Pass',
                'exit_code': result.exit_code if result else None,
                'expected_exit_code': result.expected_rc if result else None,
                'runtime_ms': result.runtime if result else None,
                'outputs': outputs,
                'how_to_reproduce': f'./ccextractor {rt.command} {rt.sample.original_name}',
            })
    report = {
        'test_id': test.id,
        'commit': test.commit,
        'platform': test.platform.value,
        'branch': test.branch,
        'status': 'completed' if test.finished else 'running',
        'binary_url': url_for(
            '.download_binary', test_id=test_id, _external=True
        ) if has_binary else None,
        'coredump_url': url_for(
            '.download_coredump', test_id=test_id, _external=True
        ) if has_coredump else None,
        'log_url': url_for(
            '.download_build_log_file', test_id=test_id, _external=True
        ),
        'combined_stdout_url': url_for(
            '.download_combined_stdout', test_id=test_id, _external=True
        ) if has_combined_stdout else None,
        'summary': {
            'total': total,
            'passed': passed,
            'failed': failed,
        },
        'test_cases': test_cases,
        'how_to_reproduce': (
            'Download the binary and sample, then run: '
            + ('./ccextractor {arguments} {sample_filename}' if test.platform.value == 'linux'
               else 'ccextractorwinfull.exe {arguments} {sample_filename}')
        ),
    }
    return jsonify(report)
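
# Illustrative top-level shape of the ai.json report (a sketch; the keys come
# from the code above, the host and values are invented):
#
# {
#     "test_id": 1234,
#     "commit": "abc1234",
#     "platform": "linux",
#     "branch": "master",
#     "status": "completed",
#     "binary_url": "https://example.org/test/1234/binary",
#     "coredump_url": null,
#     "log_url": "https://example.org/test/log-files/1234",
#     "combined_stdout_url": "https://example.org/test/1234/combined-stdout",
#     "summary": {"total": 48, "passed": 47, "failed": 1},
#     "test_cases": [ ... ],
#     "how_to_reproduce": "Download the binary and sample, then run: ..."
# }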