|
11 | 11 | # 5. Saves detailed log and summary to test-results/ |
12 | 12 | # 6. Restores terminal to normal when done |
13 | 13 | # |
14 | | -# Usage: ./run_all_tests.sh |
| 14 | +# Usage: |
| 15 | +# ./run_all_tests.sh - Run full test suite |
| 16 | +# ./run_all_tests.sh --summary-only - Regenerate summary from existing log |
15 | 17 | ################################################################################ |
16 | 18 |
|
17 | 19 | set -e |
18 | 20 |
|
################################################################################
# PARSE COMMAND LINE ARGUMENTS
################################################################################

# Default to a full test run; "--summary-only" as the first argument switches
# to regenerating the summary from the previous run's detail log.
SUMMARY_ONLY=false
case "${1:-}" in
  --summary-only) SUMMARY_ONLY=true ;;
esac
| 29 | + |
19 | 30 | ################################################################################ |
20 | 31 | # TERMINAL STYLING FUNCTIONS |
21 | 32 | ################################################################################ |
@@ -79,31 +90,46 @@ LOG_DIR="test-results" |
79 | 90 | DETAIL_LOG="${LOG_DIR}/last_run.log" # Full Maven output |
80 | 91 | SUMMARY_LOG="${LOG_DIR}/last_run_summary.log" # Summary only |
81 | 92 |
|
82 | | - |
83 | 93 | mkdir -p "${LOG_DIR}" |
84 | 94 |
|
85 | | -# Delete old log files and stale flag files from previous run |
86 | | -echo "Cleaning up old files..." |
87 | | -if [ -f "${DETAIL_LOG}" ]; then |
88 | | - rm -f "${DETAIL_LOG}" |
89 | | - echo " - Removed old detail log" |
90 | | -fi |
91 | | -if [ -f "${SUMMARY_LOG}" ]; then |
92 | | - rm -f "${SUMMARY_LOG}" |
93 | | - echo " - Removed old summary log" |
94 | | -fi |
# Remove a stale file if it exists and report what was removed.
# $1 - path to delete, $2 - human-readable description for the echo line
remove_if_present() {
  if [ -f "$1" ]; then
    rm -f "$1"
    echo " - Removed $2"
  fi
}

# If summary-only mode, skip to summary generation
if [ "$SUMMARY_ONLY" = true ]; then
  if [ ! -f "${DETAIL_LOG}" ]; then
    echo "ERROR: No log file found at ${DETAIL_LOG}"
    echo "Please run tests first without --summary-only flag"
    exit 1
  fi
  echo "Regenerating summary from existing log: ${DETAIL_LOG}"
  # No tests run in this mode, so zero out the timing variables;
  # generate_summary falls back to parsing "Total time:" from the log.
  START_TIME=0
  END_TIME=0
  DURATION=0
  DURATION_MIN=0
  DURATION_SEC=0
else
  # Delete old log files and stale flag files from previous run
  echo "Cleaning up old files..."
  remove_if_present "${DETAIL_LOG}" "old detail log"
  remove_if_present "${SUMMARY_LOG}" "old summary log"
  remove_if_present "${LOG_DIR}/monitor.flag" "stale monitor flag"
  remove_if_present "${LOG_DIR}/warning_analysis.tmp" "stale warning analysis"
  remove_if_present "${LOG_DIR}/recent_lines.tmp" "stale temp file"
fi
107 | 133 |
|
108 | 134 | ################################################################################ |
109 | 135 | # HELPER FUNCTIONS |
@@ -186,6 +212,130 @@ display_warning_factors() { |
186 | 212 | rm -f "${analysis_file}" |
187 | 213 | } |
188 | 214 |
|
################################################################################
# GENERATE SUMMARY FUNCTION (DRY)
################################################################################

#######################################
# Parse a Maven/ScalaTest detail log and emit a human-readable run summary
# via log_message, shared by the normal run and --summary-only mode.
# Globals:   SCALATEST_SECTION, TOTAL_TESTS, SUCCEEDED, FAILED, ERRORS,
#            SKIPPED, WARNINGS, BUILD_STATUS, BUILD_COLOR are written.
#            NOTE(review): left global on purpose in case later script
#            sections read them -- TODO confirm and localize.
# Arguments: $1 - detail log path
#            $2 - summary log path (failed-test excerpts are appended here)
#            $3 - start epoch seconds (optional, default 0)
#            $4 - end epoch seconds (optional, default 0)
# Returns:   0 when build succeeded with no failed/errored tests, 1 otherwise
#######################################
generate_summary() {
  local detail_log="$1"
  local summary_log="$2"
  local start_time="${3:-0}"
  local end_time="${4:-0}"

  # Calculate duration from the supplied epochs.
  local duration=$((end_time - start_time))
  local duration_min=$((duration / 60))
  local duration_sec=$((duration % 60))

  # If no timing info (summary-only mode), extract it from Maven's
  # "Total time:" line in the log instead.
  if [ "$duration" -eq 0 ] && grep -q "Total time:" "$detail_log"; then
    # Declare and assign separately so a failing command substitution is
    # not masked by the always-zero exit status of 'local'.
    local time_str
    time_str=$(grep "Total time:" "$detail_log" | tail -1)
    duration_min=$(echo "$time_str" | grep -oP '\d+(?= min)' || echo "0")
    duration_sec=$(echo "$time_str" | grep -oP '\d+(?=\.\d+ s)' || echo "0")
  fi

  print_header "Test Results Summary"

  # Extract test statistics from ScalaTest output (UNKNOWN fallback if
  # extraction fails). ScalaTest reports across multiple lines:
  #   Run completed in X seconds.
  #   Total number of tests run: N
  #   Suites: completed M, aborted 0
  #   Tests: succeeded N, failed 0, canceled 0, ignored 0, pending 0
  #   All tests passed.
  # Take the last such block in case several modules ran.
  SCALATEST_SECTION=$(grep -A 4 "Run completed" "${detail_log}" | tail -5)
  if [ -n "$SCALATEST_SECTION" ]; then
    TOTAL_TESTS=$(echo "$SCALATEST_SECTION" | grep -oP "Total number of tests run: \K\d+" || echo "UNKNOWN")
    SUCCEEDED=$(echo "$SCALATEST_SECTION" | grep -oP "succeeded \K\d+" || echo "UNKNOWN")
    FAILED=$(echo "$SCALATEST_SECTION" | grep -oP "failed \K\d+" || echo "UNKNOWN")
    ERRORS=$(echo "$SCALATEST_SECTION" | grep -oP "errors \K\d+" || echo "0")
    SKIPPED=$(echo "$SCALATEST_SECTION" | grep -oP "ignored \K\d+" || echo "UNKNOWN")
  else
    TOTAL_TESTS="UNKNOWN"
    SUCCEEDED="UNKNOWN"
    FAILED="UNKNOWN"
    ERRORS="0"
    SKIPPED="UNKNOWN"
  fi

  # BUG FIX: 'grep -c' prints "0" AND exits 1 when there are no matches,
  # so the previous '|| echo "UNKNOWN"' produced the two-line value
  # "0<newline>UNKNOWN", which wrongly passed the != "0" guard below and
  # triggered warning analysis on warning-free runs. '|| true' keeps the
  # printed count while staying safe under 'set -e'; fall back to UNKNOWN
  # only when grep itself failed (e.g. unreadable log) and printed nothing.
  WARNINGS=$(grep -c "WARNING" "${detail_log}" 2>/dev/null || true)
  WARNINGS="${WARNINGS:-UNKNOWN}"

  # Determine build status from Maven's final banner.
  if grep -q "BUILD SUCCESS" "${detail_log}"; then
    BUILD_STATUS="SUCCESS"
    BUILD_COLOR=""
  elif grep -q "BUILD FAILURE" "${detail_log}"; then
    BUILD_STATUS="FAILURE"
    BUILD_COLOR=""
  else
    BUILD_STATUS="UNKNOWN"
    BUILD_COLOR=""
  fi

  # Print summary
  log_message "Test Run Summary"
  log_message "================"
  log_message "Timestamp: $(date)"
  log_message "Duration: ${duration_min}m ${duration_sec}s"
  log_message "Build Status: ${BUILD_STATUS}"
  log_message ""
  log_message "Test Statistics:"
  log_message " Total: ${TOTAL_TESTS}"
  log_message " Succeeded: ${SUCCEEDED}"
  log_message " Failed: ${FAILED}"
  log_message " Errors: ${ERRORS}"
  log_message " Skipped: ${SKIPPED}"
  log_message " Warnings: ${WARNINGS}"
  log_message ""

  # Analyze and display warning factors if warnings exist.
  if [ "${WARNINGS}" != "0" ] && [ "${WARNINGS}" != "UNKNOWN" ]; then
    warning_analysis=$(analyze_warnings "${detail_log}")
    display_warning_factors "${warning_analysis}" 10
    log_message ""
  fi

  # Show failed tests if any (only actual ScalaTest failure markers, not
  # application ERROR log lines).
  if [ "${FAILED}" != "0" ] && [ "${FAILED}" != "UNKNOWN" ]; then
    log_message "Failed Tests:"
    grep -E "\*\*\* FAILED \*\*\*|\*\*\* RUN ABORTED \*\*\*" "${detail_log}" | head -50 >> "${summary_log}"
    log_message ""
  elif [ "${ERRORS}" != "0" ] && [ "${ERRORS}" != "UNKNOWN" ]; then
    log_message "Test Errors:"
    grep -E "\*\*\* FAILED \*\*\*|\*\*\* RUN ABORTED \*\*\*" "${detail_log}" | head -50 >> "${summary_log}"
    log_message ""
  fi

  # Final result
  print_header "Test Run Complete"

  if [ "${BUILD_STATUS}" = "SUCCESS" ] && [ "${FAILED}" = "0" ] && [ "${ERRORS}" = "0" ]; then
    log_message "[PASS] All tests passed!"
    return 0
  else
    log_message "[FAIL] Tests failed"
    return 1
  fi
}
| 321 | + |
################################################################################
# SUMMARY-ONLY MODE
################################################################################

# --summary-only: rebuild the summary from the existing detail log, report
# where it landed, and exit without running any tests.
if [ "$SUMMARY_ONLY" = true ]; then
  rm -f "${SUMMARY_LOG}"
  if ! generate_summary "${DETAIL_LOG}" "${SUMMARY_LOG}" 0 0; then
    exit 1
  fi
  log_message ""
  log_message "Summary regenerated:"
  log_message " ${SUMMARY_LOG}"
  exit 0
fi
| 338 | + |
189 | 339 | ################################################################################ |
190 | 340 | # START TEST RUN |
191 | 341 | ################################################################################ |
@@ -391,72 +541,12 @@ FINAL_COUNTS="" |
391 | 541 | update_terminal_title "Complete" "$FINAL_ELAPSED" "$FINAL_COUNTS" "" "" |
392 | 542 |
|
393 | 543 | ################################################################################ |
394 | | -# GENERATE SUMMARY |
| 544 | +# GENERATE SUMMARY (using DRY function) |
395 | 545 | ################################################################################ |
396 | 546 |
|
397 | | -print_header "Test Results Summary" |
398 | | - |
399 | | -# Extract test statistics (with UNKNOWN fallback if extraction fails) |
400 | | -TOTAL_TESTS=$(grep -E "Total number of tests run:|Tests run:" "${DETAIL_LOG}" | tail -1 | grep -oP '\d+' | head -1 || echo "UNKNOWN") |
401 | | -SUCCEEDED=$(grep -oP "succeeded \K\d+" "${DETAIL_LOG}" | tail -1 || echo "UNKNOWN") |
402 | | -FAILED=$(grep -oP "failed \K\d+" "${DETAIL_LOG}" | tail -1 || echo "UNKNOWN") |
403 | | -ERRORS=$(grep -oP "errors \K\d+" "${DETAIL_LOG}" | tail -1 || echo "UNKNOWN") |
404 | | -SKIPPED=$(grep -oP "(skipped|ignored) \K\d+" "${DETAIL_LOG}" | tail -1 || echo "UNKNOWN") |
405 | | -WARNINGS=$(grep -c "WARNING" "${DETAIL_LOG}" || echo "UNKNOWN") |
406 | | - |
407 | | -# Determine build status |
408 | | -if grep -q "BUILD SUCCESS" "${DETAIL_LOG}"; then |
409 | | - BUILD_STATUS="SUCCESS" |
410 | | - BUILD_COLOR="" |
411 | | -elif grep -q "BUILD FAILURE" "${DETAIL_LOG}"; then |
412 | | - BUILD_STATUS="FAILURE" |
413 | | - BUILD_COLOR="" |
414 | | -else |
415 | | - BUILD_STATUS="UNKNOWN" |
416 | | - BUILD_COLOR="" |
417 | | -fi |
418 | | - |
419 | | -# Print summary |
420 | | -log_message "Test Run Summary" |
421 | | -log_message "================" |
422 | | -log_message "Timestamp: $(date)" |
423 | | -log_message "Duration: ${DURATION_MIN}m ${DURATION_SEC}s" |
424 | | -log_message "Build Status: ${BUILD_STATUS}" |
425 | | -log_message "" |
426 | | -log_message "Test Statistics:" |
427 | | -log_message " Total: ${TOTAL_TESTS}" |
428 | | -log_message " Succeeded: ${SUCCEEDED}" |
429 | | -log_message " Failed: ${FAILED}" |
430 | | -log_message " Errors: ${ERRORS}" |
431 | | -log_message " Skipped: ${SKIPPED}" |
432 | | -log_message " Warnings: ${WARNINGS}" |
433 | | -log_message "" |
434 | | - |
435 | | -# Analyze and display warning factors if warnings exist |
436 | | -if [ "${WARNINGS}" != "0" ] && [ "${WARNINGS}" != "UNKNOWN" ]; then |
437 | | - warning_analysis=$(analyze_warnings "${DETAIL_LOG}") |
438 | | - display_warning_factors "${warning_analysis}" 10 |
439 | | - log_message "" |
440 | | -fi |
441 | | - |
442 | | -# Show failed tests if any |
443 | | -if [ "${FAILED}" != "0" ] || [ "${ERRORS}" != "0" ]; then |
444 | | - log_message "Failed Tests:" |
445 | | - grep -A 5 "FAILED\|ERROR" "${DETAIL_LOG}" | head -50 >> "${SUMMARY_LOG}" |
446 | | - log_message "" |
447 | | -fi |
448 | | - |
449 | | -################################################################################ |
450 | | -# FINAL RESULT |
451 | | -################################################################################ |
452 | | - |
453 | | -print_header "Test Run Complete" |
454 | | - |
455 | | -if [ "${BUILD_STATUS}" = "SUCCESS" ] && [ "${FAILED}" = "0" ] && [ "${ERRORS}" = "0" ]; then |
456 | | - log_message "[PASS] All tests passed!" |
# Run the shared summary generator; it returns non-zero when the build
# failed or any test failed/errored. The '|| assignment' form is safe
# under 'set -e'.
EXIT_CODE=0
generate_summary "${DETAIL_LOG}" "${SUMMARY_LOG}" "$START_TIME" "$END_TIME" || EXIT_CODE=1
462 | 552 |
|
|
0 commit comments