diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6c8b21e07..4d6510978 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,6 +3,7 @@ repos: rev: v4.0.1 hooks: - id: end-of-file-fixer + exclude: ^smart_tests/docs/ - repo: https://github.com/PyCQA/flake8 rev: 7.0.0 diff --git a/README.md b/README.md index 1d95e0749..e49dc0129 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +This is a CLI to interact with CloudBees Smart Tests. + # Usage See https://help.launchableinc.com/resources/cli-reference/ and @@ -89,6 +91,13 @@ uv add some-package uv add --dev some-dev-package ``` +## Adding a new command +`smart_tests/args4p` defines a small command line argument parsing framework for this CLI. See its README.md for +the capabilities. + +`smart_tests/__main__.py` defines the top level commands. Follow the code from there to see where sub-commands +are registered. + ## Updating Python Version When updating the Python version requirement, update the following files: @@ -106,6 +115,21 @@ When updating the Python version requirement, update the following files: Create new release on Github, then Github Actions automatically uploads the module to PyPI. +## How to update bundled documentation + +The product documentation lives in `smart_tests/docs/` and is bundled into the +distributed package. 
To pull in the latest from the docsite repository: + +```shell +uv run poe update-docs +``` + +Users can extract the bundled docs to the current directory with: + +```shell +smart-tests get docs +``` + ## How to update smart_tests/jar/exe_deploy.jar ``` diff --git a/pyproject.toml b/pyproject.toml index e50cd10b1..60b66e7f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,7 @@ lint-warn = "flake8 --count --exit-zero --max-complexity=15 --max-line-length=13 test = "python -m unittest" test-xml = "python -m test-runner" type = "mypy smart_tests tests" +update-docs = "bash scripts/update_docs.sh" [build-system] requires = ["setuptools>=45", "wheel", "setuptools_scm"] @@ -61,6 +62,6 @@ build-backend = "setuptools.build_meta" packages = ["smart_tests"] [tool.setuptools.package-data] -smart_tests = ["jar/exe_deploy.jar"] +smart_tests = ["jar/exe_deploy.jar", "docs/**/*"] [tool.setuptools_scm] diff --git a/scripts/update_docs.sh b/scripts/update_docs.sh new file mode 100755 index 000000000..573589b47 --- /dev/null +++ b/scripts/update_docs.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Fetch the latest docs from the docsite repository into smart_tests/docs/. 
+# Run via: uv run poe update-docs +set -euo pipefail + +REPO="git@github.com:cloudbees/docsite-cloudbees-smart-tests.git" +DOCS_DST="$(dirname "$0")/../smart_tests/docs" + +tmpdir=$(mktemp -d) +trap 'rm -rf "$tmpdir"' EXIT + +git clone --depth=1 "$REPO" "$tmpdir" +rm -rf "$DOCS_DST" +cp -r "$tmpdir/docs" "$DOCS_DST" +echo "Docs updated at $DOCS_DST" diff --git a/smart_tests/__main__.py b/smart_tests/__main__.py index 460325f7e..5756bf29e 100644 --- a/smart_tests/__main__.py +++ b/smart_tests/__main__.py @@ -9,6 +9,7 @@ from smart_tests.commands.compare import compare from smart_tests.commands.detect_flakes import detect_flakes from smart_tests.commands.gate import gate +from smart_tests.commands.get import get from smart_tests.commands.inspect import inspect from smart_tests.commands.record import record from smart_tests.commands.stats import stats @@ -25,6 +26,7 @@ cli.add_command(compare) cli.add_command(detect_flakes) cli.add_command(gate) +cli.add_command(get) def _load_test_runners(): diff --git a/smart_tests/commands/get/__init__.py b/smart_tests/commands/get/__init__.py new file mode 100644 index 000000000..c6603b61e --- /dev/null +++ b/smart_tests/commands/get/__init__.py @@ -0,0 +1,11 @@ +from ... import args4p +from ...app import Application +from .docs import docs + + +@args4p.group(help="Retrieve resources") +def get(app: Application): + return app + + +get.add_command(docs) diff --git a/smart_tests/commands/get/docs.py b/smart_tests/commands/get/docs.py new file mode 100644 index 000000000..1cbc0c340 --- /dev/null +++ b/smart_tests/commands/get/docs.py @@ -0,0 +1,27 @@ +import importlib.resources +import shutil +import sys +from pathlib import Path + +import click + +from ... 
import args4p +from ...app import Application + +OUTPUT_DIR = 'smart-tests-docs' + + +@args4p.command(help="Copy product documentation into ./smart-tests-docs") +def docs(app: Application): + output = Path(OUTPUT_DIR) + if output.exists(): + click.secho( + f"'{OUTPUT_DIR}' already exists. Please delete it first, then re-run this command.", + fg='red', err=True, + ) + sys.exit(1) + + docs_src = Path(str(importlib.resources.files('smart_tests') / 'docs')) + click.echo(f"Copying docs to ./{OUTPUT_DIR} ...") + shutil.copytree(docs_src, output) + click.echo(f"Done. Documentation is in ./{OUTPUT_DIR}") diff --git a/smart_tests/docs/antora.yml b/smart_tests/docs/antora.yml new file mode 100644 index 000000000..af2bc6f45 --- /dev/null +++ b/smart_tests/docs/antora.yml @@ -0,0 +1,14 @@ +name: cloudbees-smart-tests +title: CloudBees Smart Tests +version: latest +start_page: ROOT:index.adoc +nav: + - modules/ROOT/nav.adoc +asciidoc: + attributes: + sectanchors: "" + experimental: "" + toc: "" + toc-title: On this page + toc-placement: manual + toclevels: 1@ diff --git a/smart_tests/docs/modules/ROOT/assets/images/0607-inference.png b/smart_tests/docs/modules/ROOT/assets/images/0607-inference.png new file mode 100644 index 000000000..cedcc3a4d Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/0607-inference.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/0607-subset-creation.png b/smart_tests/docs/modules/ROOT/assets/images/0607-subset-creation.png new file mode 100644 index 000000000..8a1fd03df Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/0607-subset-creation.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/0607-training.png b/smart_tests/docs/modules/ROOT/assets/images/0607-training.png new file mode 100644 index 000000000..ff973ba5c Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/0607-training.png differ diff --git 
a/smart_tests/docs/modules/ROOT/assets/images/507088cf-209d-4856-be3e-60477c166d92.png b/smart_tests/docs/modules/ROOT/assets/images/507088cf-209d-4856-be3e-60477c166d92.png new file mode 100644 index 000000000..a0be92bb9 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/507088cf-209d-4856-be3e-60477c166d92.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/confidence-curve.png b/smart_tests/docs/modules/ROOT/assets/images/confidence-curve.png new file mode 100644 index 000000000..25a3e25c0 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/confidence-curve.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/customer-example-curve.png b/smart_tests/docs/modules/ROOT/assets/images/customer-example-curve.png new file mode 100644 index 000000000..2ec22750f Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/customer-example-curve.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/data-flow.png b/smart_tests/docs/modules/ROOT/assets/images/data-flow.png new file mode 100644 index 000000000..910458179 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/data-flow.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/demo.gif b/smart_tests/docs/modules/ROOT/assets/images/demo.gif new file mode 100644 index 000000000..21ae4f44c Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/demo.gif differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/eval-sessions-prioritized.png b/smart_tests/docs/modules/ROOT/assets/images/eval-sessions-prioritized.png new file mode 100644 index 000000000..ef1906af0 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/eval-sessions-prioritized.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/eval-sessions.png b/smart_tests/docs/modules/ROOT/assets/images/eval-sessions.png new file mode 100644 index 000000000..9dee29eae Binary 
files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/eval-sessions.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/flavors.png b/smart_tests/docs/modules/ROOT/assets/images/flavors.png new file mode 100644 index 000000000..8291ed503 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/flavors.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/github-comment.png b/smart_tests/docs/modules/ROOT/assets/images/github-comment.png new file mode 100644 index 000000000..1c14b2732 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/github-comment.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/groups-2-record-after.png b/smart_tests/docs/modules/ROOT/assets/images/groups-2-record-after.png new file mode 100644 index 000000000..7fcab77be Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/groups-2-record-after.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/how-long-train-ml-model.png b/smart_tests/docs/modules/ROOT/assets/images/how-long-train-ml-model.png new file mode 100644 index 000000000..b7fce28d8 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/how-long-train-ml-model.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/icons/icon-vertical-three-dots-dark.png b/smart_tests/docs/modules/ROOT/assets/images/icons/icon-vertical-three-dots-dark.png new file mode 100644 index 000000000..dbfb5eb45 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/icons/icon-vertical-three-dots-dark.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/illustion-run-whitelisted-tests-1.png b/smart_tests/docs/modules/ROOT/assets/images/illustion-run-whitelisted-tests-1.png new file mode 100644 index 000000000..96f4feb77 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/illustion-run-whitelisted-tests-1.png differ diff --git 
a/smart_tests/docs/modules/ROOT/assets/images/illustration-ignore-flakes-1.png b/smart_tests/docs/modules/ROOT/assets/images/illustration-ignore-flakes-1.png new file mode 100644 index 000000000..a507c52ef Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/illustration-ignore-flakes-1.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/illustration-rotate-optimize-coverage.png b/smart_tests/docs/modules/ROOT/assets/images/illustration-rotate-optimize-coverage.png new file mode 100644 index 000000000..73f3ed50a Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/illustration-rotate-optimize-coverage.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/illustration-run-recently-failed-tests.png b/smart_tests/docs/modules/ROOT/assets/images/illustration-run-recently-failed-tests.png new file mode 100644 index 000000000..63b71b15c Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/illustration-run-recently-failed-tests.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/in-place.png b/smart_tests/docs/modules/ROOT/assets/images/in-place.png new file mode 100644 index 000000000..03a3d3440 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/in-place.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/link-to-results-example.png b/smart_tests/docs/modules/ROOT/assets/images/link-to-results-example.png new file mode 100644 index 000000000..f3d74f834 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/link-to-results-example.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/model-training-evaluation-confidence-curves.png b/smart_tests/docs/modules/ROOT/assets/images/model-training-evaluation-confidence-curves.png new file mode 100644 index 000000000..37df6734a Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/model-training-evaluation-confidence-curves.png differ 
diff --git a/smart_tests/docs/modules/ROOT/assets/images/new-code-existing-tests.png b/smart_tests/docs/modules/ROOT/assets/images/new-code-existing-tests.png new file mode 100644 index 000000000..b9280f313 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/new-code-existing-tests.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/object-model-june.png b/smart_tests/docs/modules/ROOT/assets/images/object-model-june.png new file mode 100644 index 000000000..81ebbb70c Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/object-model-june.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/object-model-v2.png b/smart_tests/docs/modules/ROOT/assets/images/object-model-v2.png new file mode 100644 index 000000000..5b01b873e Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/object-model-v2.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/object-model.png b/smart_tests/docs/modules/ROOT/assets/images/object-model.png new file mode 100644 index 000000000..7cbe48da2 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/object-model.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/optimization-target.png b/smart_tests/docs/modules/ROOT/assets/images/optimization-target.png new file mode 100644 index 000000000..36e468a0e Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/optimization-target.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/predictive-test-selection-observation-mode.png b/smart_tests/docs/modules/ROOT/assets/images/predictive-test-selection-observation-mode.png new file mode 100644 index 000000000..7bb463e9a Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/predictive-test-selection-observation-mode.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/prioritized-evaluation-session.png 
b/smart_tests/docs/modules/ROOT/assets/images/prioritized-evaluation-session.png new file mode 100644 index 000000000..0efd9728a Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/prioritized-evaluation-session.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/pts-v2-working.png b/smart_tests/docs/modules/ROOT/assets/images/pts-v2-working.png new file mode 100644 index 000000000..c7e414f53 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/pts-v2-working.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/recording-data.png b/smart_tests/docs/modules/ROOT/assets/images/recording-data.png new file mode 100644 index 000000000..43b1cae40 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/recording-data.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos-built-separately-detailed.png b/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos-built-separately-detailed.png new file mode 100644 index 000000000..95ceaac71 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos-built-separately-detailed.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos-built-separately.png b/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos-built-separately.png new file mode 100644 index 000000000..e6c4cb534 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos-built-separately.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos.png b/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos.png new file mode 100644 index 000000000..de1722c4b Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/recording-from-multiple-repos.png differ diff --git 
a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-41-20.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-41-20.png new file mode 100644 index 000000000..99cd1c4d2 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-41-20.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-44-15.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-44-15.png new file mode 100644 index 000000000..f83a29e2e Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-44-15.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-45-10.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-45-10.png new file mode 100644 index 000000000..82e300100 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-07-25-at-20-45-10.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-08-22-at-22-50-49.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-08-22-at-22-50-49.png new file mode 100644 index 000000000..538e9e3cd Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2023-08-22-at-22-50-49.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-04-29-at-1-48-30-pm.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-04-29-at-1-48-30-pm.png new file mode 100644 index 000000000..e3f0ff531 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-04-29-at-1-48-30-pm.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-04-29-at-2-14-11-pm.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-04-29-at-2-14-11-pm.png new file mode 100644 index 000000000..c4a6f463e Binary files /dev/null and 
b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-04-29-at-2-14-11-pm.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-4-50-56-pm.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-4-50-56-pm.png new file mode 100644 index 000000000..ee0699109 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-4-50-56-pm.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-06-32-pm.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-06-32-pm.png new file mode 100644 index 000000000..e2aea0e82 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-06-32-pm.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-09-04-pm.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-09-04-pm.png new file mode 100644 index 000000000..99d763c36 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-09-04-pm.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-21-37-pm.png b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-21-37-pm.png new file mode 100644 index 000000000..c5a66d4b4 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/screenshot-2024-05-15-at-5-21-37-pm.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/sending-data/builds-dashboard.png b/smart_tests/docs/modules/ROOT/assets/images/sending-data/builds-dashboard.png new file mode 100644 index 000000000..99d12c823 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/sending-data/builds-dashboard.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/sending-data/create-api-key.png b/smart_tests/docs/modules/ROOT/assets/images/sending-data/create-api-key.png 
new file mode 100644 index 000000000..dc7edf800 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/sending-data/create-api-key.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/sending-data/initial-settings.png b/smart_tests/docs/modules/ROOT/assets/images/sending-data/initial-settings.png new file mode 100644 index 000000000..dc7edf800 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/sending-data/initial-settings.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/sending-data/record-session.png b/smart_tests/docs/modules/ROOT/assets/images/sending-data/record-session.png new file mode 100644 index 000000000..a1e6b4aa9 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/sending-data/record-session.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/sending-data/test-session-interface.png b/smart_tests/docs/modules/ROOT/assets/images/sending-data/test-session-interface.png new file mode 100644 index 000000000..344bbb1be Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/sending-data/test-session-interface.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/shift-left.png b/smart_tests/docs/modules/ROOT/assets/images/shift-left.png new file mode 100644 index 000000000..dc1f77d3b Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/shift-left.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/single-session-compared.png b/smart_tests/docs/modules/ROOT/assets/images/single-session-compared.png new file mode 100644 index 000000000..759bc7f8e Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/single-session-compared.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/single-session-result.png b/smart_tests/docs/modules/ROOT/assets/images/single-session-result.png new file mode 100644 index 000000000..abd183485 Binary files /dev/null and 
b/smart_tests/docs/modules/ROOT/assets/images/single-session-result.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/slack-desktop-with-app-messages.png b/smart_tests/docs/modules/ROOT/assets/images/slack-desktop-with-app-messages.png new file mode 100644 index 000000000..2615fbe5f Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/slack-desktop-with-app-messages.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/smart-subset-formation-flow.png b/smart_tests/docs/modules/ROOT/assets/images/smart-subset-formation-flow.png new file mode 100644 index 000000000..c6681943d Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/smart-subset-formation-flow.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/smart-tests-builds-interface-v1png.png b/smart_tests/docs/modules/ROOT/assets/images/smart-tests-builds-interface-v1png.png new file mode 100644 index 000000000..e6fb03d10 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/smart-tests-builds-interface-v1png.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/smart-tests-cli-3.png b/smart_tests/docs/modules/ROOT/assets/images/smart-tests-cli-3.png new file mode 100644 index 000000000..0be1cf226 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/smart-tests-cli-3.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/smart-tests-monthly-time-saved.png b/smart_tests/docs/modules/ROOT/assets/images/smart-tests-monthly-time-saved.png new file mode 100644 index 000000000..e3d432f64 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/smart-tests-monthly-time-saved.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/smart-tests_api_key_setting_20210613.png b/smart_tests/docs/modules/ROOT/assets/images/smart-tests_api_key_setting_20210613.png new file mode 100644 index 000000000..6f02fe9db Binary files /dev/null and 
b/smart_tests/docs/modules/ROOT/assets/images/smart-tests_api_key_setting_20210613.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/smart-tests_invite_url_20220613.png b/smart_tests/docs/modules/ROOT/assets/images/smart-tests_invite_url_20220613.png new file mode 100644 index 000000000..ee1c251c1 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/smart-tests_invite_url_20220613.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/smart-tests_settings_20220613.png b/smart_tests/docs/modules/ROOT/assets/images/smart-tests_settings_20220613.png new file mode 100644 index 000000000..95a691bbe Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/smart-tests_settings_20220613.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/subscription-ux.png b/smart_tests/docs/modules/ROOT/assets/images/subscription-ux.png new file mode 100644 index 000000000..f61b4889f Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/subscription-ux.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/subset-creation.png b/smart_tests/docs/modules/ROOT/assets/images/subset-creation.png new file mode 100644 index 000000000..9d58ac102 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/subset-creation.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/subset-exclude.png b/smart_tests/docs/modules/ROOT/assets/images/subset-exclude.png new file mode 100644 index 000000000..d4398d966 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/subset-exclude.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/subsetting-diagram-2x.png b/smart_tests/docs/modules/ROOT/assets/images/subsetting-diagram-2x.png new file mode 100644 index 000000000..1a97df3d0 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/subsetting-diagram-2x.png differ diff --git 
a/smart_tests/docs/modules/ROOT/assets/images/target-line.png b/smart_tests/docs/modules/ROOT/assets/images/target-line.png new file mode 100644 index 000000000..806885894 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/target-line.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-insights-example.png b/smart_tests/docs/modules/ROOT/assets/images/test-insights-example.png new file mode 100644 index 000000000..a812b6232 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-insights-example.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-pyramid.png b/smart_tests/docs/modules/ROOT/assets/images/test-pyramid.png new file mode 100644 index 000000000..a66c17ed0 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-pyramid.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-runs-log.png b/smart_tests/docs/modules/ROOT/assets/images/test-runs-log.png new file mode 100644 index 000000000..3df05142b Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-runs-log.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-2.png b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-2.png new file mode 100644 index 000000000..23986a632 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-2.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-3.png b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-3.png new file mode 100644 index 000000000..c04e3ace7 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-3.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-4.png b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-4.png new file mode 100644 index 000000000..1d00cb477 
Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition-4.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-session-definition.png b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition.png new file mode 100644 index 000000000..7c0c16c9c Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-session-definition.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-session-details-with-content.png b/smart_tests/docs/modules/ROOT/assets/images/test-session-details-with-content.png new file mode 100644 index 000000000..58eb73ef3 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-session-details-with-content.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-session-report-example.png b/smart_tests/docs/modules/ROOT/assets/images/test-session-report-example.png new file mode 100644 index 000000000..cb486352a Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-session-report-example.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/test-sessions-layout.png b/smart_tests/docs/modules/ROOT/assets/images/test-sessions-layout.png new file mode 100644 index 000000000..9e6b424dd Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/test-sessions-layout.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/testing-bottleneck-infinity.png b/smart_tests/docs/modules/ROOT/assets/images/testing-bottleneck-infinity.png new file mode 100644 index 000000000..5f0ba2288 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/testing-bottleneck-infinity.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/training-wheels.png b/smart_tests/docs/modules/ROOT/assets/images/training-wheels.png new file mode 100644 index 000000000..487fad226 Binary files /dev/null and 
b/smart_tests/docs/modules/ROOT/assets/images/training-wheels.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/trends.png b/smart_tests/docs/modules/ROOT/assets/images/trends.png new file mode 100644 index 000000000..332b118f0 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/trends.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/triage-email-demo.avif b/smart_tests/docs/modules/ROOT/assets/images/triage-email-demo.avif new file mode 100644 index 000000000..6a967374e Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/triage-email-demo.avif differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/use-case-1.png b/smart_tests/docs/modules/ROOT/assets/images/use-case-1.png new file mode 100644 index 000000000..d95e41f4d Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/use-case-1.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/use-case-2.png b/smart_tests/docs/modules/ROOT/assets/images/use-case-2.png new file mode 100644 index 000000000..9b2d2babb Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/use-case-2.png differ diff --git a/smart_tests/docs/modules/ROOT/assets/images/zis-with-groups.png b/smart_tests/docs/modules/ROOT/assets/images/zis-with-groups.png new file mode 100644 index 000000000..f4eba2b02 Binary files /dev/null and b/smart_tests/docs/modules/ROOT/assets/images/zis-with-groups.png differ diff --git a/smart_tests/docs/modules/ROOT/nav.adoc b/smart_tests/docs/modules/ROOT/nav.adoc new file mode 100644 index 000000000..3e76f2fce --- /dev/null +++ b/smart_tests/docs/modules/ROOT/nav.adoc @@ -0,0 +1,116 @@ +include::ROOT:partial$abbr.adoc[] + +* xref:ROOT:index.adoc[Overview] + +* xref:send-data-to-smart-tests:send-data-to-smart-tests.adoc[Send data to {PRODUCT}] +** xref:send-data-to-smart-tests:getting-started/getting-started.adoc[Getting started] +*** 
xref:send-data-to-smart-tests:getting-started/use-the-cli-with-a-public-repository.adoc[Use the CLI with a public repository] +*** xref:send-data-to-smart-tests:getting-started/migration-to-github-oidc-auth.adoc[Update tokenless authentication to use GitHub OIDC] +*** xref:send-data-to-smart-tests:getting-started/use-the-cli-with-docker-image.adoc[Use the CLI with Docker image] +** xref:send-data-to-smart-tests:record-builds/record-builds.adoc[Record builds] +*** xref:send-data-to-smart-tests:record-builds/choose-a-value-for-build-name.adoc[Choose a value for build name] +*** xref:send-data-to-smart-tests:record-builds/record-builds-from-multiple-repositories.adoc[Record builds from multiple repositories] +*** xref:send-data-to-smart-tests:record-builds/run-under-restricted-networks.adoc[Run under restricted networks] +*** xref:send-data-to-smart-tests:record-builds/deal-with-shallow-clones.adoc[Deal with shallow clones] +** xref:send-data-to-smart-tests:record-sessions/record-sessions.adoc[Record sessions] +** xref:send-data-to-smart-tests:subset/subset-predictive-test-selection.adoc[Subset for Predictive test selection] +** xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results] +*** xref:send-data-to-smart-tests:record-test-results/convert-test-reports-to-junit-format.adoc[Convert test reports to JUnit format] +*** xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs] +*** xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc[Manage complex test session layouts] +*** xref:send-data-to-smart-tests:record-test-results/use-flavors-to-run-the-best-tests-for-an-environment.adoc[Use 'flavors' to run the best tests for an environment] +*** xref:send-data-to-smart-tests:record-test-results/separate-out-test-suites.adoc[Separate out test suites] +*** xref:send-data-to-smart-tests:record-test-results/attach-log-files.adoc[Attach 
log files] + +.Features +* xref:features:intelligent-test-failure-diagnostics.adoc[Intelligent Test Failure Diagnostics] +* xref:features:predictive-test-selection.adoc[Predictive Test Selection] +** xref:features:predictive-test-selection/use-cases-for-predictive-test-selection.adoc[Use cases for Predictive Test Selection] +** xref:features:predictive-test-selection/observe-subset-behavior.adoc[Observe subset behavior] +** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/request-and-run-a-subset-of-tests.adoc[Request and run a subset of tests] +*** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc[Choose a subset optimization target] +*** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the CLI] +*** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/smart-subset-optimization-target.adoc[Smart subset optimization targets] +*** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/combine-with-rule-based-test-selection.adoc[Combine with rule-based test selection] +*** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/replace-static-parallel-suites-dynamic-parallel-subset.adoc[Replace static parallel suites with a dynamic parallel subset] +*** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] +*** xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/use-groups-to-split-subsets.adoc[Use groups to split subsets] +** 
xref:features:predictive-test-selection/smart-tests-extension-for-VS-code.adoc[{PRODUCT} extension for VS code] +** xref:features:predictive-test-selection/viewing-time-savings.adoc[View time savings] +** xref:features:predictive-test-selection/how-we-select-tests.adoc[How we select tests] +** xref:features:predictive-test-selection/faq.adoc[FAQ] +* xref:features:trends.adoc[Test Suite Trends] +* xref:features:unhealthy-tests.adoc[Unhealthy Test Insights] +* xref:features:test-results-and-reports.adoc[Test Reports] +* xref:features:test-suite-parallelization.adoc[Test Suite Parallelization] +** xref:features:test-suite-parallelization/parallelize-your-test-suite-with-the-smart-tests-cli.adoc[Parallelize your test suite with the {PRODUCT} CLI] +* xref:features:test-notifications.adoc[Test Notifications] +** xref:features:test-notifications/github-app-for-test-sessions.adoc[Github app for test sessions] +** xref:features:test-notifications/test-notifications-via-slack.adoc[Test Notifications via Slack] + +.Concepts +* xref:concepts:object-model.adoc[Object model] +* xref:concepts:organization.adoc[Organization] +* xref:concepts:workspace.adoc[Workspace] +* xref:concepts:build.adoc[Build] +* xref:concepts:test-session.adoc[Test Session] +* xref:concepts:subset.adoc[Subset] +* xref:concepts:insight.adoc[Insight] +* xref:concepts:branch.adoc[Branch] +* xref:concepts:test-suite.adoc[Test Suite] + +.Resources +* xref:resources:onboarding-guide.adoc[{PRODUCT} Onboarding guide] +* xref:resources:cli-reference.adoc[CLI reference] +* xref:resources:integrations.adoc[Integrations] +** xref:resources:integrations/adb.adoc[Android Debug Bridge (adb)] +** xref:resources:integrations/ant.adoc[Ant] +** xref:resources:integrations/bazel.adoc[Bazel] +** xref:resources:integrations/behave.adoc[Behave] +** xref:resources:integrations/ctest.adoc[CTest] +** xref:resources:integrations/cucumber.adoc[cucumber] +** xref:resources:integrations/cypress.adoc[Cypress] +** 
xref:resources:integrations/dotnet-test.adoc[dotnet test] +** xref:resources:integrations/use-the-generic-file-based-runner-integration.adoc['file' profile for unsupported test runners] +** xref:resources:integrations/go-test.adoc[Go Test] +** xref:resources:integrations/googletest.adoc[GoogleTest] +** xref:resources:integrations/gradle.adoc[Gradle] +** xref:resources:integrations/jest.adoc[Jest] +** xref:resources:integrations/karma.adoc[Karma] +** xref:resources:integrations/maven.adoc[Maven] +** xref:resources:integrations/minitest.adoc[minitest] +** xref:resources:integrations/playwright.adoc[Playwright] +** xref:resources:integrations/prove.adoc[prove for Perl] +** xref:resources:integrations/pytest.adoc[pytest] +** xref:resources:integrations/raw.adoc['raw' profile for custom test runners] +** xref:resources:integrations/robot.adoc[Robot] +** xref:resources:integrations/rspec.adoc[RSpec] +* xref:resources:supported-languages.adoc[Supported languages] +** xref:resources:supported-languages/c-plus-plus.adoc[C++] +** xref:resources:supported-languages/dotnet.adoc[.NET] +** xref:resources:supported-languages/go.adoc[Go] +** xref:resources:supported-languages/java.adoc[Java] +** xref:resources:supported-languages/javascript.adoc[JavaScript] +** xref:resources:supported-languages/perl.adoc[Perl] +** xref:resources:supported-languages/python.adoc[Python] +** xref:resources:supported-languages/ruby.adoc[Ruby] +* xref:resources:supported-test-frameworks.adoc[Supported test frameworks] +** xref:resources:supported-test-frameworks/appium.adoc[Appium] +** xref:resources:supported-test-frameworks/cucumber.adoc[Cucumber] +** xref:resources:supported-test-frameworks/googletest.adoc[GoogleTest] +** xref:resources:supported-test-frameworks/jest.adoc[Jest] +** xref:resources:supported-test-frameworks/junit.adoc[JUnit] +** xref:resources:supported-test-frameworks/minitest.adoc[minitest] +** xref:resources:supported-test-frameworks/nunit.adoc[NUnit] +** 
xref:resources:supported-test-frameworks/prove.adoc[prove for Perl]
+** xref:resources:supported-test-frameworks/robot.adoc[Robot]
+** xref:resources:supported-test-frameworks/rspec.adoc[RSpec]
+** xref:resources:supported-test-frameworks/selenium.adoc[Selenium]
+** xref:resources:supported-test-frameworks/testng.adoc[TestNG]
+* xref:resources:ci-tool-integrations.adoc[CI tool integrations]
+** xref:resources:ci-tool-integrations/github-actions.adoc[GitHub Actions]
+* xref:resources:policies.adoc[Policies]
+** xref:resources:policies/data-privacy-and-protection.adoc[Data privacy and protection]
+** xref:resources:policies/data-examples.adoc[Data examples]
+** xref:resources:policies/security-policies.adoc[Security policies]
+* xref:resources:troubleshooting.adoc[Troubleshoot]
diff --git a/smart_tests/docs/modules/ROOT/pages/getting-started.adoc b/smart_tests/docs/modules/ROOT/pages/getting-started.adoc
new file mode 100644
index 000000000..4740ad8fa
--- /dev/null
+++ b/smart_tests/docs/modules/ROOT/pages/getting-started.adoc
@@ -0,0 +1,9 @@
+include::ROOT:partial$abbr.adoc[]
+:slug: getting-started
+= Getting Started
+
+After reviewing the xref:ROOT:index.adoc[Product Overview], you can sign up for a {PRODUCT} account at https://app.launchableinc.com/signup[https://app.launchableinc.com/signup].
+
+After creating your user account and verifying your email address (if necessary), you'll be prompted to create an xref:concepts:organization.adoc[Organization] for your company and a xref:concepts:workspace.adoc[Workspace] for your test suite.
+
+Then you can start sending data. For more information, refer to xref:send-data-to-smart-tests:send-data-to-smart-tests.adoc[Send data to {PRODUCT}].
diff --git a/smart_tests/docs/modules/ROOT/pages/index.adoc b/smart_tests/docs/modules/ROOT/pages/index.adoc new file mode 100644 index 000000000..fd26c3ecb --- /dev/null +++ b/smart_tests/docs/modules/ROOT/pages/index.adoc @@ -0,0 +1,55 @@ += Overview + +https://www.launchableinc.com/[{PRODUCT}] is a *software development intelligence platform* focused on continuous integration (CI). Using data from your CI runs, {PRODUCT} provides various features to speed up your testing workflow so you can ship high-quality software faster. + +[#predictive-test-selection] +== Predictive test selection + +xref:features:predictive-test-selection.adoc[Predictive test selection] uses large language models (LLMs) to understand your code changes and automatically identify the most relevant tests to run. + +By analyzing both source code and test files, {PRODUCT} builds a deep understanding of how your code is structured and how different components relate to one another. Using commit information, it calculates the similarity between changed files and test files to determine which tests are most likely impacted — producing an optimized test execution plan where critical tests are prioritized and redundant ones are safely skipped. + +With Predictive test selection, {PRODUCT} brings code-aware test execution intelligence to your CI process — helping teams run fewer but relevant tests, gain faster feedback, and ship high-quality software with confidence. + +.Predictive test selection data flow +image::ROOT:pts-v2-working.png[Predictive test selection data flow,role="screenshot"] + +For more information, refer to xref:features:predictive-test-selection.adoc[Predictive test selection]. + +[#test-suite-trends] +== Test suite trends + +To see aggregate info about your test sessions, including average test session duration, test session frequency, and how often sessions fail refer to xref:features:trends.adoc[test suite trends]. 
+ +Seeing this data over time gives you a picture of how your test suite evolves; for example, perhaps tests are taking twice as long as they did six months ago, and you need to cut it down! Similarly, perhaps your team's running tests a lot more often than expected, driving up resource costs. Or maybe there are some broken tests that are driving up the overall failure rate. + +[#unhealthy-tests] +== Unhealthy tests + +{PRODUCT} also surfaces unhealthy tests within your test suite allowing you to fix them and run tests more reliably. + +[#test-reports] +== Test reports + +As test results are sent to {PRODUCT} using the {PRODUCT} CLI, you can view those xref:features:test-results-and-reports.adoc[test results and reports] in the {PRODUCT} dashboard. {PRODUCT} provides a richer view of test results, helping developers triage and fix failures more quickly. + +For quick access, the {PRODUCT} CLI prints out a link to the results view every time you record results. In addition, {PRODUCT} shows all of your test runs in one place for easy navigation. No more digging around build logs. + +[#test-suite-parallelization] +== Test suite parallelization + +Using duration information from past test runs, {PRODUCT} can automatically create evenly-sized bins of tests for you to run your test suite in parallel. For more information, refer to xref:features:test-suite-parallelization.adoc[test suite parallelization]. + +[#test-notifications] +== Test Notifications + +{PRODUCT} provides test notifications so developers can immediately take action on the results, whether that's to triage failures or merge a PR. Developers can create subscriptions to receive personal notifications about test sessions run against their branches/pull requests or other test sessions they care about. 
+The following apps are available for receiving notifications: + +* GitHub app: xref:features:test-notifications/github-app-for-test-sessions.adoc[GitHub app for test sessions] + +* Slack app: xref:features:test-notifications/test-notifications-via-slack.adoc[Test Notifications via Slack] + +The apps will send notifications _directly_ to developers eliminating the need to manually check their email or navigate to their pull request to see if their tests passed. + +To sign up for a {PRODUCT} account, refer to https://app.launchableinc.com/signup[https://app.launchableinc.com/signup]. Once done, you can start xref:send-data-to-smart-tests:send-data-to-smart-tests.adoc[send data to {PRODUCT}]. \ No newline at end of file diff --git a/smart_tests/docs/modules/ROOT/partials/abbr.adoc b/smart_tests/docs/modules/ROOT/partials/abbr.adoc new file mode 100644 index 000000000..dbd5b288e --- /dev/null +++ b/smart_tests/docs/modules/ROOT/partials/abbr.adoc @@ -0,0 +1,11 @@ +//include user-defined abbreviation for conditional directives for this specific product repo +include::ROOT:partial$conditionals.adoc[] + +//include shared abbreviations across all docsite repos +include::docsite-global::partial$abbr.adoc[] + +//add additional attributes specific to this component ONLY below this line +//attributes that should be available to all docs and training go in the docsite-global abbr.adoc file + +//set the value of {PRODUCT} that should be used throughout this component +:PRODUCT: CloudBees Smart Tests diff --git a/smart_tests/docs/modules/ROOT/partials/conditionals.adoc b/smart_tests/docs/modules/ROOT/partials/conditionals.adoc new file mode 100644 index 000000000..e69de29bb diff --git a/smart_tests/docs/modules/concepts/pages/branch.adoc b/smart_tests/docs/modules/concepts/pages/branch.adoc new file mode 100644 index 000000000..7d79b4d2e --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/branch.adoc @@ -0,0 +1,12 @@ += Branch +:slug: branch + +Branch is an attribute of 
a xref:concepts:build.adoc[build] that tracks which branch of development the build came from. This information helps you focus on the area you want to look into. For example, if you are responsible for the overall quality in the "main" branch, then tests run against builds from the "main" branch are of interest, but tests running in the feature branches might not be.
+
+By default, branch information is obtained automatically from the Git workspace when `smart-tests record build` runs.
+
+== Manual branch configuration
+
+If your build setup is complex, for example if your build spans multiple Git repositories, then you will be asked to record this information explicitly via `smart-tests record build --branch NAME` option.
+
+In these situations, the notion of "branch" starts to deviate from that of Git branch, but conceptually there's still a logical thread of development, and you should be able to name it. Really the key idea here is that when you have parallel threads of developments (such as production vs dev, or version4 vs version5), we'd like to be able to separate them.
\ No newline at end of file
diff --git a/smart_tests/docs/modules/concepts/pages/build.adoc b/smart_tests/docs/modules/concepts/pages/build.adoc
new file mode 100644
index 000000000..39e734ea3
--- /dev/null
+++ b/smart_tests/docs/modules/concepts/pages/build.adoc
@@ -0,0 +1,42 @@
+= Build
+:slug: build
+
+Every time you run automated tests, you're testing the behavior of some software. A *build* represents that software. Each time you send test results to {PRODUCT}, you record them against a specific build so that {PRODUCT} knows that you ran X tests against Y software with Z results.
+
+Therefore, before you run your tests, you can create a build using `smart-tests record build`.
+
+This step is not required. However, we strongly recommend recording builds if you plan to use Predictive Test Selection.
If recording builds is a challenge -- for example, if you need to coordinate multiple teams to make changes to your pipeline -- start recording tests first, then come back to recording builds. + +== Build attributes + +Every build has two primary attributes: + +. Its name (refer to xref:send-data-to-smart-tests:record-builds/choose-a-value-for-build-name.adoc[choose a value for build name]). +. Its relationships to commits in Git repositories. + +Let's expand on the second part: the relationship between builds and repositories. + +When you record a build, you tell the CLI which source repo(s) and commit hash(es) the software was built from. For example, in the simplest case, the software is a single binary built from code in a single Git repository. + +First, assume you've cloned the Git repository into the current directory (`.`) and that the relevant `HEAD` commit has already been checked out (e.g., `29932f39`). This is typically already available if you're building software and running tests on the same machine. + +So, once you've done that, the command looks like this: + +`smart-tests record build --build $BUILD_NAME --branch $BRANCH_NAME --source src=.` + +Running this command in your CI pipeline creates a build in your {PRODUCT} workspace. That build has a name (whatever `$BUILD_NAME` expanded to, e.g., `jenkins-myproject-123`) and one repository commit relation (name: `src` , commit: `29932f39`). + +== Commit collection + +By default, the `smart-tests record build` command _also_ runs the `smart-tests record commit` command. + +This command collects the details of the changes in each commit in your repository (not just the `HEAD` commit) so that changes between builds can be compared later. + +[NOTE] +-- +By default, `smart-tests record build` runs `smart-tests record commit` , but these operations can be separated. 
+-- + +== More complex build/test pipelines + +However, in many other cases, the software being tested might be a single binary built from several repositories. Furthermore, the tested software might combine several services deployed to a single testing environment. The xref:send-data-to-smart-tests:record-builds/record-builds-from-multiple-repositories.adoc[record builds from multiple repositories] page outlines how to instrument your pipeline in these situations. \ No newline at end of file diff --git a/smart_tests/docs/modules/concepts/pages/insight.adoc b/smart_tests/docs/modules/concepts/pages/insight.adoc new file mode 100644 index 000000000..f7cb9b7a1 --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/insight.adoc @@ -0,0 +1,13 @@ += Insight +:slug: insight + +An *insight* is a presentation of [some aggregate data within {PRODUCT}] that helps [a persona] identify or fix [a problem] that has an [impact]. + +For example: + +* The *test session duration graph* _(insight)_ helps a team lead _(persona)_ identify test session duration slowly creeping up _(problem)_ , which signals increased developer cycle time _(impact of the problem)_ . +* The *test session frequency graph* helps a team lead identify that tests are being run less often, which signals various negative health metrics (like increased cycle time, reduced quality, or fewer changes flowing through the pipeline). +* The *test session failure ratio graph* helps a team lead identify that tests fail more often, which could signal release instability. +* The *flaky test table* helps developers identify which tests to fix first to reduce flakiness in their team’s test suite. + +Stay tuned as we add more insights and insights pages! 
\ No newline at end of file diff --git a/smart_tests/docs/modules/concepts/pages/object-model.adoc b/smart_tests/docs/modules/concepts/pages/object-model.adoc new file mode 100644 index 000000000..dcf0c97a1 --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/object-model.adoc @@ -0,0 +1,20 @@ += Object model +:slug: object-model + +{PRODUCT}'s object model includes concepts that reflect objects in your development/CI environment (like xref:concepts:build.adoc[Build] and xref:concepts:test-session.adoc[Test Session]) alongside objects which are value-add concepts unique to {PRODUCT} like xref:concepts:subset.adoc[Subset] and xref:concepts:insight.adoc[Insight]. + +The diagram below illustrates how these objects relate to one another: which entities own others, which ones are associated only as context, and how information flows through the system. It is intended to help build a clear mental model of the product and understand the role of each entity you interact with. + +image::ROOT:object-model-v2.png[] + + +For more information, refer to: + +* xref:concepts:organization.adoc[Organization] +* xref:concepts:workspace.adoc[Workspace] +* xref:concepts:build.adoc[Build] +* xref:concepts:test-session.adoc[Test Session] +* xref:concepts:subset.adoc[Subset] +* xref:concepts:insight.adoc[Insight] +* xref:concepts:branch.adoc[Branch] +* xref:concepts:test-suite.adoc[Test Suite] diff --git a/smart_tests/docs/modules/concepts/pages/organization.adoc b/smart_tests/docs/modules/concepts/pages/organization.adoc new file mode 100644 index 000000000..fb636e1a5 --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/organization.adoc @@ -0,0 +1,45 @@ += Organization +:slug: organization + +The *organization* is the top-level object in the {PRODUCT} object model. + +== Users and workspaces + +*Users* are members of organizations. *Workspaces* belong to organizations. + +A user may be a member of only one organization at a time. 
All members of an organization can access all workspaces in that organization. + +If your organization has multiple workspaces, you can navigate between them using the dropdown in the left navigation. + +Contact your customer success manager if you'd like to create a new workspace in your organization. + +== Organization settings + +Organization settings can be found on the settings page in any _workspace_ in your organization: + +image::ROOT:smart-tests_settings_20220613.png[] + +=== Automatic Organization Discovery + +This feature allows users on the same email domain to automatically join a specified {PRODUCT} organization upon signing up. + +To enable this feature for your organization, mailto:support@launchableinc.com[contact us] . It is not shown in the UI. + +=== Organization invitation link + +Create an invitation link to invite your teammates to access the {PRODUCT} dashboard. Just create a single link and share it with your team. They'll be prompted to join your organization after they click the link and sign up for an account. Alternatively, contact us to enable Automatic Organization Discovery (above). + +image::ROOT:smart-tests_invite_url_20220613.png[] + +[NOTE] +-- +Invitation links expire after 90 days. After that, simply create a new one. +-- + +=== SAML 2.0 + +{PRODUCT} supports SAML 2.0 for SSO authentication. Most Identity Providers (e.g. Okta, OneLogin, etc.) support SAML 2.0 for SSO. + +Contact your customer success manager to enable this feature for your organization. You'll need to provide {PRODUCT} an X.509 signing certificate from your Identity Provider in PEM or CER format. + +After SAML is enabled for your organization, all organization members must log in with SAML using an account under your email domain. They can't log in with their previous credentials, such as an email and password combination or a GitHub account. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/concepts/pages/subset.adoc b/smart_tests/docs/modules/concepts/pages/subset.adoc new file mode 100644 index 000000000..b62604f55 --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/subset.adoc @@ -0,0 +1,176 @@ += Subset +:slug: subset + +A *subset* is a set of tests dynamically selected from a larger test suite using xref:features:predictive-test-selection.adoc[Predictive test selection]. +_Dynamically selected_ refers to how the tests returned in subsets change based on the request parameters. + +== Properties + +A subset is the output of a subset _request_ made using the `smart-tests subset` CLI command. You make a subset request every time you want to run a subset of tests in your CI pipeline: + +.Subset diagram +image::ROOT:subsetting-diagram-2x.png[Subset diagram,role="screenshot"] + +A subset request requires the following inputs: + +. The xref:concepts:build.adoc[Build] being tested. +. A subset *optimization target*. +. The *test runner* to use. +. The *input test list*: the complete list of tests that would typically run in a non-subset session (or "full run"). + +And outputs: + +. A *subset* list of tests formatted for your test runner. +. [Optional] The *remainder* list of tests formatted for your test runner. + +=== Build being tested + +When you request a subset of tests to run in your CI process, you pass in the name of the xref:concepts:build.adoc[Build] being tested: + +`smart-tests subset --build $BUILD_NAME --session $SESSION_NAME [other options...]` + +This is important so that the Predictive Test Selection service can analyze the changes in the build and select tests appropriately. + +=== Optimization target + +When you request a subset of tests to run in your CI process, you include an *optimization target* : + +[source] +---- +smart-tests subset \ + # one of: + --target [PERCENTAGE] + # or + --confidence [PERCENTAGE] + # or + --time [STRING] \ + [other options...] 
+---- + +=== Test runner + +When you request a subset of tests, specify the test runner you will run tests with. This value should be the same between `smart-tests subset` and `smart-tests record tests` commands. + +The CLI uses this parameter to adjust three things automatically: + +* Input test list format +* Subset altitude +* Output test list format + +==== Input test list format + +The complete list of tests you would typically run is a crucial input to any subset request. {PRODUCT} uses this list to create a subset of tests. + +How this list is generated, formatted, and passed into the `smart-tests subset` depends on the test runner. In general, you don't have to worry about creating this list; the documentation for each test runner goes over the specific flow for your tool. + +However, for completeness, we'll outline the various methods used across test runners: + +. Some test runners can generate a list of tests via a particular command. The output of this command is then passed into `smart-tests subset`. +. Other test runners don't provide that feature. In that case, you pass the _directory/directories_ containing your tests into `smart-tests subset`. The CLI then creates the list of tests by scanning those directories and identifying tests using pattern-matching. +. Furthermore, some frameworks _can_ list individual tests only after compiling test packages. In this case, generating a list of higher-level packages can be preferable to individual test cases. (This relates to the next section.) + +==== Subset altitude and test items + +To run a subset of tests, you pass the returned subset list into your test runner for execution. + +Each test runner has an option for specifying a list of tests to run, and these options allow for different 'altitudes' of filtering. For example, some test runners only let you pass in a list of _files_ to run, others support filtering by _class_ , while some support filtering by _test case_ or _method_. 
+ +Based on the test runner specified in `smart-tests subset`, the CLI automatically outputs a list of tests using the hierarchy level supported by that test runner. + +[NOTE] +-- +Another factor that impacts subset altitude is the ability of the test runner/CLI to _list_ tests at a low altitude. (See above section for more info.) +-- + +For example, Maven supports filtering by class, so we say that Maven's _subset altitude_ is _class_. A test item for Maven is equivalent to a class. Test results captured using `smart-tests record tests` for Maven will include both class _and_ test case identifiers, but the output of `smart-tests subset` will include a list of classes. + +Here's the mapping for all test runners: + +|=== +|Test runner |Altitude + +|Android Compatibility Suite (CTS) +|Class + +|Android Debug Bridge (adb) +|Class + +|Ant +|Class + +|Bazel +|Target + +|Behave +|File + +|Ctest +|Test case + +|cucumber +|File + +|Cypress +|File + +|dotnet test +|Test case + +|Go Test +|Test case + +|GoogleTest +|Test case + +|Gradle +|Class + +|Jest +|File + +|Karma +|File + +|Maven +|Class + +|minitest +|File + +|Nunit Console Runner +|Test case + +|pytest +|Test case + +|Robot +|Test case + +|RSpec +|File +|=== + +==== Output test list format + +To run a subset of tests, you pass the returned subset list into your test runner for execution. + +Each test runner has a method or option for specifying a list of tests to run. For example, one test runner might expect a comma-delimited list of tests, whereas another might expect a list separated by spaces, etc. + +The CLI adjusts the output format automatically based on the test runner used in the request. In general, you don't need to worry about the output format because you'll pass it directly into your test runner per the documentation for your tool. But this does mean that the contents of subset files/outputs change based on the test runner value. 
+ +=== Input test list + +As described above, the complete list of tests you typically run is a crucial input to any subset request. {PRODUCT} uses this list to create a subset of tests. + +This list is essential because it can change between requests due to: + +* new tests, +* sub-suites being tested (refer to xref:concepts:workspace.adoc#sub-suites-within-larger-test-suites[Sub-suites within larger test suites]), and +* multiple test runner invocations per test session (refer to xref:concepts:test-session.adoc#static-bins[Static bins]). + +In general, you don't have to worry about creating the input test list, but it's essential to understand this concept because it relates to your optimization target. For more information, refer to xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc[Choose a subset optimization target]. + +[NOTE] +-- +With xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero input subsetting], {PRODUCT} generates the input test list for you. +-- \ No newline at end of file diff --git a/smart_tests/docs/modules/concepts/pages/test-session.adoc b/smart_tests/docs/modules/concepts/pages/test-session.adoc new file mode 100644 index 000000000..38eaec04c --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/test-session.adoc @@ -0,0 +1,105 @@ += Test Session +:slug: test-session + +== Overview + +After running your test suite against a xref:concepts:build.adoc[Build] in your CI system, you record your test results in {PRODUCT} using the CLI. Those results are recorded against a *test session* . + +Therefore, a test session is a record of: + +. A certain list of tests that ran +. How long each test took to run +. Each test's pass/fail/skipped status +. 
Aggregate statistics about the above (e.g., total duration, total count, and total passed/failed/skipped counts) + +image::ROOT:object-model-june.png[] + +== Test sessions across {PRODUCT} + +Test sessions are a key {PRODUCT} concept and, as such, are used for lots of purposes. + +=== Test results and reports + +xref:features:test-results-and-reports.adoc[Test results and reports] are organized by test sessions. Each test session has a details page that shows aggregate statistics about the session, including test counts, total duration, and failed tests. + +=== Insights + +xref:concepts:insight.adoc[Insights], particularly xref:features:trends.adoc[Trends], are aggregated by test session. For example, the _Test session duration_ and _Test session frequency_ insights show data aggregated across test sessions in a workspace. + +=== Predictive Test Selection + +Test sessions are used for evaluating xref:features:predictive-test-selection.adoc[Predictive Test Selection] models. For example, the Confidence curve is built by running existing test sessions through the model to see how long it would have taken for a model to find a failing test in a failing run. Therefore, the length of the X-axis of the Confidence curve corresponds with the length of your longest recent test session. + +[#test-session-layouts] +== Test session layouts + +Different teams organize their tests in different ways, called "layouts." + +Your team's layout impacts how you should record tests in a workspace. This section outlines a few common layouts, including guidance on when you might want to split your runs into multiple test sessions against a single build. For guidance on splitting tests between _workspaces_, refer to xref:concepts:workspace.adoc[Workspace] and, in particular, xref:concepts:workspace.adoc#test-suites-and-workspaces[Test suites and workspaces]. + +=== Default layout + +In many cases, builds and test sessions map 1:1. 
For example, a developer pushes a change, the change is built, and the build is tested. The test runner (e.g., pytest) is run once with a single list of tests. + +The CLI handles this default case without any extra steps. Just run `smart-tests record tests` at the end of your run to capture test results in a single session. + +.Test session definition 1 +image::ROOT:test-session-definition.png[Test session definition 1,role="screenshot"] + +=== Run tests in different environments + +Some teams run their tests across several environments. For example, UI tests might be run in different browsers. In this case, you will have multiple test sessions per build: one per environment. + +.Test session definition 2 +image::ROOT:test-session-definition-2.png[Test session definition 2,role="screenshot"] + +Test sessions have an optional attribute called `flavor` that handles this. To implement this test session layout, see xref:send-data-to-smart-tests:record-test-results/use-flavors-to-run-the-best-tests-for-an-environment.adoc[Use 'flavors' to run the best tests for an environment]. + +[#parallel-tests] +=== Run tests in parallel + +Parallelization is a highly effective strategy for reducing test feedback delay. Depending on how you parallelize your tests and how you want to analyze them in {PRODUCT}, you may want to create multiple test sessions per build. + +==== Automatically generated parallel bins + +Some test runners support automatic test parallelization. In this scenario, the test runner typically lets you define how many workers to distribute tests via a configuration option. You kick off tests once, and then the test runner automatically distributes tests into bins for each worker. At the end of the run, test reports are often recorded in a single location on the same machine where tests were kicked off. + +This scenario _does not_ warrant separate test sessions for each worker. 
Since the parallelization process is automatic and opaque, the instrumentation is the same as the *default layout* described above. + +.Test session definition 3 +image::ROOT:test-session-definition-3.png[Test session definition 3,role="screenshot"] + +The main difference to note is that the test session duration shown in {PRODUCT} will be higher than the "wall clock time" perceived by developers since test reports include machine time and don't know about parallelization. Divide the test session duration by your parallelization factor to get the wall clock time. + +[WARNING] +-- +If your test runner automatically distributes tests to parallel workers but _does not_ deposit test result files to a location on the original machine, you'll need to manually create a test session before you run tests. For more information, refer to xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc#combine-test-reports-from-multiple-runs[Combine test reports from multiple runs]. +-- +[[static-bins]] +==== Static bins + +Some teams parallelize their tests by manually splitting them into _static_ lists of tests (otherwise known as *bins* ). They might organize tests by functional area (for easier triage), typical duration (to create bins of roughly equal length), or something else. + +In this scenario, the test runner is individually invoked once per bin, like this: + +.Test session definition 4 +image::ROOT:test-session-definition-4.png[Test session definition 4,role="screenshot"] + +This gives you two options for aggregating reports into test sessions: + +. *One session per bin* _(purple boxes)_. This option is preferred if: +.. You have fewer than ~10 bins. +.. You plan to use Predictive Test Selection because a 1:1:1 relationship between test runner invocations, test sessions, and subset requests is preferred for the best performance. +. *One session per pipeline* _(orange box)_. This option is preferred if: +.. You have more than ~10 bins. 
At this scale, it becomes less useful to analyze tests at the bin level and more useful to analyze them at the pipeline level. + +Ultimately, you are the expert on your test suite layout, so you can aggregate at the hierarchy level that makes sense to you. Depending on your choice, you may need to refer to <<managing-test-sessions-explicitly,Managing test sessions explicitly>>. + +[#managing-test-sessions-explicitly] +=== Managing test sessions explicitly + +In most cases, the CLI will manage test sessions on your behalf. The `smart-tests record tests` command and `smart-tests subset` command will automatically create a test session where needed. + +However, if your build, test, and/or test report collection processes occur across several machines/processes, you'll probably need to manage test sessions explicitly. This requires explicitly creating a test session using `smart-tests record session` and then passing the session value through your pipeline for use in `smart-tests subset` and `smart-tests record tests` . + +For a description of how to do this, refer to xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc[Manage complex test session layouts]. \ No newline at end of file diff --git a/smart_tests/docs/modules/concepts/pages/test-suite.adoc b/smart_tests/docs/modules/concepts/pages/test-suite.adoc new file mode 100644 index 000000000..f92fddf72 --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/test-suite.adoc @@ -0,0 +1,31 @@ += Test suite +:slug: test-suite + +If you are running different test suites against the same build, separating them into different "test suites" would improve the quality of data analysis throughout our system. + +== What is a test suite? + +Teams regularly group test cases into logical groups based on one or more combinations of: + +. the type of test (e.g., unit tests vs. UI tests). +. the tech stack used to run those tests (e.g., Maven for unit tests vs. 
Cypress for UI tests). +. when & where they run (e.g., nightly vs every change, in the CI system vs in the staging environment.) + +When thinking about test suites this way, the definition is more straightforward: for example, all the Maven unit tests become one test suite, and all the Cypress UI tests become another test suite. + +== How do you benefit from test suites? + +When you record test suites: + +* You can easily focus on test sessions & issues just from a particular test suite + +== How do you record a test suite? + +When you invoke the `smart-tests record session` command, specify the additional `--test-suite` option and give it a test suite name. + +[source,shell] +---- +$ smart-tests record session --test-suite "ui tests" ... +---- + +If using the `smart-tests record session` command explicitly as per _managing complex test session layout_, then the `--test-suite` option should be used with that command instead of `record tests`. diff --git a/smart_tests/docs/modules/concepts/pages/workspace.adoc b/smart_tests/docs/modules/concepts/pages/workspace.adoc new file mode 100644 index 000000000..914e73931 --- /dev/null +++ b/smart_tests/docs/modules/concepts/pages/workspace.adoc @@ -0,0 +1,73 @@ += Workspace +:slug: workspace + +A *workspace* contains all your *test sessions* and *builds* for a specific test suite. Each workspace belongs to an *organization* . + +{PRODUCT} takes the data you send to your workspace and uses it to provide added value, such as Insights and Predictive Test Selection. + +[#test-suites-and-workspaces] +== Test suites and workspaces + +As mentioned, a workspace should house data for a specific test suite. This means that your team might need multiple workspaces in your organization. You can switch between workspaces in your organization using the dropdown menu in the left navigation. 
+ +[NOTE] +-- +If you need to create another workspace in your organization, contact your customer success manager or email link:https://support.cloudbees.com[CloudBees Support]. +-- + +However, "test suite" is an ambiguous term that means different things to different teams, so let's expand on this. + +=== Suites divided by test types and/or test runners + +Many teams already divide their tests into logical groups based on: + +. the *type* of test (e.g., unit tests vs. UI tests, etc.), +. the *tech stack* used to run those tests (e.g., Maven for unit tests vs. Cypress for UI tests, etc.), or +. both (in most cases) + +If your team thinks about test suites this way, the decision should be straightforward: for example, you should send all your Maven unit tests into one workspace and all your Cypress UI tests into another. + +=== Suites divided by test characteristics + +If your team does _not_ think about test suites this way - for example, perhaps you use a custom test runner that abstracts away some of the tech stack differences - then you should divide your tests into different workspaces. All the test info you send to a specific workspace should exhibit similar characteristics, such as: + +. *Test count and test duration* - For example, unit test suites tend to have lots of very short tests; in contrast, UI test suites tend to have fewer tests that take longer to run +. *Overall duration* - some test suites take minutes, whereas others can take hours of machine time to run +. *Frequency* - Many teams run different suites in different phases of their software delivery lifecycle, usually based on how long they take to run +. *Failures* - Some test suites fail more often than others; within those failures, sometimes lots of tests fail versus only a few. Similarly, some suites are more *flaky* than others + +Common characteristics between tests in a workspace are important for two reasons: + +. Test insights are aggregated at the workspace level. 
If you mix tests with different characteristics, insights such as flakiness scoring will be less useful +. Predictive Test Selection models are evaluated at the workspace level. If you mix tests with many different characteristics, choosing the correct optimization target will be harder. + +[#sub-suites-within-larger-test-suites] +=== Sub-suites within larger test suites + +Sometimes teams consider small groups of tests _within_ larger suites as suites also. + +For example, a team might have a large group of tests called a "Regression test suite." Then, within that larger group, they divide tests into sub-suites based on components, like "Authentication," "API," etc. You might call these "sub-suites." + +Sometimes the entire test suite runs (perhaps nightly), and other times perhaps only sub-suites are run. + +If this applies to your team, we recommend using a single workspace for the entire _larger_ suite. In the example above, that team would have one workspace for the "Regression test suite." + +[NOTE] +-- +If you need to create another workspace in your organization, contact your customer success manager or email link:https://support.cloudbees.com[CloudBees Support]. +-- + +[#workspace-settings] +== Workspace settings + +Workspace settings are found under the Settings tab on the dashboard. + +.Settings tab on dashboard +image::ROOT:sending-data/create-api-key.png[Settings tab on dashboard,role=screenshot] + +[#workspace-api-key] +=== Workspace API key + +Each workspace gets its own API key for CLI authentication. API keys are created from the dashboard Settings tab. For more information, refer to xref:send-data-to-smart-tests:getting-started/getting-started.adoc#create-and-set-your-api-key[Getting started]. + +For security reasons, API keys can't be recovered once they've been created. If you lose your workspace API key, you must create a new one and update your CI scripts. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/intelligent-test-failure-diagnostics.adoc b/smart_tests/docs/modules/features/pages/intelligent-test-failure-diagnostics.adoc new file mode 100644 index 000000000..8611540f5 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/intelligent-test-failure-diagnostics.adoc @@ -0,0 +1,47 @@ += Intelligent Test Failure Diagnostics +:slug: intelligent-test-failure-diagnostics + +Test failure triage is not easy and can be time-consuming for both dev and QA teams. Determining whether issues are new or recurring and keeping track of them can be daunting tasks. + +Due to this overhead, test failure triaging can take up a significant amount of time and hinder teams that want to develop and release software quickly. + +{PRODUCT}'s *Intelligent Test Failure Diagnostics* , or ITFD, helps alleviate test triaging overhead and streamlines the process for Dev and QA teams. ITFD is a collection of various features found throughout the web app, which will be reviewed in this article. + +== Test Session-level Issues + +"Is there a unique underlying issue?" is the first question that teams ask when multiple tests fail in a test session. {PRODUCT} has up-leveled this concept as a first-class citizen. If there are multiple failures in a test session, it will group all similar failures with the help of Generative AI. Individuals involved in test failure triaging can then quickly review these issues. You can see an example of this below: + +As you send test session data to {PRODUCT}, an Issues tab will be visible inside each test session on the web app. + +image::ROOT:screenshot-2024-05-15-at-4-50-56-pm.png[] + +== Daily Report Email + +You can also configure the {PRODUCT} web app to send daily reports containing all Issues found in test sessions from each day. 
Developers and QAs can leverage these in daily triage meetings, where they can quickly discuss the top-level Issues instead of iterating over each failure. + +Configure the Daily Report Email at the workspace level in the web app. From the main page of the web app, click the cog icon in the upper left-hand pane to access the settings page. Once on the settings page, scroll down until you see the *Daily Report Email* section. It allows you to configure this report to be sent via email and on your chosen schedule. You can find an example of this below: + +image::ROOT:screenshot-2024-05-15-at-5-09-04-pm.png[] + +As far as the email report is concerned, you can find an example of that below: + +image::ROOT:triage-email-demo.avif[] + +== Workspace-level Issues (Beta feature) + +A workspace runs multiple test sessions. Each test session uncovers new underlying Issues. In addition to viewing the new Issues, teams care about tracking Issues across multiple test sessions and multiple branches. The Issues pane surfaces this information. + +From the {PRODUCT} web app's main page, there is a link to the left-hand pane titled *Issues* . Here, you can view all issues at the workspace level, filtered by branch. The branch can be configured in the top left corner of the page. + +[NOTE] +-- +To properly track branch information, the `--lineage` option must be added to various CLI commands. You can read more about that option on the https://www.launchableinc.com/docs/resources/cli-reference/[CLI reference page] . +-- + +image::ROOT:screenshot-2024-05-15-at-5-06-32-pm.png[] + +From this page, you can see each Issue and its unique ID. Other relevant details are included, such as the number of times failures have occurred for each Issue and the number of associated test cases. + +The unique ID for each Issue is clickable, and will display a page that includes additional details about each associated failing test case and test sessions in which these failures occurred. 
You can find an example below: + +image::ROOT:screenshot-2024-05-15-at-5-21-37-pm.png[] \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection.adoc new file mode 100644 index 000000000..036f10b08 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection.adoc @@ -0,0 +1,11 @@ += Predictive Test Selection +:slug: predictive-test-selection + +*Predictive Test Selection* uses large language models (LLMs) to understand your code changes and automatically identify the most relevant tests to run. + +By analyzing both source code and test files, {PRODUCT} builds a deep understanding of how your code is structured and how different components relate to one another. Using commit information, it calculates the similarity between changed files and test files to determine which tests are most likely impacted — producing an optimized test execution plan where critical tests are prioritized and redundant ones are safely skipped. + +With Predictive Test Selection, {PRODUCT} brings code-aware test execution intelligence to your CI process — helping teams run fewer but relevant tests, gain faster feedback, and ship high-quality software with confidence. 
+ +.Predictive test selection overview +image::ROOT:pts-v2-working.png[Predictive test selection overview,role="screenshot"] diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/faq.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/faq.adoc new file mode 100644 index 000000000..5edc3a437 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/faq.adoc @@ -0,0 +1,73 @@ +include::ROOT:partial$abbr.adoc[] + += FAQ +:slug: faq + +== Problems that Predictive Test Selection can solve + +=== Key value proposition: Ship code faster by testing faster + +Software development teams are pressured to deliver code faster while maintaining high quality. + +There are numerous approaches to help teams deliver code faster: building a CI pipeline, automating tests, and continuously delivering code to production. However, none address the problem that running tests (long or short) is the bottleneck in delivering software. + +image::ROOT:testing-bottleneck-infinity.png[] + +{PRODUCT}'s solution is to intelligently prioritize tests to reduce testing times without sacrificing quality. Developers get feedback much earlier in the development cycle. {PRODUCT} helps teams ship code faster by _testing_ faster. + +=== Where does {PRODUCT} Predictive Test Selection fit into my development pipeline? + +{PRODUCT} Predictive Test Selection is test agnostic - send data from the test suites that cause the most pain in your delivery cycle. {PRODUCT} can help reduce the time it takes to run them - delivering feedback earlier. + +image::ROOT:test-pyramid.png[] + +==== How do I use {PRODUCT} Predictive Test Selection to change my testing lifecycle? + +You can use Predictive Test Selection for either *Shift left* or *In-place reduction* . + +Think of *Shift left* as an approach to test for risks earlier by testing earlier (typically by moving some nightly tests earlier) in your pipeline. 
+ +Think of *In-place reduction* as an approach to provide faster feedback by running fewer tests in one stage (typically, tests run on each `git push` ) by shifting less important tests to a later stage. + +==== Does Predictive Test Selection only work for "greenfield" or "brownfield" applications? + +Predictive Test Selection helps in both use cases. + +The key question is, "Where are developers seeing pain from long testing times?" The answer tends to be different for different teams. Some teams want to cut down long integration test cycle times (from hours to minutes); this typically is the case in brownfield applications. Others want to cut down unit test cycles for faster feedback to developers (from 30 minutes to less than 5 minutes); this typically is the case in greenfield applications. + +[NOTE] +-- +The key is to bring {PRODUCT} in as early as you can so that you can get the benefit of shipping code faster earlier. +-- + +==== Does Predictive Test Selection work for microservices? Monoliths? + +A question with a similar flavor to "greenfield or brownfield applications" with a similar answer. Predictive Test Selection works equally well in both cases and solves similar challenges in both cases. + +*Monoliths* : Teams with monoliths typically use {PRODUCT} for the "nightly" test scenario. The team has accumulated a lot of tests over a period of time that cannot be run on every push. These teams look to shift left these nightly tests to provide feedback to developers as early as possible. Some teams use {PRODUCT} to help speed up unit or acceptance tests as the number of tests has increased. + +*Microservices* : Unit tests for individual microservices tend to run quickly for most organizations. However, the integration testing scenario remains a challenge (just as with monoliths). Thus, teams typically use {PRODUCT} to help with integration testing scenarios. 
Teams that really care about having a fast dev loop on every `git push` use {PRODUCT} to optimize their unit tests. + +== {PRODUCT}'s Impact + +=== What impact can {PRODUCT} Predictive Test Selection make? + +The key component that helps {PRODUCT} learn well is that the test suite should be run reasonably frequently and have *some failures* . Typically, teams see a 60-80% reduction in test times without impacting quality. + +The primary reason that teams like https://www.launchableinc.com/customers/reducing-test-runtime-for-a-ruby-on-rails-application/[Manba] use {PRODUCT} is that it has enabled the team to ship code faster and push more changes through. + +[quote, Masayuki Oguni, CEO and Lead Developer] +My test runtime went down 90 percent! Deployment to Heroku went from 30 to 10 minutes. It is great, just great! + +Larger teams have focused on improving developer productivity times and increasing software delivery velocity. See case studies of an https://www.launchableinc.com/customers/bmw-uses-launchable-to-optimize-testing-and-reduce-costs/[auto manufacturer] and a https://www.launchableinc.com/customers/silicon-valley-icon-improves-developer-happiness/[Silicon Valley Unicorn] using {PRODUCT} + +== Responses to key questions from customers + +=== Do I end up testing less with Predictive Test Selection? + +*Key Idea: You are testing more frequently* . In short, the answer is no: teams tend to test _more_ frequently. Because {PRODUCT} reduces testing times, you can execute more test runs as a result. The tests in each run are dynamically selected for every code change. + +=== What happens to the tests that are not run? This will surely impact quality! + +*Key idea: Defensive runs* . We ask our customers to view {PRODUCT} as a way to speed up tests and ship code faster. The tests that are not run as part of the subset should be run as part of a *defensive* run. The defensive run captures any tests that escape through the subset. 
The defensive run is instrumented to send us the test results, and thus {PRODUCT} uses this run (in addition to the subset) to train the model. + diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/how-we-select-tests.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/how-we-select-tests.adoc new file mode 100644 index 000000000..925341495 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/how-we-select-tests.adoc @@ -0,0 +1,19 @@ +include::ROOT:partial$abbr.adoc[] += How we select tests + + +Predictive test selection (PTS) focuses on semantic similarity between the application source code and the test source code. For example, when you modify the order processing part of the application, the AI looks for tests that are "relevant". A test case that places an order would be deemed more relevant than a test case that checks the user registration page, for example. + +In order to do this, we preprocess the source code of the application and the tests to extract features that represent the semantics of the code. This happens during the `record build` invocations, incrementally, as your application and test source code evolve. The global context of the application as a whole is taken into account, as well as the local context of each file. + +When a subset is requested, we first compute the "change under test"—what application code has changed? For nightly tests, this is the delta from the previous night's run. For a pull request, this is all the changes in the pull request. This is a behavior you can influence. + +From the change under test, we compute the semantic similarity between the change and the tests. +All the tests are ranked in order of similarity. The top of this list is selected. How many tests get selected depends on the optimization target you choose (e.g., 10% of tests, all tests with relevance above 80%, etc.). 
Depending on other goals set, this behavior is further affected by other considerations, such as the last time a test failed, how flaky a test is, and so on. + + +== What we don't do: code coverage + +PTS currently does not depend on code coverage information. This allows us to work with any programming language and test framework, without slowing down test executions or adding runtime instrumentation that increases installation complexity. + +This approach also makes {PRODUCT} an excellent fit for distributed systems and end-to-end tests, where test and application execution happen simultaneously over a distance in ways that are hard to track. diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/observe-subset-behavior.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/observe-subset-behavior.adoc new file mode 100644 index 000000000..95603a229 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/observe-subset-behavior.adoc @@ -0,0 +1,41 @@ += Observe subset behavior +:slug: observe-subset-behavior + +Sometimes teams want to observe the potential impact and behavior of running subsets in a real environment before they start using them. In other words, they want to measure the subsets' real-world efficacy against the simulation shown in xref:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc[Choosing a subset optimization target] . + +You can do this using *observation mode* , which is a special usage mode of `smart-tests record session` and, by extension, `smart-tests subset` . 
+ +To enable observation mode, add `--observation` to the `smart-tests subset` command you added to your pipeline after following xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subsetting with the {PRODUCT} CLI] : + +`smart-tests subset --observation ` + +[WARNING] +-- +If your pipeline requires you to create a test session separately using `smart-tests record session` (after following the instructions in xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc[Manage complex test session layouts] ), add the `--observation` option to _that_ command instead of `smart-tests subset` . + +[source] +---- +smart-tests record session \ + --build $BUILD_NAME \ + --observation + ... [other options] +---- + +Observation mode is a property of a xref:concepts:test-session.adoc[Test Session] , not a xref:concepts:subset.adoc[Subset] . +-- + +When observation mode is enabled for a test session, the output of each `smart-tests subset` command made against that test session will always include all tests, but the recorded results will be presented separately so you can compare running the subset against running the full suite. + +For example, let's imagine you have a test suite with 100 tests that each takes 1 second to run (100 seconds in total): + +* By default, if you requested a subset of this test suite with a 30% duration optimization target, the subset output would include 30 tests. +* However, with observation mode enabled, if you requested a subset of this test suite with a 30% duration optimization target, the subset output would include all 100 tests. {PRODUCT} will recognize this "full session" as a subset observation session when you record results. 
+ +Because you marked the session as an observation session, {PRODUCT} can analyze what would have happened if you had actually run a subset of tests, such as: + +* whether the subset would have caught a failing session and +* how much time you could have saved by running only the subset of tests + +image::ROOT:predictive-test-selection-observation-mode.png[] + +You can use this data to compare your real-world results with your *Confidence curve*. For more information, refer to xref:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc[Choose a subset optimization target]. \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc new file mode 100644 index 000000000..998e1983d --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc @@ -0,0 +1,84 @@ += Choose a subset optimization target +:slug: choose-a-subset-optimization-target + +The optimization target you choose determines how {PRODUCT} populates your PTS subsets after xref:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/smart-subset-optimization-target.adoc[prioritizing tests]. 
+ +.Optimization targets +image::ROOT:optimization-target.png[Optimization targets,role="screenshot"] + +You declare an optimization target when you run `smart-tests subset` as part of xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI] . + +There are three different optimization target types: + +* Confidence ( `--confidence` ) +* Fixed duration ( `--time` ) +* Percentage duration ( `--target` ) + +Use the *Confidence* *curve* shown on the Simulate page in the {PRODUCT} dashboard to help choose an optimization target. + +.Confidence curve +image::ROOT:confidence-curve.png[Confidence curve,role="screenshot"] + +== Confidence curve explanation +On the X-axis, we have test execution time. The upper bound of this axis is the maximum total execution time from all the evaluation sessions. + +On the Y-axis, we have the Confidence percentage. This is the probability of correctly catching a failing session. When choosing an optimization target, we need to run enough tests to not mark a session as passing when it would fail if you ran all the tests. That's what this percentage represents. + +The pink line represents the intersection of these two factors aggregated across all the evaluation test sessions. Logically, the line starts at (0,0%) and ends at ([max],100%): if we run no tests, we'll miss every failing session, and if we run all the tests, we'll catch every failing session. However, {PRODUCT}'s power is in the in-between. Notice how the pink line isn't straight: it has a steep portion on the left. This means that {PRODUCT} can catch more failing sessions in less time! + +For example, the above image tells us that if we set our optimization target to *90% confidence* , we should expect only to run 10 minutes of tests (X-axis) and expect to catch 90% of failing sessions (Y-axis). 
Similarly, if we set our optimization target to 25 minutes (X-axis), we should expect to catch 95% of failing sessions (Y-axis). Both of these are great improvements over running all the tests. + +== Optimization targets + +=== Confidence target ( `--confidence` ) + +[NOTE] +-- +The confidence target is designed for use with test suites where the list of tests in each xref:concepts:test-session.adoc[Test Session] used to train your model is the same each time. + +If your sessions have variable test lists, use the percentage time target instead. +-- + +*Confidence* is shown on the y-axis of a confidence curve. + +Confidence is the probability that the subset will catch a failing session. + +When you request a subset using `--confidence 90%` , {PRODUCT} will populate the subset with relevant tests up to the corresponding expected duration value on the x-axis. For example, if the corresponding duration value for 90% confidence is 3 minutes, {PRODUCT} will populate the subset with up to 3 minutes of the most relevant tests for the changes in that build. This is useful to start with because the duration should decrease over time as {PRODUCT} learns more about your changes and tests. + +[WARNING] +-- +It's possible for *all tests* to be returned in a subset request when you use `--confidence` . + +For example, let's say you request a subset with a 90% confidence target, which corresponds to 30 minutes of tests on the X-axis of your workspace's confidence curve. If the total estimated duration of the request's xref:concepts:subset.adoc[input test list] is less than 30 minutes, then all the input tests will be returned in the subset. + +This is why the confidence target should only be used with test suites that have consistent test lists. +-- + +==== Fixed time target ( `--time` ) + +[NOTE] +-- +The fixed time target is designed for use with test suites where the total duration of each run used to train the model is relatively stable. 
If your runs have highly variable duration, the percentage time target may be more useful. +-- + +*Time* is shown on the x-axis of a confidence curve. When you request a subset using `--time 10m` , {PRODUCT} will populate the subset with up to 10 minutes of the most relevant tests for the changes in that build. This is useful if you have a maximum test runtime in mind. + +[WARNING] +-- +It's possible for all tests to be returned in a subset request when you use `--time` . + +For example, let's say you request a subset with a time target of 30 minutes. If the total estimated duration of the request's xref:concepts:subset.adoc[input test list] is less than 30 minutes, then all the input tests will be returned in the subset. + +This is why the time target should only be used with test suites that have consistent test lists. +-- + +==== Percentage time target ( `--target` ) + +[NOTE] +-- +*Percentage time* is not yet shown in the {PRODUCT} dashboard. +-- + +When you request a subset using `--target 20%` , {PRODUCT} will populate the subset with 20% of the expected duration of the most relevant tests. For example, if the expected duration of the full list of tests passed to `smart-tests subset` is 100 minutes, {PRODUCT} will return up to 20 minutes of the most relevant tests for the changes in that build. + +This is useful if your test sessions vary in duration. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/smart-subset-optimization-target.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/smart-subset-optimization-target.adoc new file mode 100644 index 000000000..ee22428bd --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/smart-subset-optimization-target.adoc @@ -0,0 +1,95 @@ +include::ROOT:partial$abbr.adoc[] + += Smart subset optimization targets + +== The case for composing your own subsets + +Modern software teams juggle flaky tests, resource limits, tight deadlines, and evolving codebases, leading to challenges in their testing environment. + +{PRODUCT}’s composable subsets give you full control to navigate these problems – build your own test subset by using tools like optimization targets, sorting rules, and filters to control which tests you would like to run – all in a single command! + +.Subset formation flow +image::ROOT:smart-subset-formation-flow.png[Subset formation flow,role="screenshot"] + +Here are a few possible usage methods, backed by scenarios you may relate to: + +== How to make nightly test runs more slim without missing any regressions + +You want to ensure code coverage and run the entire test suite, but each run takes up too much time and the feedback to developers is late. + +With smart subsetting, you could compose a subset that takes significantly less time to run. At the same time you can make sure that a mix of the tests most likely to fail and the ones which haven’t been run at all is selected.
+ +Here’s how you would write the CLI command: + +[source,shell] +---- +smart-tests subset --goal-spec "select(timePercentage=25%),sortByNotRecentlySelected()" +---- + +To ensure 100% coverage, you would run this subset 3 times. (That’s possible now due to the time savings from the model’s smart subsets!) + +[NOTE] +==== +This example uses 25%, but the optimal percentage and frequency can vary by team. To ensure full test suite coverage, run the subset multiple times (e.g., three runs of 33% each or two runs of 50%), based on your chosen subset size. +==== + +.Rotate and optimize coverage +image::ROOT:illustration-rotate-optimize-coverage.png[Rotate and optimize coverage,role="screenshot"] + +== Ignore flakes: Reduce noise by ignoring flaky tests + +Flaky tests may be causing frequent failures leading to too much noise. This can cause delays and be frustrating for your team. However, removing these tests from the suite altogether is also not an option. + +With smart subsetting, you could compose a subset that ignores all tests that are flaky beyond a certain threshold, while still catching failures up to a high confidence level. + +Here’s how you would write the CLI command: + +[source,shell] +---- +smart-tests subset --goal-spec "dropFlakyTests(score=0.5),select(confidence=95%)" +---- + +.Ignore flakes and catch failures +image::ROOT:illustration-ignore-flakes-1.png[Ignore flakes and catch failures,role="screenshot"] + +== How to combine rule-based static test selection and AI-based dynamic test selection + +To prepare for release, it may be critical for your team to always run a set of pre-defined tests to ensure they are not failing. Along with that, you want to shorten the suite’s run time too. + +With smart subsetting, you could compose a subset which always includes tests provided by attaching a test prioritization file as a part of the command. 
+ +[source,shell] +---- +smart-tests subset --goal-spec "prioritizeByTestMapping(),select(confidence=80%)" +---- + +[NOTE] +==== +To use this feature, specify the `--prioritized-tests-mapping` option. +==== + +.Full test suite vs. whitelist mapped tests +image::ROOT:illustion-run-whitelisted-tests-1.png[Full test suite vs. whitelist mapped tests,role="screenshot"] + +== Run recently failed tests: Prioritize tests that have failed recently + +Tests that have failed in recent runs point to unstable or problematic areas in your codebase. You want to ensure code quality, but also reduce the time it takes to run the entire suite. + +With smart subsetting, you could compose subsets which prioritize recently failed tests to be re-run promptly in upcoming test runs, while shortening the test run time. + +[source,shell] +---- +smart-tests subset --goal-spec "prioritizeRecentlyFailed(time=24h),select(timePercentage=50%)" +---- + +.Run recently failed tests +image::ROOT:illustration-run-recently-failed-tests.png[Run recently failed tests,role="screenshot"] + +{PRODUCT}’s composable mechanism enables you to address these situations and more by defining custom sequences of operations to build your subsets. + +== Why this matters for your team? + +1. Cut costs and save on cloud resources. +2. Shorter feedback loops for developers. +3. No need to make trade-offs: Get both speed and test coverage while reducing costs and enhancing developer productivity. 
+ diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/request-and-run-a-subset-of-tests.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/request-and-run-a-subset-of-tests.adoc new file mode 100644 index 000000000..dc2177550 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/request-and-run-a-subset-of-tests.adoc @@ -0,0 +1,19 @@ += Request and run a subset of tests +:slug: request-and-run-a-subset-of-tests + +Once you've started, you can connect your test runner with {PRODUCT} to request and run a subset of tests selected using Predictive Test Selection. + +The diagram below illustrates the interactions between your tools, the {PRODUCT} CLI, and the {PRODUCT} platform: + +.Subsetting diagram +image::ROOT:subsetting-diagram-2x.png[Subsetting diagram,role="screenshot"] + +First, see xref:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc[Choose a subset optimization target] . + +Then, follow the instructions to start subsetting in your pipeline: + +* xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-test-runner-integrations.adoc[Subset with test runner integrations] +** Supports *nose*. + +* xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI] +** Supports *Android Debug Bridge*, *Ant*, *Bazel*, *Behave*, *CTest*, *cucumber*, *Cypress*, *GoogleTest*, *Go Test*, *Gradle*, *Jest*, *Karma*, *Maven*, *minitest*, *pytest*, *Robot*, *Rspec*, and other/custom test runners. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/combine-with-rule-based-test-selection.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/combine-with-rule-based-test-selection.adoc new file mode 100644 index 000000000..a5e8cc567 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/combine-with-rule-based-test-selection.adoc @@ -0,0 +1,49 @@ += Combine with rule-based test selection +:slug: combine-with-rule-based-test-selection + +If you have an existing test selection mechanism or other rule-based test selection mechanisms of the following pattern, you can combine them with {PRODUCT}'s ML-based predictive test selection. + +This usually results in sub-optimal performance when finding more failures from test runs, but this can improve psychological safety by making the algorithm more “explainable”; there’s a clear, unambiguous link between committed changes and selected tests. + +== Create a rule definition file + +Using a rule definition file that maps source directories to tests, you can provide rules to prioritize tests. For example, the following rule snippet says any change under `src/payment` should result in `payment_test.py` and `new_payment_test.py` being selected automatically: + +[source] +---- +{ ... + { + "src/payment" : ["file=test/payment_test.py", "file=test/new_payment_test.py"] + } +} +---- + +The full format of this file is as follows: + +[source] +---- +{ + "format": "prioritized-tests-v1", + "mappings": { + $REPO_NAME: { + $DIRECTORY: [$TEST_PATH, ...], + ... // repeatable + }, + ... // repeatable + } +} +---- + +* `$REPO_NAME` refers to the repository name given in `smart-tests record build --source REPO_NAME=DIR` .
That’s how the CLI can match the information in the mapping file to what has changed. +* `$DIRECTORY` refers to the relative path within the enclosing repository. +* `$TEST_PATH` is the name of a test represented in {PRODUCT}’s internal test path notation. You can use xref:resources:cli-reference.adoc[the inspect tests command] to see the test paths for your recorded tests. + +== Use the rule definition file + +Pass this definition file through the `--prioritized-tests-mapping` option when making a `smart-tests subset` invocation, as follows: + +`smart-tests subset --prioritized-tests-mapping foo/bar/test-mapping.json ...` + +== Behaviors of subsetting + +Tests selected via mapping rules *are always prioritized first* before {PRODUCT} starts selecting tests based on the estimated likelihood of failures. If the specified optimization target (e.g., `--time` ) allows for more tests to be selected, then {PRODUCT} will add tests it estimates will be most effective. On the other hand, if tests selected via mapping rules are already too big to fit the goal, some of those tests will be dropped to create a subset that meets the goal.
\ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/replace-static-parallel-suites-dynamic-parallel-subset.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/replace-static-parallel-suites-dynamic-parallel-subset.adoc new file mode 100644 index 000000000..b558214c0 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/replace-static-parallel-suites-dynamic-parallel-subset.adoc @@ -0,0 +1,112 @@ += Replacing static parallel suites with a dynamic parallel subset +:slug: replacing-static-parallel-suites-dynamic-parallel-subset + +== Replacing static parallel suites with a dynamic parallel subset + +Some teams manually split their test suites into several "bins" to run them in parallel. This presents a challenge in adopting {PRODUCT} because you don't want to lose the benefit of parallelization. + +With *split subsets* , you can replace your manually selected bins with automatically populated bins from a {PRODUCT} subset. + +For example, let's say you currently run ~80 minutes of tests split coarsely into four bins and run in parallel across four workers: + +* Worker 1: ~20 minutes of tests +* Worker 2: ~15 minutes of tests +* Worker 3: ~20 minutes of tests +* Worker 4: ~25 minutes of tests + +With a split subset, you can generate a subset of the full 80 minutes of tests and then call {PRODUCT} once in each worker to get the bin of tests for that runner. + +The high-level flow is: + +. Request a subset of tests to run from {PRODUCT} by running `smart-tests subset` with the `--split` option. Instead of outputting a list of tests, the command will output a subset ID that you should save and pass into each runner. +. 
Start up your parallel test worker, e.g., four runners from the example above +. Request the bin of tests that the worker should run. To do this, run `smart-tests split-subset` with: . the `--subset-id` option set to the ID you saved earlier, and +. the `--bin` value set to `bin-number/bin-count` . +. If you're using xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] , add the `--output-exclusion-rules` option. +. Run the tests on each worker. +. After each run finishes in each worker, record test results using `smart-tests record tests` with the `--subset-id` option set to the ID you saved earlier. + +In pseudocode: + +[source] +---- +# main +$ smart-tests record build --build $BUILD_ID --source src=. +$ smart-tests record session --build $BUILD_ID --session $SESSION_ID +$ smart-tests subset --split --confidence 90% --build $BUILD_ID --session $SESSION_ID bazel . +subset/12345 + +... + +# worker 1 +$ smart-tests split-subset bazel --subset-id subset/12345 --bin 1/3 --rest rest.txt > subset.txt +$ bazel test $(cat subset.txt) +$ smart-tests record tests --build $BUILD_ID --session $SESSION_ID --subset-id subset/12345 bazel . + + +# worker 2 +$ smart-tests split-subset bazel --subset-id subset/12345 --bin 2/3 --rest rest.txt > subset.txt +$ bazel test $(cat subset.txt) +$ smart-tests record tests --build $BUILD_ID --session $SESSION_ID --subset-id subset/12345 bazel . + +# worker 3 +$ smart-tests split-subset bazel --subset-id subset/12345 --bin 3/3 --rest rest.txt > subset.txt +$ bazel test $(cat subset.txt) +$ smart-tests record tests --build $BUILD_ID --session $SESSION_ID --subset-id subset/12345 bazel . +---- + +=== [Beta; Gradle only] Dynamic parallel subset with same bin option + +Even though `smart-tests split-subset` offers evenly split subset bins, there are some cases where certain tests should not run concurrently. 
You want to use `split-subset` to split a subset into multiple bins and want certain tests to belong to the same bin to avoid simultaneous execution. + +E.g., TestA and TestB use the same record in a database, and their concurrent access may cause the tests to fail. + +We provide the `--same-bin ` option to avoid running these tests simultaneously. By adding `--same-bin ` option to `split-subset` , the test cases listed in the `` will be placed in the same bin. + +In pseudocode: + +[source] +---- +# main +$ smart-tests subset --target 90% --build BUILD_ID --session $SESSION_ID --split gradle src/test/java +subset/12345 +Your model is currently in training +{PRODUCT} created subset 12345 for build test (test session 12345) in workspace launchableinc/mothership + +| | Candidates | Estimated duration (%) | Estimated duration (min) | +|-----------|--------------|--------------------------|----------------------------| +| Subset | 7 | 77.7778 | 0.000116667 | +| Remainder | 2 | 22.2222 | 3.33333e-05 | +| | | | | +| Total | 9 | 100 | 0.00015 | + +Run `smart-tests inspect subset --subset-id 12345` to view full subset details + +--- + +# worker 1 +$ cat same_bin0.txt +example.DB0Test +example.DB1Test + +$ smart-tests split-subset \ + --subset-id subset/12345 \ + --bin 1/2 \ + --same-bin same_bin0.txt \ + gradle +--tests example.DB0Test --tests example.DB1Test --tests example.MulTest --tests example.DivTest + +--- + +# worker 2 +$ cat same_bin0.txt +example.DB0Test +example.DB1Test + +$ smart-tests split-subset \ + --subset-id subset/12345 \ + --bin 2/2 \ + --same-bin same_bin0.txt \ + gradle +--tests example.AddTest --tests example.SubTest --tests example.PowTest +---- \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc 
b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc new file mode 100644 index 000000000..984c982cd --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc @@ -0,0 +1,1324 @@ +include::ROOT:partial$abbr.adoc[] += Subset with the {PRODUCT} CLI +:slug: subset-with-the-smart-tests-cli + +== Overview + +Use `smart-tests subset` to request a subset of tests from {PRODUCT}. Then run this command before your standard test runner command. It generates a list of tests that you can pass into your test runner to run. + +If you want to test out subset behavior before running in production, refer to xref:predictive-test-selection/observe-subset-behavior.adoc[Observing subset behavior]. Also, refer to xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] for an alternative subsetting interface that is useful in some scenarios. + +[[options]] +== Options + +[WARNING] +-- +*Read this section first!* +-- + +`smart-tests subset` takes various options: + +* high-level options +* test runner +* test runner options + +`smart-tests subset ` + +`` is always a string representing the test runner in use, e.g., `maven` , `ant` , etc. + +For brevity, the examples below do not include all high-level options, so read this section and the Subset section of the xref:resources:cli-reference.adoc[CLI reference] before you continue. Test runner options are listed in each section. 
+ +=== Required options + +==== Optimization target + +At a minimum, you *must* specify an optimization target option, either + +* `--confidence` +* `--time` +* `--target` + +See xref:predictive-test-selection/request-and-run-a-subset-of-tests/choose-a-subset-optimization-target/choose-a-subset-optimization-target.adoc[Choosing a subset optimization target] for more information. + +==== Build or session identifier + +The examples below include the high-level `--build ` option, used for specifying the build for which to request test recommendations. This is the same build that you already created to record tests. The subset command goes in between these. + +*Before subsetting (simplified)* + +[source] +---- +# build process +smart-tests record build --build $BUILD_NAME + + +# test process +smart-tests record session --build $BUILD_NAME --session $SESSION_NAME + +smart-tests record tests --build $BUILD_NAME --session $SESSION_NAME +---- + +*After subsetting (simplified)* + +[source] +---- +# build process +smart-tests record build --build $BUILD_NAME + + +# test process +smart-tests record session --build $BUILD_NAME --session $SESSION_NAME +smart-tests subset --build $BUILD_NAME --session $SESSION_NAME # and related commands + +smart-tests record tests --build $BUILD_NAME --session $SESSION_NAME +---- + +However, if you are generating a test session manually you want to use `--session` instead of `--build`. For more information, refer to xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc[Manage complex test session layouts]. + +== Instructions for test runners/build tools + +[NOTE] +-- +If not using any of the following runners or tools, use the xref:resources:integrations/use-the-generic-file-based-runner-integration.adoc[`file` profile for unsupported test runners], the xref:resources:integrations/raw.adoc[`raw` profile for custom test runners], or mailto:support@launchableinc.com[request a plugin]. 
+-- + +=== Android Compatibility Test Suite (CTS) + +[NOTE] +-- +This profile only supports xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting]. See that page for instructions. +-- + +=== Android Debug Bridge (ADB) + +First, you'll request a subset of tests from your entire test suite. Then, you'll pass this list into `adb` to run. + +==== Requesting a subset of tests + +Find the `adb` command used to run tests in your CI script. These commands will go _before_ that command. + +First, duplicate the `adb` command you normally use to run tests and add the `-e log true` option. Then, output the result to a text file. For example: + +`adb shell am instrument -e log true com.yourdomain.test/androidx.test.runner.AndroidJUnitRunner > test_list.txt` + +This command outputs the full list of tests that would normally run (without actually running them) to a file called `test_list.txt` + +Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list. + +`cat test_list.txt | smart-tests subset adb --build --session > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . + +This creates a file called `smart-tests-subset.txt` . This file contains a list of test classes formatted for passing into your normal `adb` command, shown next. 
+ +==== Running a subset of tests + +Now you can run only the subset of tests by adding the `-e class $(cat smart-tests-subset.txt)` option to your standard `adb` command, like this: + +`adb shell am instrument -e class $(cat smart-tests-subset.txt) com.yourdomain.test/androidx.test.runner.AndroidJUnitRunner` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +adb shell am instrument com.yourdomain.test/androidx.test.runner.AndroidJUnitRunner +---- + +And the flow after: + +[source] +---- +# generate the complete list of tests in your suite +adb shell am instrument -e log true com.yourdomain.test/androidx.test.runner.AndroidJUnitRunner > test_list.txt +# request a subset from the full list +cat test_list.txt | smart-tests subset adb --build --session > smart-tests-subset.txt +# run the results of the subset request +adb shell am instrument -e class $(cat smart-tests-subset.txt) com.yourdomain.test/androidx.test.runner.AndroidJUnitRunner +---- + +=== Ant + +First, you'll request a subset of tests from your complete suite. Then, you'll pass this list into your `build.xml` file to limit what Ant runs. + +==== Requesting a subset of tests + +First, find the `ant` command used to run tests in your CI script. + +Before that command, add the `smart-tests subset` command to request a subset of tests from your full test suite: + +`smart-tests subset ant --build --session > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . +* Set `` to the path(s) containing your test files. The CLI will look in those path(s) and generate the full list of tests that would normally run. The subset service divides this whole list into a subset and a remainder. + +This creates a file called `smart-tests-subset.txt` . This file contains a list of test classes formatted for passing into your `build.xml` file, shown next. 
+ +==== Running a subset of tests + +Separately, update your `build.xml` file to use `smart-tests-subset.txt` : + +[source] +---- + + … + + + + + + + + + + + + + + + + + + + + + + + … + +---- + +Finally, you run tests command as normal, such as: + +`ant junit ` + +=== Bazel + +First, you'll request a subset of tests from your entire test suite. Then, you'll pass this list to Bazel to run. + +==== Requesting a subset of tests + +Find the `bazel` command used to run tests in your CI script. These commands will go _before_ that command. + +First, run `bazel query` and output the result to a text file. For example: + +`bazel query 'tests(//...)' > test_list.txt` + +This command outputs the complete list of test targets that typically run (without running them) to a file called `test_list.txt` . The subset service will divide this list into a subset and a remainder. + +Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list. + +`cat test_list.txt | smart-tests subset bazel --build --session > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . + +This creates a file called `smart-tests-subset.txt` that you can pass into Bazel. + +==== Running a subset of tests + +AppendYour the list of tests to run to your existing command, such as: + +`bazel test $(cat smart-tests-subset.txt)` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your standard command to run tests looks something like this +bazel test +---- + +And the flow after: + +[source] +---- +# generate the full list +bazel query 'tests(//...)' > test_list.txt +# request a subset +cat test_list.txt | smart-tests subset bazel --build --session > smart-tests-subset.txt +# run the results of the subset request +bazel test $(cat smart-tests-subset.txt) +---- + +=== Behave + +First, you'll request a subset of tests from your entire test suite. Then, you'll pass this list to Behave to run. 
+ +==== Requesting a subset of tests + +Find the `behave` command used to run tests in your CI script. These commands will go _before_ that command. + +First, run `find ./features/` (or a similar command for your environment) and output the result to a text file. For example: + +`find ./features/ > test_list.txt` + +This command writes the list of test files that typically run (without actually running them) to `test_list.txt` . The subset service will divide this list into a subset and a remainder. + +Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list. + +`cat test_list.txt | smart-tests subset behave --build --session > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . + +This creates a file called `smart-tests-subset.txt` that you can pass into Behave. + +==== Running a subset of tests + +To run a subset, run `behave` with the `-i` option and pass in the subset list. For example: + +`behave -i "$(cat smart-tests-subset.txt)"` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +behave +---- + +And the flow after: + +[source] +---- +# generate the full list +find ./features/ > test_list.txt +# request a subset +cat test_list.txt | smart-tests subset behave --build --session > smart-tests-subset.txt +# run the results of the subset request +behave -i "$(cat smart-tests-subset.txt)" +---- + +=== CTest + +First, you'll request a subset of tests from your entire test suite. Then, you'll pass this list into CTest to run. + +==== Requesting a subset of tests + +Find the `ctest` command used to run tests in your CI script. These commands will go _before_ that command. + +First, run `ctest` with the `--show-only` option and output the result to a JSON file. 
For example: + +`ctest --show-only=json-v1 > test_list.json` + +This command creates the complete list of test files that typically run (without actually running them) to a file called `test_list.json` . The subset service will divide this list into a subset and a remainder list. + +Next, pass the file you just created into `smart-tests subset` to request a subset from the full list. + +`smart-tests subset ctest --build --session --confidence --output-regex-files --output-regex-files-dir=subsets test_list.json` + +* See #options[#options] for setting `` and `` . +* The `--output-regex-files` instructs CLI to write the regular expression for the subset tests into the directory specified in `--output-regex-files-dir` . + +This creates files under the `subsets` directory. `subset_N` are the files that contain regular expressions of the chosen subset of tests. If you use the `--rest` option, `rest_N` will contain the non-chosen tests. + +==== Running a subset of tests + +Then, run `ctest` for each subset output file: + +[source] +---- +for file in subset/subset_*; do + ctest -T test --no-compress-output -R "$(cat "$file")" +done +---- + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +ctest -T test --no-compress-output +---- + +And the flow after: + +[source] +---- +# generate the full list that would normally run +ctest --show-only=json-v1 > test_list.json +# request a subset +smart-tests subset ctest --build --session --confidence --output-regex-files --output-regex-files-dir=subsets test_list.json +# run the results of the subset request +for file in subset/subset_*; do + ctest -T test --no-compress-output -R "$(cat "$file")" +done +---- + +=== cucumber + +First, you'll request a subset of tests from your entire test suite. Then, you'll pass this list to cucumber to run. 
+ +==== Requesting a subset of tests + +First, find the `bundle exec cucumber` command used to run tests in your CI script. + +Before that command, add the `smart-tests subset` command to request a subset of tests from your full test suite: + +`smart-tests subset cucumber --build --session --base $(pwd) > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . +* Don't forget the `--base $(pwd)` option (or equivalent) before `cucumber` . +* Set `` to the glob expression representing your `.feature` files, e.g., `features/**/*.feature` . The CLI will look in those path(s) and generate the complete list of tests that would typically run. The subset service divides this list into a subset and a remainder list. + +This creates a file called `smart-tests-subset.txt` . This file contains a list of test files formatted for passing into cucumber, shown next. + +==== Running a subset of tests + +Append the subset list to your `bundle exec cucumber` command to run a subset. For example: + +`bundle exec cucumber -f junit -o reports $(cat smart-tests-subset.txt)` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +bundle exec cucumber -f junit -o reports +---- + +And the flow after: + +[source] +---- +# request a subset from all features that would typically run +smart-tests subset cucumber --build --session > smart-tests-subset.txt +# run the results of the subset request +bundle exec cucumber -f junit -o reports $(cat smart-tests-subset.txt) +---- + +=== Cypress + +Find the `cypress run` command used to run tests in your CI script. These commands will go _before_ that command. + +First, run find `./cypress/integration -type f` (or a similar command for your platform) and output the result to a text file. 
For example: + +`find ./cypress/integration -type f > test_list.txt` + +This command writes the complete list of test files that typically run (without actually running them) to `test_list.txt` . The subset service will divide this list into a subset and a remainder list. + +Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list. + +`cat test_list.txt | smart-tests subset cypress --build --session > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . + +This creates a file called `smart-tests-subset.txt` that you can pass into Cypress. + +==== Running a subset of tests + +To run a subset, use the `--spec` option with the subset list text file. For example: + +`cypress run --reporter junit --spec "$(cat smart-tests-subset.txt)"` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +cypress run --reporter junit +---- + +And the flow after: + +[source] +---- +# generate the complete list that would typically run +find ./cypress/integration -type f > test_list.txt +# request a subset from all features that would typically run +cat test_list.txt | smart-tests subset cypress --build --session > smart-tests-subset.txt +# run the results of the subset request +cypress run --reporter junit --spec "$(cat smart-tests-subset.txt)" +---- + +=== dotnet test + +[NOTE] +-- +This profile only supports xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] . See that page for instructions. +-- + +[[flutter-subset]] +=== Flutter + +First, you'll request a subset of tests from your full test suite. Then, you'll pass this list into Flutter to run. + +==== Requesting a subset of tests + +First, find the `flutter test` command used to run tests in your CI script. 
+
+[source,bash]
+----
+smart-tests subset --build flutter
+----
+
+==== Running a subset of tests
+
+[source,bash]
+----
+flutter test $(cat smart-tests-subset.txt) --machine > report.json
+----
+
+* See <> for setting `` and ``.
+
+==== Summary
+
+In summary, here's the flow before:
+
+[source,bash]
+----
+flutter test --machine > report.json
+----
+
+And the flow after:
+
+[source,bash]
+----
+# request a subset
+smart-tests subset --build flutter test/**/*.dart
+
+# run the results of the subset request
+flutter test $(cat smart-tests-subset.txt) --machine > report.json
+----
+
+=== GoogleTest
+
+Find the GoogleTest command used to run tests in your CI script. These commands will go _before_ that command.
+
+First, invoke GoogleTest with the `--gtest_list_tests` option and output the result to a text file. For example:
+
+`./my-test --gtest_list_tests > test_list.txt`
+
+This command outputs the complete list of tests that normally run (without running them) to a file called `test_list.txt` . The subset service will divide this list into a subset and a remainder list.
+
+Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list.
+
+`cat test_list.txt | smart-tests subset googletest --build --session > smart-tests-subset.txt`
+
+* See #options[#options] for setting `` and `` .
+
+This creates a file called `smart-tests-subset.txt` that you can pass into GoogleTest. 
+ +==== Running a subset of tests + +Add the `--gtest_filter` option to your existing command, such as: + +`./my-test --gtest_filter="$(cat smart-tests-subset.txt)"` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +./my-test +---- + +And the flow after: + +[source] +---- +# generate the full list +./my-test --gtest_list_tests > test_list.txt +# request a subset +cat test_list.txt | smart-tests subset googletest --build --session > smart-tests-subset.txt +# run the results of the subset request +./my-test --gtest_filter="$(cat smart-tests-subset.txt)" +---- + +=== Go Test + +Find the `go test` command used to run tests in your CI script. These commands will go _before_ that command. + +First, duplicate the `go test` command you normally use to run tests and add the `-list` option. Then, output the result to a text file. For example: + +`go test -list="Test|Example" ./... > test_list.txt` + +This command outputs the complete list of tests that normally run (without running them) to a file called `test_list.txt` . The subset service will divide this full list into a subset and a remainder. + +Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list. + +`cat test_list.txt | smart-tests subset go-test --build --session > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . + +This creates a file called `smart-tests-subset.txt` that you can pass into `go test` . + +==== Running a subset of tests + +Add the `-run` option to your existing command, such as: + +`go test -run $(cat smart-tests-subset.txt) ./... | go-junit-report > report.xml` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +go test ./... +---- + +And the flow after: + +[source] +---- +# generate the full list +go test -list="Test|Example" . ./... 
> test_list.txt +# request a subset +cat test_list.txt | smart-tests subset go-test --build --session > smart-tests-subset.txt +# run the results of the subset request +go test -run $(cat smart-tests-subset.txt) ./... | go-junit-report > report.xml +---- + +=== Gradle + +First, you'll request a subset of tests from your full test suite. Then, you'll pass this list to Gradle. + +==== Requesting a subset of tests + +First, find the `gradle` command used to run tests in your CI script. + +Before that command, add the `smart-tests subset` command to request a subset of tests from your full test suite: + +`smart-tests subset gradle --build --session > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . +* Set `` to the path(s) containing your test files, e.g., `project1/src/test/java project2/src/test/java` . The CLI will look in those path(s) and generate the full list of tests that would normally run. The subset service divides this full list into a subset and a remainder. + +This creates a file called `smart-tests-subset.txt` . This file contains a list of test classes formatted for passing into Gradle, like this: + +`--tests MyTestClass1 --tests MyTestClass2 ...` + +==== Running a subset of tests + +Then simply pass this file into your existing command, like shown below. + +[.multilanguage-custom-table, options="header", cols="2"] +|=== +| Gradle +| Gradle plugin for Android + +a| [source] +---- +gradle test $(cat smart-tests-subset.txt) +# equivalent to gradle test --tests MyTestClass1 --tests MyTestClass2 ... +---- + +a| The *Gradle plugin for Android* requires a different command, because the built-in `test` task does not support the `--tests` option. 
Use `testDebugUnitTest` or `testReleaseUnitTest` instead: + +[source] +---- +./gradlew testDebugUnitTest $(cat smart-tests-subset.txt) +# or +./gradlew testReleaseUnitTest $(cat smart-tests-subset.txt) +---- + +|=== + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +gradle test +---- + +And the flow after: + +[source] +---- +# request a subset from all tests +smart-tests subset gradle --build --session > smart-tests-subset.txt +# run the results of the subset request +gradle test $(cat smart-tests-subset.txt) +---- + +=== Gradle + TestNG + +First, you'll request a subset of tests from your full test suite. Then, you'll pass this list to Gradle. + +==== Requesting a subset of tests + +First, find the `gradle` command used to run tests in your CI script. + +Before that command, add the `smart-tests subset` command to request a subset of tests from your full test suite: + +`smart-tests subset gradle --build --session --bare > smart-tests-subset.txt` + +* See #options[#options] for setting `` and `` . +* Set `` to the path(s) containing your test files, e.g. `project1/src/test/java project2/src/test/java` . The CLI will look in those path(s) and generate the full list of tests that would normally run. The subset service divides this full list into a subset and a remainder. +* Don't forget the `--bare` option after `gradle` ! + +This creates a file called `smart-tests-subset.txt` . This file contains a list of test classes formatted for passing into Gradle, like this: + +[source] +---- +com.example.FooTest +com.example.BarTest +... +---- + +==== Running a subset of tests + +First, you need to add a dependency declaration to `build.gradle` so that the right subset of tests get executed when TestNG runs: + +[source] +---- +dependencies { + ... 
+ testRuntime 'com.launchableinc:launchable-testng:1.2.1' +} +---- + +Then simply export the subset file path as an environment variable before you run `gradle test` , like shown below. + +[.multilanguage-custom-table, options="header", cols="2"] +|=== +| Gradle +| Gradle plugin for Android + +a| [source] +---- +export SMART_TESTS_SUBSET_FILE_PATH=$PWD/smart-tests-subset.txt +gradle test +---- + +a| The *Gradle plugin for Android* requires a different command, because the built-in `test` task does not support the `--tests` option. Use `testDebugUnitTest` or `testReleaseUnitTest` instead: + +[source] +---- +export SMART_TESTS_SUBSET_FILE_PATH=$PWD/smart-tests-subset.txt +./gradlew testDebugUnitTest $(cat smart-tests-subset.txt) +# or +export SMART_TESTS_SUBSET_FILE_PATH=$PWD/smart-tests-subset.txt +./gradlew testReleaseUnitTest $(cat smart-tests-subset.txt) +---- + +|=== + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +gradle test +---- + +And the flow after: + +[source] +---- +# request a subset from all tests +smart-tests subset gradle --build --session --bare > smart-tests-subset.txt +# run the results of the subset request using the `launchable-testng` plugin +export SMART_TESTS_SUBSET_FILE_PATH=$PWD/smart-tests-subset.txt +gradle test +---- + +=== Jest + +Find the `jest` command used to run tests in your CI script. These commands will go _before_ that command. + +First, duplicate the `jest` command you normally use to run tests and add the `--listTests` option. Then, output the result to a text file. For example: + +`jest --listTests > test_list.txt` + +This command creates the full list of test files that would normally run (without actually running them) to a file called `test_list.txt` . The subset service will divide this full list into a subset and a remainder. + +Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list. 
+
+`cat test_list.txt | smart-tests subset jest --build --session --base $(pwd) > smart-tests-subset.txt`
+
+* See #options[#options] for setting `` and `` .
+* Don't forget the `--base $(pwd)` option before `jest` .
+
+This creates a file called `smart-tests-subset.txt` that you can pass into Jest.
+
+==== Running a subset of tests
+
+To run the subset, include the subset list after `jest` . For example:
+
+`jest $(cat smart-tests-subset.txt)`
+
+==== Summary
+
+In summary, here's the flow before:
+
+[source]
+----
+# Your normal command to run tests looks something like this
+jest
+----
+
+And the flow after:
+
+[source]
+----
+# generate the full list that would normally run
+jest --listTests > test_list.txt
+# request a subset from all features that would normally run
+cat test_list.txt | smart-tests subset jest --build --session --base $(pwd) > smart-tests-subset.txt
+# run the results of the subset request
+jest $(cat smart-tests-subset.txt)
+----
+
+=== Karma
+
+Before you invoke your tests with Karma, run the `subset karma` command, which expects a list of test files as input, for example:
+
+[source,bash]
+----
+find src -name "*.spec.ts" -o -name "*.spec.js" | \
+smart-tests subset --session karma > subset.txt
+----
+
+* See link:#options[options] for setting `` and ``.
+
+`subset.txt` will contain a list of test files that are selected as a subset, which you then pass to the test runner invocation. If you run Karma via `ng test`, you can use the `--with` ng option so that the output will be in the form of `--include path/to/some.spec.ts`, which is expected by `ng test`.
+
+[source,bash]
+----
+find ... | smart-tests ... karma --with ng > subset.txt
+ng test $(cat subset.txt)
+----
+
+=== Maven
+
+Find the `mvn test` command used to run tests in your CI script. These commands will go _before_ that command.
+
+First, duplicate the `mvn test` command you normally use to run tests, but change `test` to `test-compile` . 
For example: + +`mvn test-compile ` + +This command creates `.lst` files that list the test classes that would normally run (without running them). The subset service will combine these and divide this full list into a subset and a remainder. + +Next, run `smart-tests subset` to request a subset from the full list. + +`smart-tests subset maven --build --session --test-compile-created-file <(find . -path '*/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst' -exec cat {} \;) > smart-tests-subset.txt` + +* See #options[#options] for how to set `` and `` . +* The `<(find...` section combines the `.lst` files across your projects into a single file for processing. You might need to change this for your platform. + +This creates a file called `smart-tests-subset.txt` that you can pass into Maven. + +==== Running a subset of tests + +To run the subset, use the `-Dsurefire.includesFile` option. For example: + +`mvn test -Dsurefire.includesFile=$PWD/smart-tests-subset.txt` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your normal command to run tests looks something like this +mvn test +---- + +And the flow after: + +[source] +---- +# generate the full list(s) that would normally run +mvn test-compile +# request a subset from all features that would normally run +smart-tests subset maven --build --session --test-compile-created-file <(find . -path '*/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst' -exec cat {} \;) > smart-tests-subset.txt +# run the results of the subset request +mvn test -Dsurefire.includesFile=$PWD/smart-tests-subset.txt +---- + +=== Maven + TestNG + +Find the `mvn test` command used to run tests in your CI script. These commands will go _before_ that command. + +First, duplicate the `mvn test` command you normally use to run tests, but change `test` to `test-compile` . 
For example: + +`mvn test-compile ` + +This command creates `.lst` files that list the test classes that would normally run (without running them). The subset service will combine these and divide this full list into a subset and a remainder. + +Next, run `smart-tests subset` to request a subset from the full list. + +`smart-tests subset maven --build --session --test-compile-created-file <(find . -path '*/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst' -exec cat {} \;) > smart-tests-subset.txt` + +* See #options[#options] for how to set `` and `` . +* The `<(find...` section combines the `.lst` files across your projects into a single file for processing. + +This creates a file called `smart-tests-subset.txt` that you can pass into Maven. + +==== Running a subset of tests + +First, modify your `pom.xml` so that it includes {PRODUCT} TestNG integration as a test scope dependency: + +[source] +---- + + com.launchableinc + launchable-testng + 1.2.1 + test + +---- + +Then simply export the subset file path as an environment variable before you run `mvn test` , like shown below. + +[source] +---- +export SMART_TESTS_SUBSET_FILE_PATH=$PWD/smart-tests-subset.txt +mvn test +---- + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your normal command to run tests looks something like this +mvn test +---- + +And the flow after: + +[source] +---- +# generate the full list of tests +mvn test-compile +# request a subset from all tests +smart-tests subset maven --build --session --test-compile-created-file <(find . -path '*/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst' -exec cat {} \;) > smart-tests-subset.txt +# run the results of the subset request using the `launchable-testng` plugin +export SMART_TESTS_SUBSET_FILE_PATH=$PWD/smart-tests-subset.txt +mvn test +---- + +=== minitest + +First, you'll request a subset of tests from your full test suite. 
Then, you'll pass this list into minitest to run. + +==== Requesting a subset of tests + +First, find the `bundle exec rails test` command used to run tests in your CI script. + +Before that command, add the `smart-tests subset` command to request a subset of tests from your full test suite: + +`smart-tests subset minitest --build --session > smart-tests-subset.txt` + +* See #options[#options] for how to set `` and `` . +* Set `` to the glob expression representing your `.rb` test files, e.g. `test/**/*.rb` . The CLI will look in those path(s) and generate the full list of tests that would normally run. The subset service divides this full list into a subset and a remainder. + +This creates a file called `smart-tests-subset.txt` . This file contains a list of tests formatted for passing into minitest. + +==== Running a subset of tests + +To run a subset, pass the subset list into `bundle exec rails test` . For example: + +`bundle exec rails test $(cat smart-tests-subset.txt)` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your normal command to run tests looks something like this +bundle exec rails test +---- + +And the flow after: + +[source] +---- +# request a subset of your existing test suite +smart-tests subset minitest --build --session > smart-tests-subset.txt +# run the results of the subset request +bundle exec rails test $(cat smart-tests-subset.txt) +---- + +=== NUnit Console Runner + +First, you'll request a subset of tests from your full test suite. Then, you'll pass this list into `nunit3-console` to run. + +==== Requesting a subset of tests + +Find the `nunit3-console` command used to run tests in your CI script. These commands will go _before_ that command. + +First, duplicate the `nunit3-console` command you normally use to run tests, and add the `--explore` option. 
For example:
+
+`nunit3-console --explore=test_list.xml path/to/myassembly.dll`
+
+This command writes the full list of tests that normally run (without running them) to `test_list.xml` .
+
+Next, pass the file you just created into `smart-tests subset` to request a subset from the full list.
+
+`smart-tests subset nunit --build --session test_list.xml > smart-tests-subset.txt`
+
+* See #options[#options] for setting `` and `` .
+
+This creates a file called `smart-tests-subset.txt` . This file contains a list of test classes formatted for passing into your normal `nunit3-console` command, shown next.
+
+Note: If you want to subset tests across multiple DLLs (for example, if multiple DLLs are combined into a logical 'suite'), you can run `nunit3-console --explore...` once for each DLL, then pass all the files into `smart-tests subset` , such as:
+
+[source]
+----
+nunit3-console --explore=myassembly1.xml path/to/myassembly1.dll
+nunit3-console --explore=myassembly2.xml path/to/myassembly2.dll
+nunit3-console --explore=myassembly3.xml path/to/myassembly3.dll
+
+smart-tests subset nunit --build --session myassembly1.xml myassembly2.xml myassembly3.xml > smart-tests-subset.txt
+----
+
+==== Running a subset of tests
+
+Now you can run only the subset of tests by adding the `--testlist` option to your normal `nunit3-console` command, like this:
+
+`nunit3-console --testlist=smart-tests-subset.txt path/to/myassembly.dll [path/to/myassembly2.dll] [path/to/myassembly3.dll]`
+
+==== Summary
+
+In summary, here's the flow before:
+
+[source]
+----
+# Your normal command to run tests looks something like this
+nunit3-console path/to/myassembly.dll
+----
+
+And the flow after:
+
+[source]
+----
+# generate the full list of tests in your suite
+nunit3-console --explore=test_list.xml path/to/myassembly.dll
+# request a subset from the full list
+smart-tests subset nunit --build --session test_list.xml > smart-tests-subset.txt
+# run the results of the subset request
+nunit3-console 
--testlist=smart-tests-subset.txt path/to/myassembly.dll [path/to/myassembly2.dll] [path/to/myassembly3.dll] +---- + +=== prove for Perl + +Find the `prove` command used to run tests in your CI script. These commands will go _before_ that command. + +First, pipe the test files you have into `smart-tests subset` to request a subset from the full list. + +[source] +---- +# Assuming your test directory is `./t`. +find ./t -name '*.t' | smart-tests subset prove --build --session > smart-tests-subset.txt +---- + +* See #options[#options] for setting `` and `` . + +This creates a file called `smart-tests-subset.txt` . This file contains a list of tests formatted for passing into your normal `prove` command, shown next. + +==== Running a subset of tests + +Now you can run only the subset of tests by passing the `smart-tests-subset.txt` file into `prove` , like this: + +[source] +---- +# You must pass the environment variable JUNIT_NAME_MANGLE=none to generate the JUnit XML report in {PRODUCT}'s supported format. +export JUNIT_NAME_MANGLE=none +prove -Ilib --harness TAP::Harness::JUnit -r $(cat smart-tests-subset.txt) +---- + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +prove -Ilib --harness TAP::Harness::JUnit -r t +---- + +And the flow after: + +[source] +---- +# request a subset from the full list +find ./t -name '*.t' | smart-tests subset prove --build --session > smart-tests-subset.txt +# run the results of the subset request +export JUNIT_NAME_MANGLE=none +prove -Ilib --harness TAP::Harness::JUnit -r $(cat smart-tests-subset.txt) +---- + +=== Playwright + +First, you'll request a subset of tests from your entire test suite. Then, you'll pass this list to Playwright to run. + +==== Requesting a subset of tests + +Find the `playwright test` command used to run tests in your CI script. + +First, list the result in a text file. 
For example:
+
+`find tests/*.spec.ts > test_list.txt`
+
+This command outputs the complete list of test targets that typically run (without running them) to a file called `test_list.txt` . The subset service will divide this list into a subset and a remainder.
+
+Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list.
+
+`cat test_list.txt | smart-tests subset playwright --build --session > smart-tests-subset.txt`
+
+* See #options[#options] for setting `` and `` .
+
+==== Running a subset of tests
+
+Append the list of tests to run to your existing command, such as:
+
+`playwright test $(cat smart-tests-subset.txt)`
+
+==== Summary
+
+In summary, here's the flow before:
+
+`playwright test ./tests`
+
+And the flow after:
+
+[source]
+----
+# generate the test list
+find ./tests/*.spec.ts > test_list.txt
+# request a subset
+cat test_list.txt | smart-tests subset playwright --build --session > smart-tests-subset.txt
+# run the results of the subset request
+playwright test $(cat smart-tests-subset.txt)
+----
+
+=== pytest
+
+Find the `pytest` command used to run tests in your CI script. These commands will go _before_ that command.
+
+First, duplicate the `pytest` command you normally use to run tests and add the `--collect-only` and `-q` options. Then output that to a file. For example:
+
+`pytest --collect-only -q > test_list.txt`
+
+This command writes the full list of tests that normally run (without running them) to `test_list.txt` .
+
+Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list.
+
+`cat test_list.txt | smart-tests subset pytest --build --session > smart-tests-subset.txt`
+
+* See #options[#options] for setting `` and `` .
+
+This creates a file called `smart-tests-subset.txt` . This file contains a list of tests formatted for passing into your normal `pytest` command, shown next. 
+
+==== Running a subset of tests
+
+Now you can run only the subset of tests by passing the `smart-tests-subset.txt` file into `pytest` , like this:
+
+`pytest --junit-xml=test-results/subset.xml $(cat smart-tests-subset.txt)`
+
+==== Summary
+
+In summary, here's the flow before:
+
+[source]
+----
+# Your normal command to run tests looks something like this
+pytest --junit-xml=test-results/subset.xml
+----
+
+And the flow after:
+
+[source]
+----
+# generate the full list of tests in your suite
+pytest --collect-only -q > test_list.txt
+# request a subset from the full list
+cat test_list.txt | smart-tests subset pytest --build --session > smart-tests-subset.txt
+# run the results of the subset request
+pytest --junit-xml=test-results/subset.xml $(cat smart-tests-subset.txt)
+----
+
+=== Robot
+
+Find the `robot` command used to run tests in your CI script. These commands will go _before_ that command.
+
+First, duplicate the `robot` command you normally use to run tests, and add the `--dryrun` and `-o` options. For example:
+
+`robot --dryrun -o test_list.xml`
+
+This command writes the full list of tests that normally run (without running them) to `test_list.xml` .
+
+Next, pass the file you just created into `smart-tests subset` to request a subset from the full list.
+
+`smart-tests subset robot --build --session test_list.xml > smart-tests-subset.txt`
+
+* See #options[#options] for setting `` and `` .
+
+This creates a file called `smart-tests-subset.txt` . This file contains a list of tests formatted for passing into your normal `robot` command, shown next. 
+
+==== Running a subset of tests
+
+Now you can run only the subset of tests by passing the `smart-tests-subset.txt` file into `robot` , like this:
+
+`robot $(cat smart-tests-subset.txt) .`
+
+==== Summary
+
+In summary, here's the flow before:
+
+[source]
+----
+# Your normal command to run tests looks something like this
+robot
+----
+
+And the flow after:
+
+[source]
+----
+# generate the full list of tests in your suite
+robot --dryrun -o test_list.xml
+# request a subset from the full list
+smart-tests subset robot --build --session test_list.xml > smart-tests-subset.txt
+# run the results of the subset request
+robot $(cat smart-tests-subset.txt) .
+----
+
+=== RSpec
+
+First, you'll request a subset of tests from your full test suite. Then, you'll pass this list into RSpec to run.
+
+==== Requesting a subset of tests
+
+First, find the `bundle exec rspec` command used to run tests in your CI script.
+
+Before that command, add the `smart-tests subset` command to request a subset of tests from your full test suite:
+
+`smart-tests subset rspec --build --session > smart-tests-subset.txt`
+
+* See #options[#options] for setting `` and `` .
+* Set `` to the glob expression representing your `.rb` test files, e.g., `spec/**/*_spec.rb` . The CLI will look in those path(s) and generate the full list of tests that would normally run. The subset service divides this full list into a subset and a remainder.
+
+This creates a file called `smart-tests-subset.txt` . This file contains a list of tests formatted for passing into RSpec.
+
+==== Running a subset of tests
+
+To run a subset, pass the subset list into `bundle exec rspec` . 
For example: + +`bundle exec rspec $(cat smart-tests-subset.txt) --format d --format RspecJunitFormatter --out rspec.xml ` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# Your normal command to run tests looks something like this +bundle exec rspec --format RspecJunitFormatter --out report/rspec.xml +---- + +And the flow after: + +[source] +---- +# request a subset of your existing test suite +smart-tests subset rspec --build --session > smart-tests-subset.txt +# run the results of the subset request +bundle exec rspec $(cat smart-tests-subset.txt) --format d --format RspecJunitFormatter --out rspec.xml +---- + +[[vitest-subset]] +=== Vitest + +First, you'll request a subset of tests from your entire test suite. Then, you'll pass this list to Vitest to run. + +==== Requesting a subset of tests + +Find the `vitest run test` command used to run tests in your CI script. + +First, duplicate the `vitest` command you normally use to run `list` command and add the `--filesOnly` option. Then, output the result to a text file. For example: + +[source,bash] +---- +vitest list --filesOnly > test_list.txt +---- + +This command outputs the complete list of test targets that would normally run (without actually running them) to a file called `test_list.txt`. +The subset service will divide this full list into a subset and a remainder. + +Next, pipe the file you just created into `smart-tests subset` to request a subset from the full list: + +[source,bash] +---- +cat test_list.txt | smart-tests subset --build +---- + +* See <> for setting `` and ``. + +This creates a file called `smart-tests-subset.txt` that you can pass into Vitest. + +==== Running a subset of tests + +To run the subset, include the subset list after `vitest`. 
For example: + +[source,bash] +---- +vitest run $(cat smart-tests-subset.txt) +---- + +==== Summary + +In summary, here's the flow before: + +[source,bash] +---- +# Your normal command to run tests looks something like this +vitest run test +---- + +And the flow after: + +[source,bash] +---- +# generate the full list that would normally run +vitest list --filesOnly > test_list.txt + +# request a subset from all features that would normally run +cat test_list.txt | smart-tests subset --build + +# run the results of the subset request +vitest run $(cat smart-tests-subset.txt) +---- + +[[xctest-subsetting]] +=== XCTest + +This profile only supports xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting]. +See that page for instructions. + + +== Other instructions + +If you're not using any of these, see xref:resources:integrations/raw.adoc[`raw` profile for custom test runners] or xref:resources:integrations/use-the-generic-file-based-runner-integration.adoc[`file` profile for unsupported test runners] . + +== Checking for integration issues + +[NOTE] +-- +Coming soon! 
+-- diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/use-groups-to-split-subsets.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/use-groups-to-split-subsets.adoc new file mode 100644 index 000000000..4001dec11 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/use-groups-to-split-subsets.adoc @@ -0,0 +1,184 @@ += Use groups to split subsets +:slug: use-groups-to-split-subsets + +It's common for teams to split up a large test suite into many smaller groups, often executed in parallel. + +Perhaps your pipeline tests multiple components or plugins, or maybe your framework forces this kind of organization (e.g., xref:resources:integrations/dotnet-test.adoc[dotnet test] organizes tests by `.dll` ). In any case, all the groups are tested together (comprising a single xref:concepts:test-session.adoc[Test Session] ), but each group has its own small test suite. + +To better support this scenario, xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] makes it possible to request a single "high level" subset across all components. + +Now, a new concept called *groups* intends to improve the usability of this approach: + +* First, you can now use `--split-by-group` to split the "high level" subset file into one file for each group, simplifying test distribution across groups. 
+* Second, when you use `--split-by-group` , the CLI writes a special file informing you which groups you can skip entirely, saving setup time.
+
+[NOTE]
+--
+Currently, only the *Maven* profile supports test groups, so this document uses instructions for Maven.
+--
+
+== Assign tests to groups
+
+Before you can use `--split-by-group` , you need to assign your tests to their respective groups. Each test can belong to one test group at a time. A group aligns with component/plugin/DLL -- whatever your organizational container is.
+
+You can assign a set of tests to a group by running `smart-tests record tests` with the `--group=[groupName]` option.
+
+This means you'll run `smart-tests record tests` ten times if you have ten groups.
+
+=== Example: Group assignment using the CLI
+
+For example, we have three components: A, B, and C. Each group has 5 test items. We'll assign each to groups. (Note the use of `--group` on each `smart-tests record tests` )
+
+image::ROOT:groups-2-record-after.png[Assigning tests to 3 groups]
+
+For clarity, here are the commands:
+
+[source]
+----
+# before building software
+smart-tests record build \
+  --name jenkins-build-123\
+  [...other options]
+
+...[build steps]...
+
+# before running tests create a test session so we can collect all the results together
+smart-tests record session \
+  --build jenkins-build-123 \
+  --session test-session-123
+
+  # componentA tests
+  ...[run componentA tests]...
+  smart-tests record tests \
+    --build jenkins-build-123 \
+    --session test-session-123 \
+    --group=componentA \
+    [...other options] \
+    /path/to/componentA/results
+
+  # componentB tests
+  ...[run componentB tests]...
+  smart-tests record tests \
+    --build jenkins-build-123 \
+    --session test-session-123 \
+    --group=componentB \
+    [...other options] \
+    /path/to/componentB/results
+
+  # componentC tests
+  ...[run componentC tests]... 
+ smart-tests record tests \ + --build jenkins-build-123 \ + --session test-session-123 \ + --group=componentC \ + [...other options] \ + /path/to/componentC/results +---- + +[NOTE] +-- +The examples on this page describe a scenario with only 3 groups. This is just for illustrative purposes. In reality, this approach is for teams with lots of groups (e.g. 10+). +-- + +== Split subsets by group + +Once you've assigned your tests to groups, you can create a high-level subset and split it by group. This involves two commands run one after another in your main CI pipeline before you run any component pipelines: + +. `smart-tests subset` with the `--split` option added. This option modifies the command's output to return a subset ID string instead of the subset contents. You'll use this ID in the next command. * Include the `--get-tests-from-previous-sessions` and `--output-exclusion-rules` options. +* {PRODUCT} creates a "high-level" exclusion list in this step and stores it for retrieval in the next step. +. `smart-tests split-subset` with the `--split-by-group` option. This command outputs several files for your pipeline (see below). * The `--subset-id` option is also required. This uses the value from the previous command. +* In this step, {PRODUCT} splits the just-created exclusion list by group. + +=== Special output files + +When you run `smart-tests split-subset` with the `--split-by-group` and the `--output-exclusion-rules` option, the CLI creates several files: + +* `subset-groups.txt` * Since you used `--output-exclusion-rules` with `smart-tests subset` , this file contains a list of the groups you can skip entirely. +* `subset-[groupname].txt` (one file for each group) * Each file contains the normal subset output but only for that group's tests. You can pass these files into the test process for each group. +* Since you used `--output-exclusion-rules` with `smart-tests subset` , these files contain exclusion rules. You're supposed to *exclude* these tests. 
+* `subset-nogroup.txt` * This file contains tests that had no group assignment, if there are any. + +See the xref:resources:cli-reference.adoc[CLI reference] for additional options. + +==== Example: Split output by group using the CLI + +In this example, we'll continue the scenario from above. We have three groups, each with five tests. We've already assigned each test to its respective group. Now we want to use that. + +This diagram shows the flow. First, we create a subset from all the tests across all groups. Then we split those into groups. Note the special file `subset-groups.txt` , which shows us we can skip component B entirely. + +image::ROOT:zis-with-groups.png[] + +[NOTE] +-- +Note that the diagram shows the contents of `subset-component*.txt` as a list of classes. This is the output format for Maven. + +If you use a different test runner, your output might be different. Every test runner has its own exclusion syntax. +-- + +[source] +---- +# before building software +smart-tests record build \ + --name jenkins-build-123\ + [...other options] + +...[build steps]... 
+ +# before running tests create a test session so we can collect all the results together +smart-tests record session \ + --build jenkins-build-123 \ + --session test-session-123 + +# create the server side subset +smart-tests subset \ + --build jenkins-build-123 \ + --session test-session-123 \ + --split \ + --get-tests-from-previous-sessions \ + --output-exclusion-rules > subset-id.txt + +# split that for use locally +smart-tests split-subset \ + --subset-id $(cat subset-id.txt) \ + --split-by-groups \ + --output-exclusion-rules \ + maven +---- + +At this stage, we have several files: + +* `subset-groups.txt` * `componentB` +* `subset-componentA.txt` * Exclusion rules for component A +* `subset-componentB.txt` * Exclusion rules for component B +* `subset-componentC.txt` * Exclusion rules for component C + +Because `subset-groups.txt` contains `componentB` , we can write a script to skip that group's test setup entirely. (How you do this depends on your setup. Need help? Let us know.) + +Finally, we pass each component's exclusion file into the remaining test processes for each group: + +* `subset-componentA.txt` gets passed into the test process for component A +* `subset-componentC.txt` gets passed into the test process for component C + +We follow the normal instructions for using an exclusion rule (see the documentation for your test runner) so that only those tests run. + +For example, here's a basic invocation of xref:resources:integrations/maven.adoc[Maven] for component A, complete with test recording at the end: + +[source] +---- +# component A +# run component A tests with {PRODUCT} exclusions +mvn test -Dsurefire.excludesFile=$PWD/subset-componentA.txt + +# record tests +smart-tests record tests \ + --session test-session-123 \ + --group=componentA \ + [...other options] \ + /path/to/componentA/results +---- + +As a result, we entirely eliminated component B (in this example) from the test process, saving time! 
+ +==== A note about new tests + +Because component B was entirely skipped, if there are any *new* group B tests, they didn't get run. This is the tradeoff of saving the setup time. To mitigate this, you should have a full run scheduled later in your pipeline, so they'll run then. \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc new file mode 100644 index 000000000..ee2e1dac0 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc @@ -0,0 +1,336 @@ += Zero Input Subsetting +:slug: zero-input-subsetting + +Normally, when you run `smart-tests subset` , the {PRODUCT} CLI gathers the full list of tests on the client side and submits it with the subset request. (Highlighted in gray) + +The subset request then returns a list of tests to *include* (i.e., run these tests): + +.Subsetting with the CLI +image::ROOT:subsetting-diagram-2x.png[Subsetting with the CLI,role="screenshot"] + +We've created a complementary approach called *Zero Input Subsetting* . With this approach, the CLI does not have to gather and submit the full list of tests. Instead, the server generates the full list of tests from the last two weeks of recorded sessions. To ensure new tests are run, the CLI outputs exclusion rules instead of inclusion rules. + +[WARNING] +-- +Zero Input Subsetting works better with some xref:concepts:test-session.adoc[Test Session] Layouts than others, so contact your Customer Success Manager before you start using this feature. We're here to help! 
+-- + +You can adopt this approach by adding two options to `smart-tests subset` : + +* `--get-tests-from-previous-sessions` , and +* `--output-exclusion-rules` + +The subset request then returns a list of tests to *exclude* (i.e., *don't* run these tests): + +.Subsetting using Zero Input Subsetting +image::ROOT:subset-exclude.png[Subsetting using Zero Input Subsetting,role="screenshot"] + +The following CLI profiles/integrations support Zero Input Subsetting: + +* #dotnet-test[#dotnet-test] +* #gradle[#gradle] +* #maven[#maven] +* xref:resources:integrations/raw.adoc[`raw` profile for custom test runners] + +https://www.launchableinc.com/support[Let us know] if you want to see support for another test runner! + +Also, see xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/use-groups-to-split-subsets.adoc[Using groups to split subsets] which expands this behavior. + +== Instructions for test runners/build tools + +=== Android Compatibility Test Suite (CTS) + +Find your `run cts` command in your CI script. These commands will go before that command. + +First, request an exclusion pattern: + +`smart-tests subset --get-tests-from-previous-sessions --output-exclusion-rules --build --session cts > smart-tests_exclusion_filter.txt` + +See xref:resources:cli-reference.adoc[the CLI reference] for details about `` and `` . + +Then pass that exclusion pattern into `run cts` using `xargs` : + +`cat ./smart-tests_exclusion_filter.txt | xargs ./tools/cts-tradefed run cts ` + +=== dotnet test + NUnit +// We'll keep "Launchable NUnit" here until we publish a renamed package +==== Adding Launchable NUnit integration as a dependency + +Your test project needs to depend on https://www.nuget.org/packages/Launchable.NUnit[Launchable NUnit integration] . 
Run the following command against your project: + +`dotnet add package Launchable.NUnit` + +You then need to add the assembly level attribute to activate this integration in your test project. Typically this goes into `AssemblyInfo.cs:` + +[source] +---- +// activate Launchable NUnit integration +[assembly: Launchable.NUnit.Launchable] +---- + +See https://github.com/launchableinc/nunit[https://github.com/launchableinc/nunit] for full source code. + +==== Request and execute subset + +Find your `dotnet test` command in your CI script. These commands will go before that command. + +First, request an exclusion pattern: + +`smart-tests subset --get-tests-from-previous-sessions --output-exclusion-rules --build --session dotnet --bare > smart-tests_exclusion.txt` + +See xref:resources:cli-reference.adoc[the CLI reference] for details about `` and `` . + +Then set the `SMART_TESTS_REST_FILE_PATH` environment variable to point to this file, then run the tests like you normally do: + +[source] +---- +export SMART_TESTS_REST_FILE_PATH=$PWD/smart-tests_exclusion.txt +dotnet test +---- + +=== Gradle + +First, you'll request an exclusion list from your full test suite. Then, you'll pass this list to Gradle. + +==== Requesting an exclusion list + +First, you need to add a snippet to your Gradle config to enable test exclusion via the Gradle command line: + +[source] +---- +test { + if (project.hasProperty('excludeTests')) { + exclude project.property('excludeTests').split(',') + } +} +---- + +Then, find the `gradle` command used to run tests in your CI script. + +Before that command, run `smart-tests subset` to request an exclusion list. The subset and exclusion lists are generated from the union of tests recorded in the last two weeks. + +`smart-tests subset --build --session --get-tests-from-previous-sessions --output-exclusion-rules gradle > smart-tests-exclusion-list.txt` + +See xref:resources:cli-reference.adoc[the CLI reference] for details about `` and `` .
+ +This creates a file called `smart-tests-exclusion-list.txt` . This file contains a list of test classes formatted for passing into Gradle like this: + +`-PexcludeTests=com/example/FooTest.class,com/example/BarTest.class` + +==== Running a subset of tests + +Then pass this file into your existing command, like shown below. + +[source] +---- +gradle test $(cat smart-tests-exclusion-list.txt) +# equivalent to gradle test -PexcludeTests=com/example/FooTest.class,com/example/BarTest.class +---- + +Note: If the exclusion list is very large, it may be unable to specify it directly from the command. In that case, you can change the Gradle config to read from `smart-tests-exclusion-list.txt` . + +Change the Gradle config as follows: + +[source] +---- +test { + if (project.hasProperty('excludeTestsTxt')) { + exclude new File(project.property('excludeTestsTxt')).text.replaceFirst('-PexcludeTests=', '').trim().split(',') + } +} +---- + +Then, specify the exclusion tests file from the command. + +`gradle test -PexcludeTestsTxt=$PWD/smart-tests-exclusion-list.txt` + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your normal command to run tests looks something like this +gradle test +---- + +And the flow after: + +[source] +---- +# request an exclusion list from all tests +smart-tests subset --build --session --get-tests-from-previous-sessions --output-exclusion-rules gradle > smart-tests-exclusion-list.txt +# run tests, excluding deprioritized tests, leaving only the recommended subset +gradle test $(cat smart-tests-exclusion-list.txt) +---- + +=== Gradle + TestNG + +First, you'll request an exclusion list from your full test suite. Then, you'll pass this list to Gradle. + +==== Requesting an exclusion list + +First, find the `gradle` command used to run tests in your CI script. + +Before that command, run `smart-tests subset` to request an exclusion list. 
The subset and exclusion lists are generated from the union of tests recorded in the last two weeks. + +`smart-tests subset --build --session --get-tests-from-previous-sessions --output-exclusion-rules gradle --bare > smart-tests-exclusion-list.txt` + +* See xref:resources:cli-reference.adoc[the CLI reference] for details about `` and `` . +* Don't forget the `--bare` option after `gradle` ! + +This creates a file called `smart-tests-exclusion-list.txt` . This file contains a list of test classes formatted for passing into Gradle, like this: + +[source] +---- +com.example.FooTest +com.example.BarTest +... +---- + +==== Running a subset of tests + +First, you need to add a dependency declaration to `build.gradle` so that the right tests get excluded when TestNG runs: + +[source] +---- +dependencies { + ... + testRuntime 'com.launchableinc:launchable-testng:1.3.0' +} +---- + +Then export the exclusion list file path as an environment variable before you run `gradle test` , like shown below. + +[source] +---- +export SMART_TESTS_REST_FILE_PATH=$PWD/smart-tests-exclusion-list.txt +gradle test +---- + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your normal command to run tests looks something like this +gradle test +---- + +And the flow after: + +[source] +---- +# request an exclusion list from all tests +smart-tests subset --build --session gradle --bare > smart-tests-exclusion-list.txt +# run tests, excluding deprioritized tests, leaving only the recommended subset +export SMART_TESTS_REST_FILE_PATH=$PWD/smart-tests-exclusion-list.txt +gradle test +---- + +=== Maven + +First, you'll request an exclusion list from your full test suite. Then, you'll pass this list to Maven. + +==== Requesting an exclusion list + +Find the `mvn test` command used to run tests in your CI script. + +Before that command, run `smart-tests subset` to request an exclusion list.
The subset and exclusion lists are generated from the union of tests recorded in the last two weeks. + +`smart-tests subset --build --session --get-tests-from-previous-sessions --output-exclusion-rules maven > smart-tests-exclusion-list.txt` + +See xref:resources:cli-reference.adoc[the CLI reference] for details about `` and `` . + +This creates a file called `smart-tests-exclusion-list.txt` that you can pass into Maven. + +==== Running a subset of tests + +To exclude deprioritized tests and only run the recommended subset, use the `-Dsurefire.excludesFile` option. For example: + +`mvn test -Dsurefire.excludesFile=$PWD/smart-tests-exclusion-list.txt` + +[WARNING] +-- +If your build already depends on `surefire.includesFile` , or `/` , those and our exclusion list will collide and not work as expected. https://www.launchableinc.com/support[Contact us] to resolve this problem. +-- + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your normal command to run tests looks something like this +mvn test +---- + +And the flow after: + +[source] +---- +# get an exclusion list from the server +smart-tests subset --build --session --get-tests-from-previous-sessions --output-exclusion-rules maven > smart-tests-exclusion-list.txt +# run tests, excluding deprioritized tests, leaving only the recommended subset +mvn test -Dsurefire.excludesFile=$PWD/smart-tests-exclusion-list.txt +---- + +=== Maven + TestNG + +First, you'll request an exclusion list from your full test suite. Then, you'll pass this list to Maven. + +==== Requesting an exclusion list + +Find the `mvn test` command used to run tests in your CI script. + +Before that command, run `smart-tests subset` to request an exclusion list. The subset and exclusion lists are generated from the union of tests recorded in the last two weeks. 
+ +`smart-tests subset --build --session --get-tests-from-previous-sessions --output-exclusion-rules maven > smart-tests-exclusion-list.txt` + +See xref:resources:cli-reference.adoc[the CLI reference] for details about `` and `` . + +This creates a file called `smart-tests-exclusion-list.txt` that you can pass into Maven. + +==== Running a subset of tests + +First, modify your `pom.xml` so that it includes {PRODUCT} TestNG integration as a test scope dependency: + +[source] +---- + + com.launchableinc + launchable-testng + 1.2.1 + test + +---- + +Then export the exclusion list file path as an environment variable before you run `mvn test` , like shown below. + +[source] +---- +export SMART_TESTS_REST_FILE_PATH=$PWD/smart-tests-exclusion-list.txt +mvn test +---- + +==== Summary + +In summary, here's the flow before: + +[source] +---- +# your normal command to run tests looks something like this +mvn test +---- + +And the flow after: + +[source] +---- +# get an exclusion list from the server +smart-tests subset --build --session --get-tests-from-previous-sessions --output-exclusion-rules maven > smart-tests-exclusion-list.txt +# run tests, excluding deprioritized tests, leaving only the recommended subset +export SMART_TESTS_REST_FILE_PATH=$PWD/smart-tests-exclusion-list.txt +mvn test +---- diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/smart-tests-extension-for-VS-code.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/smart-tests-extension-for-VS-code.adoc new file mode 100644 index 000000000..d79dd0b53 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/smart-tests-extension-for-VS-code.adoc @@ -0,0 +1,35 @@ +include::ROOT:partial$abbr.adoc[] + += {PRODUCT} extension for Visual Studio Code + +{PRODUCT} extension for Visual Studio Code is ideal when you want to quickly sanity check your local commit before pushing the changes back to the team repository. 
You can easily use Predictive Test Selection on your local machine, outside CI. + +== Requirements + +The following are required to use {PRODUCT} extension for Visual Studio Code: + +* link:https://pypi.org/project/launchable/[{PRODUCT} CLI] +* link:https://marketplace.visualstudio.com/items?itemName=ms-python.python[Python extension for VS Code] + +== Usage + +. Install link:https://marketplace.visualstudio.com/items?itemName=launchable.vscode-launchable[{PRODUCT} extension for VS Code]. +. Click the {PRODUCT} icon, which appears in the Activity bar. +. Press the button *Start Test*. +. Enter the API key. +. Choose your Test runner. +. Results will start appearing, after the test execution finishes. + +== Supported test runners + +Currently, {PRODUCT} extension for VS Code, supports the following test runners: + +- maven +- rspec +- go-test +- pytest + +[NOTE] +-- +If not using any of these, then use the `file` profile. +-- diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/use-cases-for-predictive-test-selection.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/use-cases-for-predictive-test-selection.adoc new file mode 100644 index 000000000..f85043a39 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/use-cases-for-predictive-test-selection.adoc @@ -0,0 +1,75 @@ += Use-cases for Predictive Test Selection + +[[use-case-1]] +== Use-case 1: In-place subset of a test suite + +*Challenge:* +An existing test suite may be taking a long time to run. Some teams’ capacity for executing tests is finite and limited, yet the demand to run tests is too high. Even in cases where test execution capacity is scalable and elastic, the number of tests are so many that it’s costing too much money. + +To that end, the team would want to shorten the execution time of that test suite. + +[NOTE] +==== +This use-case also serves to enhance developer productivity, especially when merging changes. 
+Test failures arising from flaky tests often lead to frustration and unnecessary delays. +==== + +*Proposed Solution:* +Introduce a subset in place. Run a subset of tests at the same stage of your software delivery lifecycle. + +*Value unlocked by the use-case:* + +1. *Reduction in test runtime:* + The Time Savings Report gives the amount of time saved running a test suite. + - Lower machine costs (cloud compute costs, mobile devices, cheaper UI testing with Selenium, etc.) +2. *More frequent runs:* + Since test runtime reduces, your team can run tests more frequently and test more often (if desired). + - Optimized resource allocation (instead of running tests that don’t fail, use your testing budget to run tests that fail). + +=== Implement the solution + +This is the easiest use-case to implement with {PRODUCT}. A {PRODUCT} subset is used to replace the current full test execution. + +Adding a defensive run: +After subsetting your tests, you should make sure to run the full suite of tests at some point later in your pipeline. +This enables you to catch any issues that may not be caught by the subset. + +For example, once you start running a subset of an integration test suite that runs on pull requests, you should make sure to run the *full* integration test suite after a PR is merged (and record the outcome of those runs with `{PRODUCT} record tests`). + +.Use case 1 example flow +image::ROOT:use-case-1.png[Use case 1 example flow,role="screenshot"] + +''' + +[[use-case-2]] +== Use-case 2: Shift-left a test suite to find issues earlier in the pipeline + +*Challenge:* +The feedback on changes may be coming in too late because the tests are towards the right in your delivery pipeline (e.g. UI tests, E2E, nightly tests). +These are run infrequently because of long runtimes. In several teams, another common challenge is that their main/dev branch is too unstable, causing QA engineers a lot of overhead to deal with failures. 
+ +*Proposed Solution:* +Run a subset of tests earlier in your software delivery lifecycle (“shift left”). + +*Value unlocked by the use-case:* + +1. *Ensure a stable main/dev branch,* maintain release readiness, and reduce effort in tracking failures. +2. *Faster feedback time* by running tests earlier (e.g. a nightly test run can be run on every PR, or every hour to get fast feedback; reducing feedback time by hours if not days). +3. *More testing by more frequent test runs:* + - Optimized resource allocation (instead of running tests that don’t fail, use your testing budget to run tests that fail and run them more often). + +=== Implement the solution + +If you aim to run a short subset of a long test suite earlier in the development process, +you may need to set up a new pipeline to run tests in that development phase. +For example, if you currently run a long nightly test suite and want to run a subset every hour, +you may need to create a pipeline to build, deploy, and run the subset if one doesn’t already exist. + +You will also want to continue running the full test suite every night +(and recording the outcome of those runs with `{PRODUCT} record tests`). + +.Use case 2 example flow +image::ROOT:use-case-2.png[Use case 2 example flow,role="screenshot"] + +''' + diff --git a/smart_tests/docs/modules/features/pages/predictive-test-selection/viewing-time-savings.adoc b/smart_tests/docs/modules/features/pages/predictive-test-selection/viewing-time-savings.adoc new file mode 100644 index 000000000..b77460a6e --- /dev/null +++ b/smart_tests/docs/modules/features/pages/predictive-test-selection/viewing-time-savings.adoc @@ -0,0 +1,71 @@ += Viewing time savings +:slug: viewing-time-savings + +You can view aggregate time savings from Predictive Test Selection on the *Time Savings* page. Time savings is the test execution time hypothetically saved by not executing all tests via Predictive Test Selection. 
+ +image::ROOT:smart-tests-monthly-time-saved.png[] + +You can also view time savings for an individual test session in the *Subset Impact* section of a test session row. Generally speaking, though, this page explains the monthly report page. + +== Month + +Time Savings is aggregated by month based on the timestamp of the test session. + +A test session is not closed until seven days after creation. Until then, you can record test results. Time Savings values are only official after that point. This means it takes seven days for last month’s values to finalize, hence the yellow "Not Final Yet" badge. + +== PTS Test Sessions (count) + +This is the number of test sessions for which time savings was calculated. This number might be slightly lower than the total number of sessions with subset requests. + +Reasons for this include: + +. no tests were reported for the session +. too many recorded tests had no past history and/or +. the test session had too many subset requests + +== Duration + +Time savings is “the test execution time hypothetically saved by not executing all tests [using Predictive Test Selection]." + +Since you didn't execute these tests, we do not know the actual test time. However, based on the historical test executions, we can estimate the time it would have taken to execute. + +[NOTE] +-- +{PRODUCT} learns about test durations from the JUnit reports you record. Therefore they represent machine time. +-- + +We use *up to 90 days of test execution results* to estimate the time to execute a test. + +=== Total Duration Without {PRODUCT} + +For each test session, we estimate how long your test sessions *would have* taken to run in total. We use each xref:concepts:subset.adoc[Subset] Test List to calculate this. Then we sum this up for all test sessions. + +This calculation handles various edge cases described in the below table. + +|=== +|Test recorded in test session? |Recorded in last 90 days?
|Duration used in calculation + +|Yes +|N/A +|Actual recorded duration + +|No +|Yes +|Average duration over last 90 days + +|No +|No +|Average duration of all tests in the test session +|=== + +=== Total Duration With {PRODUCT} + +Difference between Time Saved and Total Duration Without {PRODUCT}. + +Note that because all tests in the input list are included in `Total Duration Without {PRODUCT}` -- even if they aren't recorded -- the recorded duration for a test session can be different than this value if you don't record all subset tests (or if tests are new). + +=== Time Saved + +For each test session, we take the sum of the estimated duration of all unrecorded remainder/rest tests, using the average duration of each test over the last 90 days. This value is shown on each test session row in the {PRODUCT} web app. + +Then, for the Time Savings page, then we sum this for all test sessions grouped by month. \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/test-notifications.adoc b/smart_tests/docs/modules/features/pages/test-notifications.adoc new file mode 100644 index 000000000..3f337fa05 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/test-notifications.adoc @@ -0,0 +1,7 @@ += Test Notifications +:slug: test-notifications + +{PRODUCT} supports various ways to notify engineers as soon as tests fail, so that developers & QA people can immediately start analyzing the problems and working toward a resolution. 
+ +* xref:features:test-notifications/github-app-for-test-sessions.adoc[Test Notifications via GitHub PR] +* xref:features:test-notifications/test-notifications-via-slack.adoc[Test Notifications via Slack] \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/test-notifications/github-app-for-test-sessions.adoc b/smart_tests/docs/modules/features/pages/test-notifications/github-app-for-test-sessions.adoc new file mode 100644 index 000000000..efc4c4b1e --- /dev/null +++ b/smart_tests/docs/modules/features/pages/test-notifications/github-app-for-test-sessions.adoc @@ -0,0 +1,22 @@ += GitHub app for test sessions +:slug: github-app-for-test-sessions + +Developers can get convenient and timely access to the details of the tests run in their GitHub pull requests, so that they can immediately start working on failures, even before the whole CI process finishes. These are delivered as comments to pull requests. + +They can also quickly access associated workflows and test files via links provided in the {PRODUCT} web app. + +image::ROOT:github-comment.png[] + +== Configuration and features + +First, link:https://github.com/apps/launchable-app/installations/new[install the {PRODUCT} GitHub app] . Your GitHub org admin might have to approve that process. + +Next, verify that your test session is associated with a GitHub pull request. From link:https://app.launchableinc.com/[the {PRODUCT} webapp] , find a test session that should be associated with a GitHub pull request. If you see the dropdown as shown below, you are fully integrated. This association happens automatically when the {PRODUCT} CLI detects environment variables that are set by typical CI systems to indicate that the test is running for a GitHub pull request. + +image::ROOT:screenshot-2024-04-29-at-2-14-11-pm.png[] + +Selecting "View workflow run" takes you to the workflow in GitHub Actions that this test session was triggered from. 
Clicking "View pull request" will take you directly to the pull request page for this session. + +Additionally, once the GitHub app is integrated, you will also be able to jump directly to failing tests that are tied to files stored in GitHub repositories. To access this, navigate to any test session page with a failure. Once there, you will see a column at the bottom of the page that says "Failed tests". For each failed test, you will see an ellipsis to the far right, as shown below. Once clicked, you will see an "Open in GitHub" link. This allows you to open this test file directly in GitHub. + +image::ROOT:screenshot-2024-04-29-at-1-48-30-pm.png[] \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/test-notifications/test-notifications-via-slack.adoc b/smart_tests/docs/modules/features/pages/test-notifications/test-notifications-via-slack.adoc new file mode 100644 index 000000000..d6109b4f2 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/test-notifications/test-notifications-via-slack.adoc @@ -0,0 +1,173 @@ += Test Notifications via Slack +:slug: test-notifications-via-slack + +{PRODUCT}'s test notification feature notifies developers via Slack when their test sessions finish so they can immediately take action on the results, whether to triage failures or merge a PR. Developers can create subscriptions to receive personal notifications about test sessions run against their branches/pull requests or other test sessions they care about. + +image::ROOT:slack-desktop-with-app-messages.png[] + +== Getting started + +To set up notifications: + +. Start sending data to {PRODUCT}. +. Install the {PRODUCT} Slack app in your Slack workspace. +. Link your {PRODUCT} account and your Slack account. +. Create your first notification subscription in Slack.
+ +== Sending data to {PRODUCT} + +To begin sending data to {PRODUCT}, follow the steps in xref:send-data-to-smart-tests:send-data-to-smart-tests.adoc[Send data to {PRODUCT}]. You must have test sessions recorded in your {PRODUCT} workspace to receive notifications about them. + +== Install the {PRODUCT} Slack app + +. Log into the https://app.launchableinc.com/[{PRODUCT} dashboard]. +. Navigate to Test Notifications. +. Select btn:[Install Slack App] in the Slack app section. This will open the Slack app authorization flow. You may need to log in to Slack at this stage. +. Authorize the app to install it. +. Done! + +== Link your {PRODUCT} and Slack accounts + +Once the {PRODUCT} Slack app has been installed, you and your teammates can link your {PRODUCT} and Slack accounts. This lets you set up subscriptions via the app. + +[NOTE] +-- +If you haven't signed up for {PRODUCT}, or are not a member of your team's {PRODUCT} organization, request an /docs/concepts/organization/#organization-invitation-link[#Organization invitation link] so you can sign up (if needed) and join. +-- + +You communicate with the app through direct messages: + +. In Slack, select the New message icon in the top section of the left navigation (or use the Command+N or Control+N keyboard shortcut) to compose a new message. +. In the "To:" field, enter *{PRODUCT}* and select the app from the dropdown list. +. In the message text field, enter */smart-tests link* and hit Enter. +. Select the button in the response message to initiate the flow to connect your {PRODUCT} account to your Slack account. + +== Create a notification subscription + +After linking your {PRODUCT} and Slack accounts, create your first notification subscription. + +=== Via UI + +After linking your account, create a new subscription from the home tab of the {PRODUCT} app in Slack: + +image::ROOT:subscription-ux.png[] + +=== Via message + +Create a subscription by sending a message to the {PRODUCT} app. 
The syntax for creating a subscription is: + +`/smart-tests subscribe <workspace> <key>=<value>` + +* `<workspace>` is the workspace's name containing the test sessions you want to be notified about. +* `<key>=<value>` is a key-value pair that contains the CI environment variable and value that indicates the test sessions you want to subscribe to (e.g., `GITHUB_ACTOR=octocat` ). More on this below ⤵ + +==== Common key-value pairs for subscriptions + +Typically, you will want to be notified about _your_ test runs. Each CI system has an environment variable that indicates the user that kicked off a build or pipeline. + +For example, GitHub Actions has an environment variable called `GITHUB_ACTOR` . So if you use GitHub Actions, you can subscribe to your test runs using a command such as: + +`/smart-tests subscribe <workspace> GITHUB_ACTOR=<your username>` + +The tabbed section below describes how to compose `/smart-tests subscribe` for major CI tools: + +[.multilanguage-custom-table, options="header", cols="5"] +|=== +| Azure DevOps +| CircleCI +| GitHub Actions +| GitLab CI +| Jenkins + +a| +[cols="1,1"] +|=== +|Environment variable |`BUILD_REQUESTEDFOREMAIL` +|Description |The person who pushed or checked in the changes. +|=== + +|Example: + +`/smart-tests subscribe <workspace> Build.RequestedForEmail=<your email>` + +a| +[cols="1,1"] +|=== +|Environment variable |`CIRCLE_USERNAME` +|Description |The GitHub or Bitbucket username of the user who triggered the pipeline (only if the user has a CircleCI account). +|=== + +|Example: + +`/smart-tests subscribe <workspace> CIRCLE_USERNAME=<your username>` + +a| +[cols="1,1"] +|=== +|Environment variable |`GITHUB_ACTOR` +|Description |The name of the person or app that initiated the workflow. For example, `octocat`. +|=== + +|Example: + +`/smart-tests subscribe <workspace> GITHUB_ACTOR=<your username>` + +a| [NOTE] +-- +GitLab CI has 2 environment variables you can use. +-- + +[cols="1,1"] +|=== +|Environment variable |`GITLAB_USER_EMAIL` +|Description |The email of the user who started the job. 
+|=== + +|Example: + +`/smart-tests subscribe <workspace> GITLAB_USER_EMAIL=<your email>` + +[cols="1,1"] +|=== +|Environment variable |`GITLAB_USER_LOGIN` +|Description |The login username of the user who started the job. +|=== + +|Example: + +`/smart-tests subscribe <workspace> GITLAB_USER_LOGIN=<your username>` + +a| [NOTE] +-- +Requires the *build user vars* Jenkins plugin: https://plugins.jenkins.io/build-user-vars-plugin/[https://plugins.jenkins.io/build-user-vars-plugin/] +-- + +[cols="1,1"] +|=== +|Environment variable |`BUILD_USER_EMAIL` +|Description |Email address of the user who started the build. +|=== + +|Example: + +`/smart-tests subscribe <workspace> BUILD_USER_EMAIL=<your email>` + +|=== + +==== Other key-value pairs + +Since the subscription mechanism is based on CI environment variables, you have a lot of flexibility regarding subscriptions. CI tools expose many environment variables (and you can add your own) that you can pass into `/smart-tests subscribe` to create custom subscriptions. For more info, refer to your CI tool's documentation. + +== Receive notifications + +Once you have set up a subscription, the {PRODUCT} app will send you a personal message each time a new test session matching the subscription criteria is recorded: + +[cols="1,1", options="header"] +|=== +|Passing notification |Failing notification + +|image:https://www.datocms-assets.com/59840/1673618053-passing-session-notification.png[Passing notification] +|image:https://www.datocms-assets.com/59840/1673618081-failed-session-notification.png[Failing notification] +|=== + +Failing notifications include a link to view test results in {PRODUCT} and a quick summary of failing tests, including annotations for Unhealthy Tests. This helps you get started triaging without checking your email over and over. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/test-results-and-reports.adoc b/smart_tests/docs/modules/features/pages/test-results-and-reports.adoc new file mode 100644 index 000000000..cae1d667f --- /dev/null +++ b/smart_tests/docs/modules/features/pages/test-results-and-reports.adoc @@ -0,0 +1,24 @@ += Test Reports +:slug: test-results-and-reports + +You can view test reports in the {PRODUCT} web app as soon as you start sending test results + to {PRODUCT} using the {PRODUCT} CLI. {PRODUCT} provides a richer view of test results, helping developers triage and quickly fix failures. + +image::ROOT:test-session-details-with-content.png[] + +For quick access to test results, the {PRODUCT} CLI prints out a link to this page every time you record test results: + +image::ROOT:link-to-results-example.png[] + +In addition, the *Test Sessions* page shows all of your sessions in one place for easy navigation. No more digging around build logs: + +image::ROOT:test-sessions-layout.png[] + +== Test Status + +{PRODUCT} defines the following statuses. For example, let's say 100 tests were run, 10 failed, and 3 of them flaked. This means 90 tests passed, but 3 of them passed after a number of retries, and 10 tests kept failing throughout. + +* Success: A test that passed. +* Failure: A test that failed in all executions. +* Flake: A test failure that eventually succeeded after a number of retries within the same test session. +* Skip: A test that was skipped. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/test-suite-parallelization.adoc b/smart_tests/docs/modules/features/pages/test-suite-parallelization.adoc new file mode 100644 index 000000000..dea2ee361 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/test-suite-parallelization.adoc @@ -0,0 +1,6 @@ += Test Suite Parallelization +:slug: test-suite-parallelization + +Using duration information from past test runs, {PRODUCT} can automatically create evenly-sized bins of tests for you to run your test suite in several parallel workers. This can reduce the effective length of your test suite drastically. + +For instructions on how to do this, see xref:features:test-suite-parallelization/parallelize-your-test-suite-with-the-smart-tests-cli.adoc[Parallelizing your test suite with the {PRODUCT} CLI] . \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/test-suite-parallelization/parallelize-your-test-suite-with-the-smart-tests-cli.adoc b/smart_tests/docs/modules/features/pages/test-suite-parallelization/parallelize-your-test-suite-with-the-smart-tests-cli.adoc new file mode 100644 index 000000000..ede037e02 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/test-suite-parallelization/parallelize-your-test-suite-with-the-smart-tests-cli.adoc @@ -0,0 +1,42 @@ +include::ROOT:partial$abbr.adoc[] += Parallelize your test suite with the {PRODUCT} CLI +:slug: parallelize-your-test-suite-with-the-smart-tests-cli + +To parallelize your test suite, you can take advantage of the parallelization feature built into `smart-tests subset` . `smart-tests subset` 's primary purpose is Predictive Test Selection, but here, we're using `--target 100%` to skip the test selection part but keep the parallelization feature. + +. Record your test results via xref:send-data-to-smart-tests:send-data-to-smart-tests.adoc[Sending data to {PRODUCT}] . +. 
Kick off the process by running `smart-tests subset` with the `--split` option and `--target 100%` . The command will output an ID string you should save and pass into each runner. See xref:predictive-test-selection/request-and-run-a-subset-of-tests/request-and-run-a-subset-of-tests.adoc[Requesting and running a subset of tests] for more details about this command. +. Start up your parallel test worker(s). +. Request the bin of tests that the worker should run. To do this, run `smart-tests split-subset` with: . the `--subset-id` option set to the ID you saved earlier, and +. the `--bin` value set to `bin-number/bin-count` . For example, to split your test suite across 3 workers, use `1/3` , `2/3` , etc. +. If you're using xref:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] , add the `--output-exclusion-rules` option. +. Run the tests on each worker as outlined in xref:predictive-test-selection/request-and-run-a-subset-of-tests/request-and-run-a-subset-of-tests.adoc[Requesting and running a subset of tests] . +. After each run finishes in each worker, record test results using `smart-tests record tests` with the `--subset-id` option set to the ID you saved earlier. + +In pseudocode: + +[source] +---- +# main +$ smart-tests record build --build $BUILD_ID --source src=. +$ smart-tests record session --build $BUILD_ID --session $SESSION_ID +$ smart-tests subset bazel --split --target 100% --build $BUILD_ID --session $SESSION_ID . +subset/12345 + +... + +# worker 1 +$ smart-tests split-subset bazel --subset-id subset/12345 --bin 1/3 > worker.txt +$ bazel test $(cat worker.txt) +$ smart-tests record tests --build $BUILD_ID --session $SESSION_ID --subset-id subset/12345 bazel . 
+ +# worker 2 +$ smart-tests split-subset bazel --subset-id subset/12345 --bin 2/3 > worker.txt +$ bazel test $(cat worker.txt) +$ smart-tests record tests --build $BUILD_ID --session $SESSION_ID --subset-id subset/12345 bazel . + +# worker 3 +$ smart-tests split-subset bazel --subset-id subset/12345 --bin 3/3 > worker.txt +$ bazel test $(cat worker.txt) +$ smart-tests record tests --build $BUILD_ID --session $SESSION_ID --subset-id subset/12345 bazel . +---- \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/trends.adoc b/smart_tests/docs/modules/features/pages/trends.adoc new file mode 100644 index 000000000..cc98b9f35 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/trends.adoc @@ -0,0 +1,8 @@ += Test Suite Trends +:slug: trends + +The *Trends* page shows aggregate info about your test runs, including average test session duration, test session frequency, and how often sessions fail. + +Seeing this data over time gives you a picture of how your test suite is evolving; for example, perhaps your tests are taking twice as long as they did six months ago, and you need to cut it down! Similarly, perhaps your team's running tests a lot more often than expected drives up resource costs. Or maybe you have some broken tests driving up the overall failure rate. + +image::ROOT:trends.png[] \ No newline at end of file diff --git a/smart_tests/docs/modules/features/pages/unhealthy-tests.adoc b/smart_tests/docs/modules/features/pages/unhealthy-tests.adoc new file mode 100644 index 000000000..9bd395519 --- /dev/null +++ b/smart_tests/docs/modules/features/pages/unhealthy-tests.adoc @@ -0,0 +1,79 @@ += Unhealthy Test Insights +:slug: unhealthy-tests + +Tests are hard to maintain. Once you write them, they have a tendency to stick around, even when it’s no longer clear what value they provide or when they are hurting more than helping. 
+ +SMEs working on maintaining those tests often struggle to make convincing arguments as to what work needs to be done to improve the effectiveness of tests and get frustrated. + +The overall quality of tests suffers, and in the worst case, the annoyance of the tests goes too high, and developers lose trust in the tests. + +Hence the *Unhealthy Tests* page in {PRODUCT}! This page surfaces tests that exhibit specific issues so you can investigate and make necessary changes. + +[NOTE] +-- +Unhealthy Test stats are aggregated at the 'altitude' that your test runner uses to run tests. See /docs/concepts/subset/#subset-altitude-and-test-items[#Subset altitude and test times] for more info on this concept. +-- + +== Flaky Tests + +=== About flaky tests + +Flaky tests are automated tests that fail randomly during a run for reasons not related to the code changes being tested. They are often caused by timing issues, concurrency problems, or the presence of other workloads in the system. + +Flaky tests are a common problem for many development teams, especially as test suites grow. They are more common at higher levels of the Test Pyramid, especially in UI and system tests. + +Like the fictional boy who cried “wolf,” tests that send a false signal too often are sometimes ignored. Or worse, people spend real time and effort trying to diagnose a failure, only to discover that it has nothing to do with their code changes. When flakiness occurs with many tests, it can make people weary of all tests and all failures—not just flaky tests—causing a loss of trust in tests. + +Tests that produce flaky results should be repaired or removed from the test suite. + +=== Flaky Test Insights + +To help with this, {PRODUCT} can analyze your test runs to identify flaky tests in your suite. + +Start by xref:send-data-to-smart-tests:send-data-to-smart-tests.adoc[sending data to {PRODUCT}]. The *Flaky tests* page will be populated within a few days. 
+ +* However, for flakiness scores to populate, you need to run the same test multiple times against the same xref:concepts:build.adoc[Build]. In other words, you need to have a retry mechanism in place to re-run tests when they fail. (This is usually already the case for test suites with flaky tests.) + +{PRODUCT} re-analyzes your test sessions to extract flakiness data every day. + +Flaky tests are automated tests that fail randomly during a run for reasons not related to the code changes being tested. They are often caused by timing issues, concurrency problems, or the presence of other workloads in the system. + +=== Flakiness score + +A test is considered flaky if you run it multiple times against the same build, and sometimes it passes and sometimes it fails. + +The *flakiness score* for a test represents the probability that a test _fails_ but eventually _passes_ if you run it repeatedly. + +For example, let's say you have a test called `myTest1` with a flakiness score of 0.1. This means that if this test failed against ten different commits, in 1 of those ten commits, that failure was not a true failure. If you run that test repeatedly, it eventually _passes_ . This test is slightly flaky. + +Similarly, another test called `myTest2` has a flakiness score of 0.9. If this test failed against ten different commits, in 9 out of those ten commits, you saw a false failure where a retry will yield a _passing_ result. That test is very flaky and should be fixed. + +=== Total duration + +The dashboard also includes the *total duration* of a flaky test. Since flaky tests are often retried multiple times, this adds lots of extra time to each test run. + +The total duration is useful for prioritizing which flaky tests to fix _first_ . + +For example, you might have a very flaky test (i.e., it has a high flakiness score) that either doesn't take very long to run each time, doesn't run very often, or both. 
In comparison, you might have a less flaky test that takes a very long time to run -- so you'll probably want to fix that first. + +[NOTE] +-- +The table is sorted by flakiness score in descending order, not total duration. +-- + +== Never Failing Tests + +Tests that never fail are like cats who never catch any mice. They take up execution time and require maintenance, yet they may not add value. For each test, ask yourself if it provides enough value to justify its execution time. Consider moving the test to the right so that it runs less frequently. + +[NOTE] +-- +A test must run at least *five* (5) times in order to be considered. +-- + +== Longest Tests + +Slow tests are like gunk that builds up in your engine. Over time they slow down your CI cycle. + +== Most Failed Tests + +Tests that fail _too_ often are suspicious. Perhaps they are flaky. Perhaps they are fragile/high maintenance. Perhaps they are testing too many things in one shot. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/ci-tool-integrations.adoc b/smart_tests/docs/modules/resources/pages/ci-tool-integrations.adoc new file mode 100644 index 000000000..9917bff22 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/ci-tool-integrations.adoc @@ -0,0 +1,6 @@ += CI tool integrations +:slug: ci-tool-integrations + +Although *{PRODUCT} works with any CI tool* , we are working on direct integrations to make onboarding easier. See below. 
+ +* xref:resources:ci-tool-integrations/github-actions.adoc[GitHub Actions] \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/ci-tool-integrations/github-actions.adoc b/smart_tests/docs/modules/resources/pages/ci-tool-integrations/github-actions.adoc new file mode 100644 index 000000000..f71167b89 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/ci-tool-integrations/github-actions.adoc @@ -0,0 +1,290 @@ += GitHub Actions +:slug: github-actions + +Follow the instructions on xref:send-data-to-smart-tests:getting-started/getting-started.adoc[Getting Started] to do the following: + +* Sign up for a Smart test account on {PRODUCT}. +* Create an organization and a workspace for your test suite. +* Create an API key for your workspace. This authentication token lets the CLI talk to your {PRODUCT} workspace. + +== link:https://github.com/marketplace/actions/record-build-and-test-results-action[{PRODUCT} record build and test results action] + +The {PRODUCT} record build and test results action enables you to integrate {PRODUCT} into your CI in simple way with less change. This action installs the link:https://github.com/launchableinc/cli[CLI] and runs `smart-tests record build` and `smart-tests record tests` to send data to {PRODUCT} so that the test results will be analyzed in link:https://app.launchableinc.com/[{PRODUCT}] to improve your developer productivity. 
+ +=== Example usage + +[source] +---- +name: Test + +on: + push: + branches: [main] + pull_request: + branches: [main] + +env: + SMART_TESTS_TOKEN: ${{ secrets.SMART_TESTS_TOKEN }} + SMART_TESTS_DEBUG: 1 + SMART_TESTS_REPORT_ERROR: 1 + +jobs: + tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Test + run: + - name: Record + uses: launchableinc/record-build-and-test-results-action@v1.0.0 + with: + build_name: $GITHUB_RUN_ID + test_runner: + report_path: + if: always() +---- + +[cols="1,1,1", options="header"] +|=== +|Test runner |`test_runner` value |Additional steps? + +|Ant +|`ant` +| + +|Bazel +|`bazel` +| + +|Behave +|`behave` +|Yes + +|CTest +|`ctest` +|Yes + +|cucumber +|`cucumber` +|Yes + +|Cypress +|`cypress` +| + +|GoogleTest +|`googletest` +|Yes + +|Go Test +|`go-test` +|Yes + +|Gradle +|`gradle` +| + +|Jest +|`jest` +|Yes + +|Maven +|`maven` +| + +|minitest +|`minitest` +|Yes + +|NUnit Console Runner +|`nunit` +|Yes + +|pytest +|`pytest` +|Yes + +|Robot +|`robot` +| + +|RSpec +|`rspec` +|Yes +|=== + +== Instructions for test runners/build tools + +=== Android Debug Bridge (adb) + +Currently, the CLI doesn't have a `record tests` command for ADB. Use the command for /docs/resources/ci-tool-integrations/github-actions/#gradle[#gradle] instead. + +=== Ant + +No special steps. + +=== Bazel + +=== Behave + +First, in order to generate reports that {PRODUCT} can consume, add the `--junit` option to your existing `behave` command: + +[source] +---- +# run the tests however you normally do +behave --junit +---- + +=== CTest + +First, run your tests with `ctest -T test --no-compress-output` . These options ensure test results are written to the `Testing` directory. + +=== cucumber + +First, run cucumber with the `-f junit` option, like this: + +`bundle exec cucumber -f junit -o reports` + +(If you use JSON, use the {PRODUCT} CLI instead.) 
+ +=== Cypress + +=== GoogleTest + +First, configure GoogleTest to produce JUnit compatible report files. See link:https://github.com/google/googletest/blob/main/docs/advanced.md#generating-an-xml-report[their documentation] for how to do this. You'll end up with a command something like this: + +[source] +---- +# run the tests however you normally do +./my-test --gtest_output=xml:./report/my-test.xml +---- + +=== Go Test + +First, in order to generate reports that {PRODUCT} can consume, use link:https://github.com/jstemmer/go-junit-report[go-junit-report] to generate a JUnit XML file after you run tests: + +[source] +---- +# install JUnit report formatter +go get -u github.com/jstemmer/go-junit-report + +# run the tests however you normally do, then produce a JUnit XML file +go test -v ./... | go-junit-report -set-exit-code > report.xml +---- + +=== Gradle + +`**/build/**/TEST-*.xml` . + +=== Jest + +First, in order to generate reports that {PRODUCT} can consume, use link:https://www.npmjs.com/package/jest-junit[jest-junit] to generate a JUnit XML file after you run tests. + +[source] +---- +# install jest-junit reporter +npm install jest-junit --save-dev +# or +yarn add --dev jest-junit +---- + +You'll need to configure jest-junit to include file paths in reports. + +You can do this using environment variables: + +[.multilanguage-custom-table, options="header", cols="2"] +|=== +| Using environment variables +| Using package.json + +a| Recommended config: + +[source] +---- +export JEST_JUNIT_CLASSNAME="{classname}" +export JEST_JUNIT_TITLE="{title}" +export JEST_JUNIT_SUITE_NAME="{filepath}" +---- + +Minimum config: + +`export JEST_JUNIT_SUITE_NAME="{filepath}"` + +a| Add the following lines to your `package.json` . The detail is the link:https://www.npmjs.com/package/jest-junit[jest-junit] section. 
+ +Recommended config: + +[source] +---- +// package.json +"jest-junit": { + "suiteNameTemplate": "{filepath}", + "classNameTemplate": "{classname}", + "titleTemplate": "{title}" +} +---- + +Minimum config: + +[source] +---- +// package.json +"jest-junit": { + "suiteNameTemplate": "{filepath}" +} +---- + +|=== + +Then, run `jest` using jest-junit: + +[source] +---- +# run tests with jest-junit +jest --ci --reporters=default --reporters=jest-junit +---- + +=== Maven + +[NOTE] +-- +{PRODUCT} supports test reports generated using link:https://maven.apache.org/surefire/maven-surefire-plugin/[Surefire], the default report plugin for https://maven.apache.org/[Maven]. +-- + +`'./**/target/surefire-reports'` + +_Note: The invocation above relies on the CLI to expand GLOBs like _ `_**_` _._ + +=== minitest + +First, use link:https://github.com/circleci/minitest-ci[minitest-ci] to output test results to a file. If you already store your test results on your CI server, it may already be installed. + +=== NUnit Console Runner + +[NOTE] +-- +{PRODUCT} CLI accepts link:https://docs.nunit.org/articles/nunit/technical-notes/usage/XML-Formats.html[NUnit3 style test report XML files] produced by NUnit. +-- + +=== pytest + +First, run tests with the `--junit-xml` option: + +`pytest --junit-xml=test-results/results.xml` + +[WARNING] +-- +pytest changed its default test report format from `xunit1` to `xunit2` in version 6. Unfortunately, the new `xunit2` format does not include file paths, which {PRODUCT} needs. + +Therefore, if using pytest 6 or newer, you must also specify `junit_family=legacy` as the report format. See link:https://docs.pytest.org/en/latest/deprecations.html#junit-family-default-value-change-to-xunit2[Deprecations and Removals — pytest documentation] for instructions. +-- + +=== Robot + +`output.xml` + +=== RSpec + +First, use link:https://github.com/sj26/rspec_junit_formatter[rspec_junit_formatter] to output test results to a file in RSpec. 
If you already have a CI server storing your test results it may already be installed: + +`bundle exec rspec --format RspecJunitFormatter --out report/rspec.xml` diff --git a/smart_tests/docs/modules/resources/pages/cli-reference.adoc b/smart_tests/docs/modules/resources/pages/cli-reference.adoc new file mode 100644 index 000000000..5158216b6 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/cli-reference.adoc @@ -0,0 +1,710 @@ += CLI reference +:slug: cli-reference + +== Getting started + +=== Requirements + +* Python 3.13 or newer (not required when using uv) +* Java 8 or newer + +=== Install + +The {PRODUCT} CLI is a Python3 package that you can install from https://pypi.org/project/smart-tests/[PyPI]. + +==== Recommended: Using uv (fastest) + +We recommend using https://docs.astral.sh/uv/[uv], a fast Python package installer, for the best installation experience. + +First, install uv: + +[source,bash] +---- +curl -LsSf https://astral.sh/uv/install.sh | sh +export PATH="$HOME/.local/bin:$PATH" +---- + +For other installation methods (including Windows, Github Actions, etc.), see the https://docs.astral.sh/uv/getting-started/installation/[uv installation guide]. + +Then install {PRODUCT} CLI: + +[source,bash] +---- +uv tool install smart-tests +---- + +==== Alternative: Using pip + +You can also install the CLI using pip: + +`pip3 install --user --upgrade smart-tests` + +This creates a `~/.local/bin/smart-tests` executable that should be in your `PATH` . (See https://www.python.org/dev/peps/pep-0370/[PEP-370] for further details.) + +=== Authenticate + +Set your API key: + +`export SMART_TESTS_TOKEN=your_API_key` + +=== Verify + +Then run `smart-tests verify` in your CI environment to see if you've successfully configured the CLI. If it succeeds, you'll see a message like the one below. If you see an error message, refer to xref:resources:troubleshooting.adoc[Troubleshooting] . 
+ +[source] +---- +$ smart-tests verify + +Organization: +Workspace: +Proxy: None +Platform: 'macOS-12.0.1-x86_64-i386-64bit' +Python version: '3.9.9' +Java command: 'java' +smart-tests version: '1.34.0' +Your CLI configuration is successfully verified 🎉 +---- + +== Required Workflow + +[IMPORTANT] +==== +With the smart-tests command update, you must now follow this mandatory sequence: + +1. `smart-tests record build` - Record the build information +2. `smart-tests record session` - Create and register a test session (REQUIRED) +3. `smart-tests subset` or `smart-tests record tests` - Use the session name registered in step 2 with the `--session` option + +The session name specified in `smart-tests record session` must be used in subsequent commands (`smart-tests subset` and `smart-tests record tests`) via the `--session` parameter. +==== + +== Command: inspect subset + +Display the details of a *subset* request. See /docs/features/predictive-test-selection/#inspecting-subset-details[Subsetting your test runs] for more info. + +// [generate:inspect subset] +`Usage: smart-tests inspect subset [OPTIONS]` + +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--json` +|Display JSON format +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--subset-id` INT +|Subset id +|Yes + +|=== +// [/generate] + +You can use `smart-tests inspect subset` to inspect the details of a specific subset, including rank and expected duration. This is useful for verifying that you passed the correct tests or test directory path(s) into `smart-tests subset` . 
The output from `smart-tests subset` includes a tip to run `smart-tests inspect subset` : + +[source] +---- +$ smart-tests subset minitest --build 123 --session session-123 --confidence 90% test/*.rb > subset.txt + +< summary table > + +Run `smart-tests inspect subset --subset-id 26876` to view full subset details +---- + +Running that command will output a table containing a row for each test, including: + +* Rank/order +* Test identifier +* Whether the test was included in the subset +* {PRODUCT}'s estimated duration for the test +** Tests with a duration of `.001` seconds were not recognized by {PRODUCT} + +[NOTE] +-- +Note that the hierarchy level of the items in the list depends on the test runner in use. + +For example, since Maven can accept a list of test _classes_ as input, `smart-tests inspect subset` will output a prioritized list of test _classes_ . Similarly, since Cypress can accept a list of test _files_ as input, `smart-tests inspect subset` will output a list of prioritized test _files_ . (And so on.) +-- + +== Command: record attachment + +Attach log files to test session. For more information, refer to xref:send-data-to-smart-tests:record-test-results/attach-log-files.adoc[Attach log files]. + +// [generate:record attachment] +`Usage: smart-tests record attachment [OPTIONS] ...` + +[cols="2,4,1"] +|=== +|Argument |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`...` +|Attachment files to upload +|Yes + +|=== +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--session` SESSION +|Session ID obtained by calling 'smart-tests record session'. It also accepts '@path/to/file' if the session ID is stored in a file +|Yes + +|=== +// [/generate] + +== Command: record commit + +Sends *commit* details to {PRODUCT}. Records multiple commits from repo(s). 
+ +// [generate:record commit] +`Usage: smart-tests record commit [OPTIONS]` + +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--import-git-log-output` FILE +|Import from the git-log output +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--max-days` DAYS +|The maximum number of days to collect commits retroactively +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--name` NAME +|Repository name +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--source` DIR +|Repository path +|No + +|=== +// [/generate] + +Commit collection happens automatically as a part of `record build` , so normally this command need not be invoked separately. It's only used for /docs/sending-data-to-smart-tests/using-the-smart-tests-cli/recording-builds-with-the-smart-tests-cli/recording-builds-from-multiple-repositories/#multiple-repositories-builtdeployed-separately-then-tested-together-eg-microservices[Multiple repositories built/deployed separately and then tested together (e.g., microservices)] . + +=== `--import-git-log-output` option + +Related to xref:send-data-to-smart-tests:record-builds/run-under-restricted-networks.adoc[Run under restricted networks] . + +If the `--import-git-log-output` option is used, it reads the specified file for the commit data instead of reading the commits from the repository specified by `--source` . The input file should contain the output of this Git command: + +`git log --pretty='format:{"commit": "%H", "parents": "%P", "authorEmail": "%ae", "authorTime": "%aI", "committerEmail": "%ce", "committerTime": "%cI"}' --numstat` + +== Command: record build + +Creates a record of a xref:concepts:build.adoc[Build] in {PRODUCT}. + +// [generate:record build] +`Usage: smart-tests record build [OPTIONS]` + +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--branch` NAME +|Set branch name. 
A branch is a set of test sessions grouped and this option value will be used for a branch name. +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--build` NAME +|Build name +|Yes + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--commit` REPO_NAME=COMMIT_HASH +|Set repository name and commit hash when you use --no-commit-collection option (can be specified multiple times) +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--link` PARSE_KEY_VALUE +|Set external link of a title and url (can be specified multiple times) +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--max-days` DAYS +|The maximum number of days to collect commits retroactively +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--no-commit-collection` +|Do not collect commit data. This is useful if the repository is a shallow clone and the RevWalk is not possible. The commit data must be collected with a separate fully-cloned repository. +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--no-submodules` +|Stop collecting information from Git Submodules +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--repo-branch-map` REPO_NAME=BRANCH_NAME +|Set repository name and branch name when you use --no-commit-collection option. Please use the same repository name with a commit option (can be specified multiple times) +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--source` DIR +|Path to local Git workspace, optionally prefixed by a label. like --source path/to/ws or --source main=path/to/ws (can be specified multiple times) +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--timestamp` TIMESTAMP +|Used to overwrite the build time when importing historical data. Note: Format must be `YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied) +|No + +|=== +// [/generate] + +The act of recording a build teaches {PRODUCT} that the specified set of commits has turned into a build and that the given name henceforth identifies this build. 
This forms the basis of how {PRODUCT} calculates the changes. + +Conceptually, a build is a collection of Git repositories, each at a specific commit. `REPO_NAME` identifies each repository contributing to a build, and it needs to be stable across different builds of the same project. Good examples include: + +* Relative directory paths to the repository from the "workspace root," such as `src/moduleX` if they are stable. +* `GitHubOrg/GitHubRepo` slug if your repositories are on GitHub since they are also stable. + +== Command: record session + +[IMPORTANT] +==== +This command is now **mandatory** and must be executed after `smart-tests record build` and before `smart-tests subset` or `smart-tests record tests`. +==== + +Creates a record of a *test session* in {PRODUCT}. + +// [generate:record session] +`Usage: smart-tests record session [OPTIONS]` + +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--build` NAME +|Build name +|Yes + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--flavor` KEY=VALUE +|Flavors (can be specified multiple times) +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--no-build` +|If you want to only send test reports, please use this option +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--observation` +|Enable observation mode +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--link` TITLE=URL +|Set external link of title and url (can be specified multiple times) +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--test-suite` NAME +|Set test suite name. A test suite is a collection of test sessions. Setting a test suite allows you to manage data over test sessions and lineages. +|Yes + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--timestamp` TIMESTAMP +|Used to overwrite the session time when importing historical data. 
Note: Format must be `YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied) +|No + +|=== +// [/generate] + +This command tells {PRODUCT} that you are about to begin testing a build that was recorded earlier with the `record build` command. **This command is now mandatory** and must be executed after every `smart-tests record build` command. + +The command outputs a string you can save for use in other commands (like `smart-tests subset` and `smart-tests record tests`) instead of `--build` . We suggest saving the value either to an environment variable or to a text file: + +[source] +---- +# environment variable + +smart-tests record build --build BUILD_NAME [OPTIONS] +export SMART_TESTS_SESSION=$(smart-tests record session --build BUILD_NAME --session SESSION_NAME [OPTIONS]) + +smart-tests record tests TESTRUNNER --session SESSION_NAME [OPTIONS] +---- + +[source] +---- +# text file + +smart-tests record build --build BUILD_NAME [OPTIONS] +smart-tests record session --build BUILD_NAME --session SESSION_NAME [OPTIONS] + +smart-tests record tests TESTRUNNER --session SESSION_NAME [OPTIONS] +---- + +(Otherwise, the command will write a session ID to `~/.config/smart-tests/sessions/{hash}.txt` . This location may change in the future, so don't rely on it.) + +== Command: record tests + +Send *test results* for the *test session* to {PRODUCT}. + +[IMPORTANT] +==== +The `--session` parameter is required and must use the session name that was registered with `smart-tests record session`. +==== + +// [generate:record tests] +`Usage: smart-tests record tests [OPTIONS] ...` + +[cols="2,4,1"] +|=== +|Argument |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`` +| +|Yes + +|=== +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--base` CONVERT +|(Advanced) base directory to make test names portable +|No + +// GENERATED. 
MODIFY IN CLI SOURCE CODE +|`--group` NAME +|Grouping name for test results +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--no-base-path-inference` +|Do not guess the base path to relativize the test file paths. By default, if the test file paths are absolute file paths, it automatically guesses the repository root directory and relativize the paths. With this option, the command doesn't do this guess work. If --base-path is specified, the absolute file paths are relativized to the specified path irrelevant to this option. Use it if the guessed base path is incorrect. +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--post-chunk` INT +|Post chunk +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--session` SESSION +|Session ID obtained by calling 'smart-tests record session'. It also accepts '@path/to/file' if the session ID is stored in a file +|Yes + +|=== +// [/generate] + +This command reads JUnit (or similar) XML report files produced by test runners and sends them to {PRODUCT}. + +Exactly how this command generates the subset and what's required to do this depends on test runners. For available supported `TESTRUNNER` , see xref:resources:integrations.adoc[Integrations] + +== Command: split-subset + +Splits an existing *subset* from {PRODUCT} into chunks. This relates to xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/replace-static-parallel-suites-dynamic-parallel-subset.adoc[Replacing static parallel suites with a dynamic parallel subset] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/use-groups-to-split-subsets.adoc[Using groups to split subsets] . + +`smart-tests split-subset TESTRUNNER [OPTIONS] ...` + +Intended for use with `smart-tests subset` with the `--split` option. 
+ +=== Options for xref:features:predictive-test-selection/requesting-and-running-a-subset-of-tests/subsetting-with-the-smart-tests-cli/replacing-static-parallel-suites-dynamic-parallel-subset.adoc[Replacing static parallel suites with a dynamic parallel subset] + +[cols="1,2,1"] +|=== +|Option |Description |Required + +|`--bin BIN_NUMBER/BIN_COUNT` +|The portion of the subset to retrieve, e.g `--bin 1/3` +|Yes + +|`--rest FILE` +|Output the remainder of the subset to a file. This is useful for running the "rest of the tests" after you've run a subset. +|No + +|`--same-bin FILE` +|[Beta; Gradle only] Place tests listed in the FILE to belong to the same bin to avoid the tests running simultaneously. +|No + +|`--subset-id SUBSET-ID-STRING` +|The ID of the subset output from `smart-tests subset --split ...` (see `--split` under `subset`) +|Yes + +|`--output-exclusion-rules` +|Output a list of tests to _exclude_ instead of a list to _include_. See Zero Input Subsetting. +|No +|=== + +=== Options for xref:features:predictive-test-selection/requesting-and-running-a-subset-of-tests/subsetting-with-the-smart-tests-cli/zero-input-subsetting/using-groups-to-split-subsets.adoc[Using groups to split subsets] + +[cols="1,2,1"] +|=== +|Option |Description |Required + +|`--split-by-group` +|Splits an existing subset output into multiple files. See below. +|No + +|`--split-by-group-with-rest` +|Similar to `--split-by-group`, except remainder/rest files are also included. See below. +|No + +|`--subset-id SUBSETID` +|The ID of the subset output from `smart-tests subset --split ...` (see `--split` under `subset`) +|Yes + +|`--output-exclusion-rules` +|For use with Zero Input Subsetting. See examples below. +|No +|=== + +`*--split-by-group*` * outputs* + +When you run `smart-tests split-subset` with the `--split-by-group` option, the CLI creates several files. 
If you use `--output-exclusion-rules` to enable xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] , the behavior changes, as shown in the table below. + +|=== +|File |Default |--output-exclusion-rules + +|subset-groups.txt +|This file contains a list of the groups you must set up. +|This file contains a list of the groups you can skip entirely. + +|subset-[groupname].txt +(one file for each group) +|Each file contains the normal subset output but only for that group's tests. You can pass these files into the test process for each group. +|Each file contains the normal subset output but only for that group's tests. You can pass these files into the test process for each group. +These files will contain exclusion rules. You're supposed to exclude these tests. + +|subset-nogroup.txt +|This file contains tests that had no group assignment, if there are any. +|This file contains tests that had no group assignment, if there are any. +|=== + +`*--split-by-group-with-rest*` * outputs* + +When you run `smart-tests split-subset` with the `--split-by-group-with-rest` option, the CLI creates several files in addition to the ones described in the above table: + +|=== +|File |Default |--output-exclusion-rules + +|rest-groups.txt +|This file contains a list of the groups you don't need to set up. +|This file contains a list of the groups you can't skip. + +|rest-[groupname].txt +(one file for each group) +|Each file contains the normal --rest output, but only for that group's tests. +|Each file contains the normal --rest output, but only for that group's tests. +These files will contain exclusion rules. You're supposed to exclude these tests. + +|rest-nogroup.txt +|This file contains --rest tests that had no group assignment if there are any. +|This file contains --rest tests that had no group assignment if there are any. 
+|=== + +== Command: stats test-sessions + +Retrieves statistics about test sessions + +// [generate:stats test-sessions] +`Usage: smart-tests stats test-sessions [OPTIONS]` + +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--days` INT +|How many days of test sessions in the past to be stat +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--flavor` KEY=VALUE +|flavors (can be specified multiple times) +|No + +|=== +// [/generate] + +Output example: + +`{"averageDurationSeconds":653.168192926045,"count":311,"days":7}` + + +== Command: subset + +Produces a subset of *tests* to pass to your test runner. + +[IMPORTANT] +==== +The `--session` parameter is required and must use the session name that was registered with `smart-tests record session`. +==== + +// [generate:subset] +`Usage: smart-tests subset [OPTIONS] ...` + +[cols="2,4,1"] +|=== +|Argument |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`` +| +|Yes + +|=== +[cols="2,4,1"] +|=== +|Option |Description |Required + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--base` DIR +|(Advanced) base directory to make test names portable +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--confidence` PERCENTAGE +|Subsetting by confidence from 0% to 100% +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--goal-spec` GOAL_SPEC +|Subsetting by programmatic goal definition +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--ignore-flaky-tests-above` N +|Ignore flaky tests above the value set by this option. You can confirm flaky scores in WebApp +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--ignore-new-tests` +|Ignore tests that were added recently. NOTICE: this option will ignore tests that you added just now as well +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--get-tests-from-guess` +|Get subset list from guessed tests +|No + +// GENERATED. 
MODIFY IN CLI SOURCE CODE +|`--get-tests-from-previous-sessions` +|Get subset list from previous full tests +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--output-exclusion-rules` +|Outputs the exclude test list. Switch the subset and rest. +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--no-base-path-inference` +|Do not guess the base path to relativize the test file paths. By default, if the test file paths are absolute file paths, it automatically guesses the repository root directory and relativize the paths. With this option, the command doesn't do this guess work. If --base is specified, the absolute file paths are relativized to the specified path irrelevant to this option. Use it if the guessed base path is incorrect. +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--prioritize-tests-failed-within-hours` N +|Prioritize tests that failed within the specified hours; maximum 720 hours (= 24 hours * 30 days) +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--prioritized-tests-mapping` FILE +|Prioritize tests based on test mapping file +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--rest` FILE +|Output the subset remainder to a file, e.g. --rest=remainder.txt +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--session` SESSION +|Session ID obtained by calling 'smart-tests record session'. It also accepts '@path/to/file' if the session ID is stored in a file +|Yes + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--target` PERCENTAGE +|Subsetting target from 0% to 100% +|No + +// GENERATED. MODIFY IN CLI SOURCE CODE +|`--time` TIME +|Subsetting by absolute time, in seconds e.g) 300, 5m +|No + +|=== +// [/generate] + +Exactly how this command generates the subset and what's required to do this depends on test runners. For available supported `TESTRUNNER` s, see xref:resources:integrations.adoc[Integrations] . + +When none of `--target` , `--time` , and `--confidence` is specified, {PRODUCT} chooses the subset target. 
This is convenient on the initial setup when you are unsure what the subset size should be. Later, you can choose the right target after you see the statistics of your test suite and potential time-savings based on the {PRODUCT} accumulated data. + +== Command: verify + +Verify that the CLI can communicate with the {PRODUCT} service and that you're authenticated properly. + +// [generate:verify] +`Usage: smart-tests verify` + +// [/generate] + +To avoid disrupting your CI/test process, the {PRODUCT} CLI is designed to tolerate and recover from service disruptions and other recoverable error conditions by falling back to no-op. This is an intentional design, but the downside is that such transparent failures can make troubleshooting difficult. + +Therefore, we recommend you keep `smart-tests verify || true` in a recognizable spot in your CI process. This way, when you suspect a problem in {PRODUCT}, you can check the output of this command as a starting point. + +== Global options + +[NOTE] +-- +These global options can be placed after the subcommand, for example `smart-tests verify --log-level audit` +-- + +=== --dry-run + +The dry-run mode does not actually send a payload to the server, and it is helpful to check the behavior. You can also see which APIs will be requested and their payload contents in the output. + +The payload contents will be output as an audit log, so if the log level is higher than the audit level, it will be forced to be set to the audit level. + +Strictly speaking, it does not mean no requests will be sent, but GET requests with no payload data or side effects may be sent. This is because sometimes the response data from the GET request is needed to assemble subsequent requests. + +=== --log-level + +You can use the `--log-level` option to output extra information from each command. + +`--log-level audit` is particularly useful if you want to see exactly what data gets passed to {PRODUCT} when you run CLI commands. 
For example: + +[source] +---- +% smart-tests record build --log-level audit --build 1234 --source src=. +Processed 1 commits +AUDIT:smart-tests:send request method:post path:/intake/organizations/example/workspaces/awilkes/builds headers:{'Content-Type': 'application/json'} args:{'data': b'{"buildNumber": "1234", "commitHashes": [{"repositoryName": "src", "commitHash": "45b2e6d9df8e0013334354f30df1978c8b4196f8"}]}', 'timeout': (5, 60)} +---- + +=== --plugins + +You can use the `--plugins` option to tell the CLI where to look for custom profiles/plugins. + +For example, if you have developed (or been provided) a custom profile file named `myProfile.py` , place that file in the directory of your choosing (e.g., `~/smart-tests-plugins` ) and use it like this: + +`smart-tests --plugins ~/smart-tests-plugins record tests myProfile --build $BUILD --session $SESSION /path/to/reports` + +Since `--plugins` is a global option, make sure to place it right after `smart-tests` but before `subset` or `record` in your command. + +=== --skip-cert-verification + +This option instructs the CLI to bypass the SSL certificate verification. This is intended to be an escape hatch in case the system's Python setup is broken/incomplete. + +Alternatively, you can set the `SMART_TESTS_SKIP_CERT_VERIFICATION` environment variable to any value to have the same effect. This is more convenient if you want this behavior across the board, instead of just one invocation. + +This flag will make your communication with {PRODUCT} less secure (for example, you can be vulnerable to a DNS spoofing attack). Use it with caution. 
diff --git a/smart_tests/docs/modules/resources/pages/cli-version.txt b/smart_tests/docs/modules/resources/pages/cli-version.txt new file mode 100644 index 000000000..ba61fd363 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/cli-version.txt @@ -0,0 +1,3 @@ +# version of CLI to put in the reference doc +# update this file and create a PR to auto-trigger the cli-reference.adoc rebuild +v2.1.0 diff --git a/smart_tests/docs/modules/resources/pages/integrations.adoc b/smart_tests/docs/modules/resources/pages/integrations.adoc new file mode 100644 index 000000000..71f947409 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations.adoc @@ -0,0 +1,32 @@ += Integrations +:slug: integrations + +== Test runners/build tools + +The {PRODUCT} CLI includes pre-built integrations with the following test runners/build tools: + +* xref:resources:integrations/adb.adoc[Android Debug Bridge (adb)] +* xref:resources:integrations/ant.adoc[Ant] +* xref:resources:integrations/bazel.adoc[Bazel] +* xref:resources:integrations/behave.adoc[Behave] +* xref:resources:integrations/ctest.adoc[CTest] +* xref:resources:integrations/cucumber.adoc[cucumber] +* xref:resources:integrations/cypress.adoc[Cypress] +* xref:resources:supported-test-frameworks/googletest.adoc[GoogleTest] +* xref:resources:integrations/go-test.adoc[Go Test] +* xref:resources:integrations/gradle.adoc[Gradle] +* xref:resources:supported-test-frameworks/jest.adoc[Jest] +* xref:resources:integrations/karma.adoc[Karma] +* xref:resources:integrations/maven.adoc[Maven] +* xref:resources:integrations/minitest.adoc[minitest] +* xref:resources:integrations/dotnet-test.adoc[dotnet test] +* xref:resources:integrations/playwright.adoc[Playwright] +* xref:resources:integrations/prove.adoc[prove for Perl] +* xref:resources:integrations/pytest.adoc[pytest] +* xref:resources:supported-test-frameworks/robot.adoc[Robot] +* xref:resources:supported-test-frameworks/rspec.adoc[RSpec] + +[NOTE] +-- +If not using any of 
the above, use the xref:resources:integrations/use-the-generic-file-based-runner-integration.adoc[`file` profile for unsupported test runners] , the xref:resources:integrations/raw.adoc[`raw` profile for custom test runners] , or mailto:support@launchableinc.com?subject=Request%20a%20plugin[request a plugin] . +-- diff --git a/smart_tests/docs/modules/resources/pages/integrations/adb.adoc b/smart_tests/docs/modules/resources/pages/integrations/adb.adoc new file mode 100644 index 000000000..7ad3adfdc --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/adb.adoc @@ -0,0 +1,6 @@ += Android Debug Bridge (adb) +:slug: adb + +{PRODUCT} interfaces with adb via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subsetting with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/ant.adoc b/smart_tests/docs/modules/resources/pages/integrations/ant.adoc new file mode 100644 index 000000000..5f709ed6a --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/ant.adoc @@ -0,0 +1,6 @@ += Ant +:slug: ant + +{PRODUCT} interfaces with Ant via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subsetting with the {PRODUCT} CLI]. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/bazel.adoc b/smart_tests/docs/modules/resources/pages/integrations/bazel.adoc new file mode 100644 index 000000000..2371ecd80 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/bazel.adoc @@ -0,0 +1,6 @@ += Bazel +:slug: bazel + +{PRODUCT} interfaces with Bazel via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/behave.adoc b/smart_tests/docs/modules/resources/pages/integrations/behave.adoc new file mode 100644 index 000000000..a17ca63d6 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/behave.adoc @@ -0,0 +1,6 @@ += Behave +:slug: behave + +{PRODUCT} interfaces with Behave via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/ctest.adoc b/smart_tests/docs/modules/resources/pages/integrations/ctest.adoc new file mode 100644 index 000000000..2a73e5ae8 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/ctest.adoc @@ -0,0 +1,6 @@ += CTest +:slug: ctest + +{PRODUCT} interfaces with CTest via the {PRODUCT} CLI. 
+ +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/cucumber.adoc b/smart_tests/docs/modules/resources/pages/integrations/cucumber.adoc new file mode 100644 index 000000000..7ca80b471 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/cucumber.adoc @@ -0,0 +1,6 @@ += cucumber +:slug: cucumber + +{PRODUCT} interfaces with cucumber via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/cypress.adoc b/smart_tests/docs/modules/resources/pages/integrations/cypress.adoc new file mode 100644 index 000000000..fa52aaa5e --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/cypress.adoc @@ -0,0 +1,6 @@ += Cypress +:slug: cypress + +{PRODUCT} interfaces with Cypress via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/dotnet-test.adoc b/smart_tests/docs/modules/resources/pages/integrations/dotnet-test.adoc new file mode 100644 index 000000000..a769c715f --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/dotnet-test.adoc @@ -0,0 +1,6 @@ += dotnet test +:slug: dotnet-test + +{PRODUCT} interfaces with dotnet test via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/flutter.adoc b/smart_tests/docs/modules/resources/pages/integrations/flutter.adoc new file mode 100644 index 000000000..4dd72e655 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/flutter.adoc @@ -0,0 +1,5 @@ += Flutter + +{PRODUCT} interfaces with Flutter via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc#flutter-test-results[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc#flutter-subset[Subset with the {PRODUCT} CLI]. diff --git a/smart_tests/docs/modules/resources/pages/integrations/go-test.adoc b/smart_tests/docs/modules/resources/pages/integrations/go-test.adoc new file mode 100644 index 000000000..7640eaa68 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/go-test.adoc @@ -0,0 +1,6 @@ += Go Test +:slug: go-test + +{PRODUCT} interfaces with Go Test via the {PRODUCT} CLI. 
+ +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/googletest.adoc b/smart_tests/docs/modules/resources/pages/integrations/googletest.adoc new file mode 100644 index 000000000..1f89108d5 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/googletest.adoc @@ -0,0 +1,6 @@ += GoogleTest +:slug: googletest + +{PRODUCT} interfaces with GoogleTest via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subsetting with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/gradle.adoc b/smart_tests/docs/modules/resources/pages/integrations/gradle.adoc new file mode 100644 index 000000000..b9a88f12f --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/gradle.adoc @@ -0,0 +1,6 @@ += Gradle +:slug: gradle + +{PRODUCT} interfaces with Gradle via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Recording test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/jest.adoc b/smart_tests/docs/modules/resources/pages/integrations/jest.adoc new file mode 100644 index 000000000..281674a51 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/jest.adoc @@ -0,0 +1,6 @@ += Jest +:slug: jest + +{PRODUCT} interfaces with Jest via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Recording test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/karma.adoc b/smart_tests/docs/modules/resources/pages/integrations/karma.adoc new file mode 100644 index 000000000..08e267118 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/karma.adoc @@ -0,0 +1,6 @@ += Karma +:slug: karma + +{PRODUCT} interfaces with Karma via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/maven.adoc b/smart_tests/docs/modules/resources/pages/integrations/maven.adoc new file mode 100644 index 000000000..fa57d9a8a --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/maven.adoc @@ -0,0 +1,6 @@ += Maven +:slug: maven + +{PRODUCT} interfaces with Maven via the {PRODUCT} CLI. 
+ +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/minitest.adoc b/smart_tests/docs/modules/resources/pages/integrations/minitest.adoc new file mode 100644 index 000000000..93c2fbbe0 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/minitest.adoc @@ -0,0 +1,6 @@ += minitest +:slug: minitest + +{PRODUCT} interfaces with minitest via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Recording test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/playwright.adoc b/smart_tests/docs/modules/resources/pages/integrations/playwright.adoc new file mode 100644 index 000000000..672aa2066 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/playwright.adoc @@ -0,0 +1,6 @@ += Playwright +:slug: playwright + +{PRODUCT} interfaces with Playwright via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subsetting with the {PRODUCT} CLI]. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/prove.adoc b/smart_tests/docs/modules/resources/pages/integrations/prove.adoc new file mode 100644 index 000000000..e2d560256 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/prove.adoc @@ -0,0 +1,6 @@ += prove for Perl +:slug: prove + +{PRODUCT} interfaces with prove via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/pytest.adoc b/smart_tests/docs/modules/resources/pages/integrations/pytest.adoc new file mode 100644 index 000000000..47a959e20 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/pytest.adoc @@ -0,0 +1,6 @@ += pytest +:slug: pytest + +{PRODUCT} interfaces with pytest via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/raw.adoc b/smart_tests/docs/modules/resources/pages/integrations/raw.adoc new file mode 100644 index 000000000..61e559dde --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/raw.adoc @@ -0,0 +1,249 @@ += `raw` profile for custom test runners +:slug: raw + +[NOTE] +-- +This is a reference page. 
For more comprehensive usage guidelines, refer to xref:send-data-to-smart-tests:getting-started/getting-started.adoc[Getting started], xref:send-data-to-smart-tests:send-data-to-smart-tests.adoc[Send data to {PRODUCT}], and xref:features:predictive-test-selection.adoc[Predictive Test Selection]. +-- + +The `raw` CLI profile provides a low-level interface for interacting with {PRODUCT}. It is meant for use with custom-built test runners and requires additional integration steps in comparison to the native profiles built for each test runner. + +Requirements: + +. You need to be able to programmatically create a list of tests you were going to run before running them +. Your test runner needs to be able to accept a list of tests to run +. You need to be able to convert your existing test results to JSON format, including creating {PRODUCT}-specific `testPath` identifiers for every test + +[WARNING] +-- +The `raw` profile is an advanced feature. We strongly suggest you https://www.launchableinc.com/support[contact us] if you plan to use it so that we can help! +-- + +== Recording test results + +The raw profile accepts JUnit XML files and JSON files. Depending on your setup you may want to use one or the other. Read the entire document before deciding. + +=== JUnit format + +When you pass JUnit files into the raw profile, the profile extracts the `classname` and `name` attributes from each `TestCase` . Each test is stored as `class={classname}#testcase={name}` . This is important because the subset service will return a list of tests in this same format. You need to confirm that your test runner can accept a list of classes and/or a list of classes+testcases to run/not run. + +If your test runner identifies tests using a different structure/hierarchy, you will need to construct test paths and convert to JSON format instead. See the next section for those instructions. 
+ +[WARNING] +-- +Make sure you establish the correct format before you start sending production data! +-- + +To record tests, every time your tests finish, run: + +`smart-tests record tests --build --session raw /path/to/xml/files` + +You can use `smart-tests inspect tests --test-session-id [TEST SESSION ID]` to inspect the list of test paths that were submitted. + +[NOTE] +-- +You might need to take extra steps to make sure that `smart-tests record tests` always runs even if the build fails. See xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. +-- + +=== JSON format + +If your test runner identifies tests using a different structure than class+name (as described above), you'll need to convert your existing test results to JSON. This gives you total control over the stored data structure and the subsetting interface structure. + +==== Converting to JSON format + +After you run tests, convert your test results to a JSON document using link:https://github.com/cloudbees-oss/smart-tests-cli/search?q=https%3A%2F%2Flaunchableinc.com%2Fschema%2FRecordTestInput[this schema]. In particular, you will need to create a `testPath` value for every test (see below). + +[source] +---- +## Example JSON document +{ + "testCases": [ + { + "testPath": "file=login/test_a.py#class=class1#testcase=testcase899", + "duration": 42, + "status": "TEST_PASSED", + "stdout": "This is stdout", + "stderr": "This is stderr", + "createdAt": "2021-10-05T12:34:00" + } + ] +} +---- + +==== Constructing test paths + +A `testPath` is a unique string that identifies a given test, such as + +`file=login/test_a.py#class=class1#testcase=testcase899` + +This example declares a hierarchy of three levels: a `testcase` belongs to a `class` which belongs to a `file` (path). Your hierarchy may be different, but you'll need to include enough hierarchy to uniquely identify every test. 
+ +When creating your `testPath` hierarchy, keep in mind that you'll also use this structure for subsetting tests. See https://github.com/cloudbees-oss/smart-tests-cli/blob/3210a70a6704a357b87da9ecb771666a049c88f5/docs/resources/integrations/raw.md[#subsetting-hierarchy] for examples. + +Finally, include relative file paths instead of absolute ones where possible. + +[NOTE] +-- +*A note about file paths on Windows and Unix* + +If you include file paths in your `testPath` values and a given set of tests runs on both Unix and Windows, submit file paths with _either_ forward slashes or backslashes, but not both. If you submit a test with forward slashes in the file path and then submit the same test with backslashes in the file path, you will record two separate tests. +-- + +==== Record JSON test results with the CLI + +Then, pass that JSON document (e.g. `test-results/results.json` ) to the {PRODUCT} CLI for submission: + +`smart-tests record tests --build --session raw test-results/results.json` + +You can use `smart-tests inspect tests --test-session-id [TEST SESSION ID]` to inspect the list of test paths that were submitted. + +[NOTE] +-- +You might need to take extra steps to make sure that `smart-tests record tests` always runs even if the build fails. For more information, refer to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. +-- + +=== Subset your test runs + +The high level flow for subsetting is: + +. Create file containing a list of all the tests in your test suite that you would _normally_ run +. Pass that file to `smart-tests subset` with an optimization target +. `smart-tests subset` will get a subset from the {PRODUCT} platform and output that list to a text file +. Use that file to tell your test runner to run only those tests + +==== Subset input file format + +The input file is a text file that contains test paths (e.g. 
`file=a.py#class=classA` ), one per line Lines that start with a hash ('#') are considered comments and are ignored. + +For example: + +[source] +---- +file=login/test_a.py#class=class1#testcase=testcase899 +file=login/test_a.py#class=class2#testcase=testcase900 +file=login/test_b.py#class=class3#testcase=testcase901 +file=login/test_b.py#class=class3#testcase=testcase902 +---- + +If you recorded JUnit XML files, the input file format is fixed to `class={classname}#testcase={testcase}` . For example: + +[source] +---- +class=class1#testcase=testcase899 +class=class2#testcase=testcase900 +class=class3#testcase=testcase901 +class=class3#testcase=testcase902 +---- + +==== Subsetting hierarchy + +One common scenario is that a test runner cannot subset tests at the same level of granularity used for reporting tests. + +For example, if your tests are organized into a hierarchy of `file` s, `class` es, and `testcase` s, then your `testPath` s for recording tests will look like `file=#class=#testcase=` , e.g.: + +[source] +---- +{ + "testCases": [ + { + "testPath": "file=login/test_a.py#class=class1#testcase=testcase899", + "duration": 10.051, + "status": "TEST_PASSED", + }, + { + "testPath": "file=login/test_a.py#class=class2#testcase=testcase900", + "duration": 13.625, + "status": "TEST_FAILED", + }, + { + "testPath": "file=login/test_b.py#class=class3#testcase=testcase901", + "duration": 14.823, + "status": "TEST_PASSED", + }, + { + "testPath": "file=login/test_b.py#class=class3#testcase=testcase902", + "duration": 29.784, + "status": "TEST_FAILED", + } + ] +} +---- + +This creates four `testPath` s in {PRODUCT}: + +[source] +---- +file=login/test_a.py#class=class1#testcase=testcase899 +file=login/test_a.py#class=class2#testcase=testcase900 +file=login/test_b.py#class=class3#testcase=testcase901 +file=login/test_b.py#class=class3#testcase=testcase902 +---- + +However, perhaps your test runner can only subset at the `class` level, not the `testcase` level. 
In that case, send {PRODUCT} a list of `testPath` s that terminate at the `class` level, e.g. + +[source] +---- +file=login/test_a.py#class=class1 +file=login/test_a.py#class=class2 +file=login/test_b.py#class=class3 +---- + +{PRODUCT} will then return a list of prioritized `class` es for you to run. + +Similarly, if your test runner can only testcase at the `file` level, then submit a list of `testPath` s that terminate at the `file` level, e.g.: + +[source] +---- +file=login/test_a.py +file=login/test_b.py +---- + +{PRODUCT} will then return a list of prioritized `file` s for you to run. + +==== Request a subset of tests to run with the CLI + +Once you've created a subset input file, use the CLI to get a subset of the full list from {PRODUCT}: + +[source] +---- +smart-tests subset raw \ + --build \ + --session \ + --confidence \ + subset-input.txt > subset-output.txt +---- + +* The `--build` should use the same `` value that you used before in `smart-tests record build` . +* The `--confidence` option should be a percentage; we suggest `90%` to start. You can also use `--time` or `--target` ; for more information, refer to xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/request-and-run-a-subset-of-tests.adoc[Request and run a subset of tests]. + +This will output `subset-output.txt` which contains a list of tests in the same format they were submitted. For example: + +[source] +---- +file=b.py#class=class4 +file=b.py#class=class3 +---- + +You can then process this file as needed for input into your test runner. 
+ +=== Zero Input Subsetting + +To use xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/zero-input-subsetting/zero-input-subsetting.adoc[Zero Input Subsetting] with the raw profile: + +* Use the `--get-tests-from-previous-sessions` option +* Use the `--rest=` option to get a list of tests to _exclude_ (instead of _include_ ) so that new tests always run + +For example: + +[source] +---- +smart-tests subset raw \ + --build \ + --session \ + --confidence \ + --get-tests-from-previous-sessions \ + --rest=smart-tests-exclusion-list.txt \ + > smart-tests-inclusion-list.txt +---- + +Then `smart-tests-exclusion-list.txt` will contain the tests you can exclude when running tests. diff --git a/smart_tests/docs/modules/resources/pages/integrations/robot.adoc b/smart_tests/docs/modules/resources/pages/integrations/robot.adoc new file mode 100644 index 000000000..34eed2305 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/robot.adoc @@ -0,0 +1,6 @@ += Robot +:slug: robot + +{PRODUCT} interfaces with Robot via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Recording test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/rspec.adoc b/smart_tests/docs/modules/resources/pages/integrations/rspec.adoc new file mode 100644 index 000000000..7ddb5fd2b --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/rspec.adoc @@ -0,0 +1,6 @@ += RSpec +:slug: rspec + +{PRODUCT} interfaces with RSpec via the {PRODUCT} CLI. 
+ +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subsetting with the {PRODUCT} CLI]. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/integrations/use-the-generic-file-based-runner-integration.adoc b/smart_tests/docs/modules/resources/pages/integrations/use-the-generic-file-based-runner-integration.adoc new file mode 100644 index 000000000..01d37d608 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/use-the-generic-file-based-runner-integration.adoc @@ -0,0 +1,90 @@ += `file` profile for unsupported test runners +:slug: use-the-generic-file-based-runner-integration + +[NOTE] +-- +This is a reference page. For more comprehensive usage guidelines, refer to xref:send-data-to-smart-tests:getting-started/getting-started.adoc[Getting Started], xref:send-data-to-smart-tests:/send-data-to-smart-tests.adoc[Send data to {PRODUCT}], and xref:features:predictive-test-selection.adoc[Predictive test selection]. +-- + +== About + +The "file based" test runner integration is primarily designed to work with test runners that are not explicitly supported, such as custom test runners built in-house. + +In order to work with {PRODUCT} through this integration mechanism, your test runner has to satisfy the following conditions: + +. *File based test runner* : your test runner must accept file names as an input of a test execution in order to execute just those specified set of tests. +. *JUnit XML reports include file names/paths* : your test runner has to produce results of tests in a JUnit compatible format _with_ additional attributes that capture the *file names/paths* of the tests that run. 
If not, refer to xref:send-data-to-smart-tests:record-test-results/convert-test-reports-to-junit-format.adoc[Convert test reports to JUnit format] . + +For example, link:https://mochajs.org/#getting-started[Mocha] is a test runner that meets those criteria. You write tests in JavaScript files: + +[source] +---- +$ cat foo.js +var assert = require('assert'); +describe('Array', function() { + describe('#indexOf()', function() { + it('should return -1 when the value is not present', function() { + assert.equal([1, 2, 3].indexOf(4), -1); + }); + }); +}); +---- + +The Mocha test runner takes those files as arguments: + +`$ mocha --reporter mocha-junit-reporter foo.js` + +...and produces JUnit report files, where the name of the test file is captured, in this case, in the `file` attribute: + +[source] +---- +$ cat test-results.xml + + + + +... +---- + +The rest of this document uses Mocha as an example. + +== Record test results + +After running tests, point the CLI to your test report files to collect test results and train the model: + +`smart-tests record tests file --build --session ./reports/*.xml` + +[WARNING] +-- +You might need to take extra steps to make sure that `smart-tests record tests` + +always runs even if the build fails. See xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensuring record tests always runs] . +-- + +== Subset your test runs + +The high level flow for subsetting is: + +. Get the full list of test files and pass that to `smart-tests subset` with an optimization target for the subset +. `smart-tests subset` will get a subset from the {PRODUCT} platform and output that list to a text file +. Pass the text file into your test runner to run only those tests + +To retrieve a subset of tests, first pass the full list of test candidates to `smart-tests subset` . 
For example: + +[source] +---- +find ./test -name '*.js' | +smart-tests subset file \ + --build \ + --session \ + --target 10% \ + --rest smart-tests-remainder.txt \ + > subset.txt +---- + +* The `--build` should use the same `` value that you used before in `smart-tests record build` . +* The `--confidence` option should be a percentage; we suggest `90%` to start. You can also use `--time` or `--target` ; see xref:features:predictive-test-selection.adoc[Subsetting your test runs] for more info. + +This creates a file called `smart-tests-subset.txt` that you can pass into your command to run tests: + +`mocha $(< smart-tests-subset.txt)` diff --git a/smart_tests/docs/modules/resources/pages/integrations/vitest.adoc b/smart_tests/docs/modules/resources/pages/integrations/vitest.adoc new file mode 100644 index 000000000..a9791e159 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/vitest.adoc @@ -0,0 +1,6 @@ += Vitest +:slug: vitest + +{PRODUCT} interfaces with Vitest via the {PRODUCT} CLI. + +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc#vitest-record-tests[Record test results] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc#vitest-subset[Subset with the CLI]. diff --git a/smart_tests/docs/modules/resources/pages/integrations/xctest.adoc b/smart_tests/docs/modules/resources/pages/integrations/xctest.adoc new file mode 100644 index 000000000..df06f6a90 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/integrations/xctest.adoc @@ -0,0 +1,5 @@ += XCTest + + {PRODUCT} interfaces with Vitest via the {PRODUCT} CLI. 
+ +For more information, refer to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc#xctest-recording-tests[Recording test results] and xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc#xctest-subsetting[Subset with the CLI]. diff --git a/smart_tests/docs/modules/resources/pages/onboarding-guide.adoc b/smart_tests/docs/modules/resources/pages/onboarding-guide.adoc new file mode 100644 index 000000000..657ba6ef5 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/onboarding-guide.adoc @@ -0,0 +1,241 @@ +include::ROOT:partial$abbr.adoc[] + += {PRODUCT} Onboarding Guide + +Two ways to start +----------------- +1. <> – run {PRODUCT} alongside your usual CI runs for a short window to generate a Confidence Curve – this shows how much test time can be safely saved +2. <> – go live with subsetting from day 1 when you need quick wins and are willing to iterate based on real results + +== Before you start + +As you are setting up the workspace, make sure you have completed the pre-requisites - *identify a use‑case* you would use PTS for, *review technical requirements*, and *tune optional configurations* based on your needs – so you can begin smoothly. + +[NOTE] +==== +A {PRODUCT} Sales Engineer will help you evaluate which use-case best fits based on your problem statement and setup. + +Our team will also assist you in implementing best practices and any configuration setup you may need. +==== + +=== Step 1: Identify a use-case + +First of all, you need to pick a problem that you want to solve for your team by using {PRODUCT}. Pick a use-case (listed below) that resonates, and dive deeper to see if it fits your needs. + +xref:features:predictive-test-selection/use-cases-for-predictive-test-selection.adoc#use-case-1[Use-case 1 – In-place subset of a test suite] + +An existing test suite may be taking a long time to run. 
Some teams' capacity for executing tests is finite & limited, yet the demand to run tests is high. Even in cases where test execution capacity is scalable & elastic, the number of tests are so many that it’s costing too much money. + +To that end, the team would want to shorten the execution time of that test suite. Read more about it here. + +xref:features:predictive-test-selection/use-cases-for-predictive-test-selection.adoc#use-case-2[Use-case 2 – Shift-left test selection to find issues earlier in the pipeline] + +The feedback on changes may be coming in too late because the tests are towards the right in your delivery pipeline (e.g. UI tests, E2E, Nightly tests). These are run infrequently (for example once in 3 days). In several teams, another common challenge is that their main/dev branch is too unstable, causing QA people a lot of overhead to deal with failures. + +Read in detail about the use-cases for *Predictive Test Selection*. + +=== Step 2: Review technical requirements + +Next, it’s time to look at the must-have tooling & environment requirements from your end, under which {PRODUCT} is supported. {PRODUCT} will not run without these requirements. + +*Language support* + +- Python 3 and Pip3: {PRODUCT} CLI is a Python 3 package and you need Pip3 to install it +- Java 8+ + +*Version control system* + +- Git (we work with all popular Git systems like GitHub, Bitbucket and GitLab) +- NOTE: Git optimization tools may add additional complexity + +*Internet access* + +- {PRODUCT} is a SaaS service and the CLI needs access to the internet. +- Team enabled to edit CI script. {PRODUCT} is integrated into the CI script. In some larger teams, access to editing CI scripts is secured to another team. Ensure that as you trial {PRODUCT}, you can edit the script. 
+ +*Supported test frameworks* + +- Here’s a list of xref:docs/modules/resources/pages/supported-test-frameworks.adoc[supported test frameworks] + +*Test results in binary (true/false)* + +- {PRODUCT} doesn’t support tests that don’t report tests in binary form (usually performance tests). + +=== Step 3: Requirements for Predictive Test Selection + +Here, we will take you through a couple of must-have requirements for running PTS. Along with that, we have also listed the xref:docs/modules/resources/pages/smart-tests-onboarding/best-practices-checklist.adoc[*best practices and a checklist*] to help your team build a mental map for PTS. + +- No inter-test dependencies. Tests need to be redefined/re-arranged. PTS re-orders tests. If tests have dependencies, they may not work as “higher” priority. +- Test framework supports test file mapping. When an external file changes, {PRODUCT} sends a subset of tests in a test file as if it runs it. In most cases, this is per line. The underlying test framework needs to support linking a test back to a file and test mappings. List of frameworks supported is given [here](#). + +[IMPORTANT] +==== +Before you proceed, make sure you have read the xref:docs/modules/resources/pages/smart-tests-onboarding/best-practices-checklist.adoc[*best practices & checklist*]. +There are several important considerations for your team here. +==== + +=== Step 4: Configuration options for your use-case + +As you think about bringing in {PRODUCT}, there are a few xref:docs/modules/resources/pages/smart-tests-onboarding/optional-configurations.adoc[*configuration topology options*] to think about. + +[NOTE] +==== +Each of these configuration options may come in handy for your team. +We advise you to make sure you have read through the configurations and have an understanding of its implementation as well. +==== + +[[observation-mode]] +== Observation mode + +*When to consider?* To quantify PTS' impact before changing your CI behaviour. 
It’s ideal for teams that want to build confidence, evaluate accuracy, and present measurable ROI before rollout. + +*Typical duration:* 1–2 weeks worth of test run data + +=== Step 1: CI integration & verify data flow + +Configure {PRODUCT} CLI in your CI so it can start collecting build & test run data. + +==== What to do? + +1. Setup workspace & obtain API key +2. Edit CI script to integrate {PRODUCT} CLI (refer <>) + - Install CLI & verify installation + - Record build information + - Record test session + - Record test results after test run job + +*Expected outcome:* You can view the test results in the web-app once the CI runs: + +1. Open your workspace in web-app +2. Navigate to *Test sessions* tab +3. Check your latest recorded run + +=== Step 2: Turn on Observation mode + +Enable {PRODUCT} in passive mode to analyze your tests without changing which tests actually run. + +==== What to do? + +1. Update CI script to introduce subset command (refer <>) +2. Add `--observation` option + +*Expected outcome:* You can view potential time savings in the web-app once the CI runs: + +1. Open your workspace in web-app +2. Navigate to *Predictive Test Selection → Observe* tab +3. Open your latest recorded run +4. Note the *Observation mode* tag on the detail page + +=== Step 3: Record at least 20 test runs + +Now that the CI is configured correctly in observation mode, you just need to wait for {PRODUCT} to collect enough data. We can quantitatively analyze the effectiveness of test selection with your data - this is called the *Confidence Curve*. + +The Confidence Curve helps evaluate how effective the model is at predicting failing tests against how much time can be saved for every test run. + +=== Step 4: Review Confidence Curve v2 + +Assess how much test time {PRODUCT} could safely skip based on recorded execution history. + +==== What to do? 
+ +[NOTE] +==== +Smart Tests team will schedule a walkthrough session to evaluate the Confidence curve and interpret its results +==== + +1. Interpret the Confidence curve + - *What is it?* This curve shows how subset size (number of tests selected) changes with TTFF (Time To First Failure). It helps visualize how quickly failures are found as you increase the number of tests executed. + - *X-axis* -> TTFF, measured in %/min, indicating how quickly a failure is detected relative to total test execution time + - *Y-axis* -> Subset size, the proportion or number of tests included in the subset +2. Identify a suitable Optimization target for subsetting + +*Expected outcome:* You're able to choose a suitable Optimization target using Confidence, Time or Target to add in your subset command [e. 90% confidence, 50% target] + +=== Step 5: Remove observation flag & go live + +Start using in production with an optimization target fitting your use-case and evaluation results from the previous step. + +==== What to do? + +1. Update the CI to remove the `--observation` flag from the subset command +2. Add optimization target option to the subset command, like - `--confidence 90%`, `--target 50%`, `--time 30m` + +*Expected outcome:* Only tests provided by {PRODUCT} subset are executed when the CI runs. You can view results in the web-app: + +1. Open your workspace in web-app +2. Navigate to *Predictive Test Selection → Analyze* tab +3. Open your latest recorded run +4. Note the *Time saved* tag with rest of the details about you subset run on the detail page + - Additionally, you can also view your optimization target, tests included in the subset and the ones that were left out + +[[immediate-use]] +== Immediate use + +*When to consider?* You want immediate time savings and are comfortable iterating in production. Ideal for teams that are looking to rapidly onboard and see quick wins. + +*Typical duration:* Immediate rollout with ongoing iteration over the first few sprints. 
+ +=== Step 1: CI integration & verify data flow + +Configure {PRODUCT} CLI in your CI so it can start collecting build & test run data. + +==== What to do? + +1. Setup workspace & obtain API key +2. Edit CI script to integrate {PRODUCT} CLI (refer <>) + - Install CLI & verify installation + - Record build information + - Record test session + - Record test results after test run job + +*Expected outcome:* You can view the test results in the web-app once the CI runs: + +1. Open your workspace in web-app +2. Navigate to *Test sessions* tab +3. Check your latest recorded run + +=== Step 2: Go live with a conservative optimization target & iterate + +Start using in production with a conservative optimization target and iterate as you go. + +==== What to do? + +1. Update CI script to introduce subset command (refer <>) +2. Add optimization target option to the subset command + - Choose a conservative target for your first few runs like - `--confidence 95%`, `--target 80%`, etc. +3. Modify the optimization target by taking feedback on first few subset runs + - You may choose to decrease confidence in favour of more time savings + - You may prefer to increase confidence in favour of more coverage + +*Expected outcome:* Only tests provided by {PRODUCT} subset are executed when the CI runs. You can view results in the web-app: + +1. Open your workspace in web-app +2. Navigate to *Predictive Test Selection → Analyze* tab +3. Open your latest recorded run +4. 
Note the *Time saved* tag with rest of the details about you subset run on the detail page + - Additionally, you can also view your optimization target, tests included in the subset and the ones that were left out + +[[ci-integration-blueprint]] +== CI integration blueprint + +[source,yaml] +---- +# install, authenticate & verify +uv tool install smart-tests +export SMART_TESTS_TOKEN=your_API_key +smart-tests verify + +# record build +smart-tests record build --build BUILD_NAME [OPTIONS] + +# record session +smart-tests record session --build BUILD_NAME --session SESSION_NAME [OPTIONS] + +# subset +smart-tests subset -- confidence 90% --session SESSION_NAME [OPTIONS] + +<< run tests >> + +# record tests +smart-tests record tests --session SESSION_NAME [OPTIONS] +---- diff --git a/smart_tests/docs/modules/resources/pages/onboarding-guide/best-practices-checklist.adoc b/smart_tests/docs/modules/resources/pages/onboarding-guide/best-practices-checklist.adoc new file mode 100644 index 000000000..5a859bf4b --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/onboarding-guide/best-practices-checklist.adoc @@ -0,0 +1,31 @@ += PTS - Best Practices & Checklist + +Here are a few practices that we recommend for your team. As you start using *Predictive Test Selection* for subsetting your test runs – they will come in handy. + +[IMPORTANT] +==== +Best practices to remember before setting PTS. + +* *Failure rate of tests:* + A failure rate of *5–20%* yields the most useful confidence curve. However, the model can adjust both ways: + - If failures are too few, the team faces the challenge of identifying rare issues, which the model addresses through intelligent test selection. With some modifications, you can also ensure all tests eventually run across subsets. + - If failures are too high, the model still optimizes by reducing the number of tests executed, and issue grouping helps minimize the cognitive load during triage. 
However, in this scenario, the confidence curve becomes less informative. + +* *Tests organized in groups or sub-suites:* + Depending on how tests are organized and run, the instrumentation might be slightly different. + See link:#separating-test-suites[Separating out test suites]. + +* *Flakiness:* + Our model is trained to handle a typical degree of flakiness, but if flakiness is your number one problem, we should chat about it! +==== + +[NOTE] +==== +A short checklist as you think about your test suite. This helps you assess what/where the impact will be. + +1. *What kind of tests are these?* (unit tests, integration tests, E2E tests. You might have your own terminology; that’s fine!) You typically provide the test suite name as an option in the CLI. +2. *When do you run in your software development lifecycle?* (on every git push, when a developer opens a PR, after a git merge, on a schedule (hourly, nightly...), triggered manually (by whom?)) +3. *How many times a day/week/month does this suite run?* +4. *How long does this suite take to complete, usually?* +5. *Are tests run in multiple environments?* (if environment, e.g. chrome vs safari etc., is important, then that information will be passed using the `--flavor` option) +==== diff --git a/smart_tests/docs/modules/resources/pages/onboarding-guide/optional-configurations.adoc b/smart_tests/docs/modules/resources/pages/onboarding-guide/optional-configurations.adoc new file mode 100644 index 000000000..c730f9502 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/onboarding-guide/optional-configurations.adoc @@ -0,0 +1,105 @@ += Optional configurations for your use-case + +As you think about bringing in {PRODUCT}, there are a few configuration topology options to think about. + +== Simple configurations + +=== Attach log files for a test session run + +{PRODUCT} uses log files from test runs to further analyze test results and help diagnose failures. 
+ +*Implementation:* +Logs produced during test sessions can be submitted as attachments. + +link:#how-the-command-works[Here’s how the command works.] + +*Value unlocked:* +- Issues dashboard (identifies new, resolved, and ongoing issues across multiple runs) + +--- + +=== Single source repository (correlate commit data with test failures for Predictive Test Selection) + +If you plan to use PTS, sending build information is critical to correlate commit data with test failures. + +*Implementation:* +To record a build, run `{PRODUCT} record build` before creating a build in your CI script. + +link:#how-the-command-works[Here’s how the command works.] + +*Value unlocked:* +- Predictive Test Selection factors in commit information to predict what tests to run. + +--- + +=== Same test suite runs in multiple different environments + +For cases where your test results depend on the browser or runtime environment. + +*Implementation:* +When submitting test results using `{PRODUCT} record tests`, you can submit additional metadata as key-value pairs using the `--flavor` option. + +link:#how-the-command-works[Here’s how the command works] + +*Value unlocked:* +- Predictive Test Selection can find tailored tests to run for specific environments. + +--- + +=== Multiple test suites run against the same build sent over to {PRODUCT} + +If different test suites are run against the same build, separate them into distinct “test suites” to improve data analysis across the system. + +*Implementation:* +When invoking the `{PRODUCT} record tests` command, specify the `--test-suite` option and assign it a test suite name. + +link:#how-the-command-works[Here’s how the command works] + +*Value unlocked:* +- Improved quality of data analysis through the platform +- UI allows drill-down by test suite for detailed analysis + +--- + +=== Multiple repositories combined in one build, then tested + +If builds are created by combining code from multiple repositories, this setup applies. 
+ +*Implementation:* +Invoke `{PRODUCT} record build` with multiple `--source` options to denote them. + +link:#how-the-command-works[Here’s how the command works] + +*Value unlocked:* +- Enables multi-repo data correlation within a single build. + +--- + +== Complex configurations + +=== Build and test processes happen on different machines + +*Implementation:* +You’ll need to manually create a test session ID to run tests. + +link:#help-doc[Read this help doc further to understand setup] + +--- + +=== Combining test reports from multiple runs + +*Implementation:* +Some pipelines execute multiple test runs against a build, outputting separate reports across several machines. Depending on your setup (see link:#test-session[Test Session]), you may want to merge these into a single test session. + +link:#help-doc[Read this help doc further to understand setup] + +--- + +=== Multiple repositories built/deployed separately then tested together (e.g. micro-services) + +*Implementation:* +Some teams run regression tests against environments where multiple services have been deployed. Each service is built from code in its own repository (or set of repositories). + +link:#help-doc[Read this help doc further to understand setup] + +--- diff --git a/smart_tests/docs/modules/resources/pages/policies.adoc b/smart_tests/docs/modules/resources/pages/policies.adoc new file mode 100644 index 000000000..d6165f43a --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/policies.adoc @@ -0,0 +1,3 @@ += Policies +:slug: policies + diff --git a/smart_tests/docs/modules/resources/pages/policies/data-examples.adoc b/smart_tests/docs/modules/resources/pages/policies/data-examples.adoc new file mode 100644 index 000000000..11c41fc6b --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/policies/data-examples.adoc @@ -0,0 +1,308 @@ += Data examples +:slug: data-examples + +This page provides examples of the various data the {PRODUCT} CLI sends to the {PRODUCT} service. 
+ +You can use the `--dry-run` global option to preview what data _would_ be passed in a real request. You can also use the `--log-level audit` global option when you invoke the CLI to view exactly what data was passed in the request. See the xref:resources:cli-reference.adoc[CLI reference] for more info about both of these options. + +== Recording commits + +e.g. `smart-tests record commit` + +`POST` body sent to the Launchable API: + +[source] +---- +{ + "commits" : [ { + "commitHash" : "d7159e2418dae84cb36d3de50cc792eb952c7df5", + "authorEmailAddress" : "994a70d1ac542e847f24abfcbe05e68926c281c81cd13c704964800d58d022e3", + "authorWhen" : 1763503839000, + "authorTimezoneOffset" : -480, + "committerEmailAddress" : "3c205d8fc749f72977b9331e3179773c315bb1f4860c366de2abe9ec9337730b", + "committerWhen" : 1763503839000, + "committerTimezoneOffset" : -480, + "message" : "", + "shallow" : false, + "parentHashes" : { + "55f48c3a472ca1a17e69180e9733f288f4e1d84e" : [ ], + "6ba6000a7c61788be8fa263bdd95b4818ff6b3f9" : [ { + "linesAdded" : 164, + "linesDeleted" : 164, + "status" : "ADD", + "path" : "/dev/null", + "pathTo" : "smart_tests/test_runners/jasmine.py" + }, { + "linesAdded" : 159, + "linesDeleted" : 159, + "status" : "ADD", + "path" : "/dev/null", + "pathTo" : "tests/data/jasmine/jasmine-test-results.json" + }, { + "linesAdded" : 81, + "linesDeleted" : 81, + "status" : "ADD", + "path" : "/dev/null", + "pathTo" : "tests/data/jasmine/record_test_result.json" + }, { + "linesAdded" : 16, + "linesDeleted" : 16, + "status" : "ADD", + "path" : "/dev/null", + "pathTo" : "tests/data/jasmine/subset_payload.json" + }, { + "linesAdded" : 27, + "linesDeleted" : 27, + "status" : "ADD", + "path" : "/dev/null", + "pathTo" : "tests/test_runners/test_jasmine.py" + } ] + } + } ] + { + "tree": [ + { + "blob": "761b6995de8fa459197c26a17afe6829203e5aa9", + "path": "smart_tests/args4p/option.py" + }, + { + "blob": "25d94e464ed17da4069f93c5568f7ac4f9daac7d", + "path": 
"smart_tests/args4p/parameter.py" + }, + ... + { + "blob": "4a4903634a495f2309e6ab8d999ce1aa1b154f0c", + "path": "tests/utils/test_git_log_parser.py" + }, + { + "blob": "1f5c4229abfaaed01f4340df0af9c6fd4e87edae", + "path": "tests/utils/test_glob.py" + } + ] +} +---- + +== Recording builds + +e.g. `smart-tests record session --build example-build` + +`POST` body sent to the {PRODUCT} API: + +[source] +---- +{ + "buildNumber": "example-build", + "lineage": "main", + "commitHashes": [ + { + "repositoryName": ".", + "commitHash": "394e9af90cd130409f4c2fa48456d8d790149149", + "branchName": "main" + } + ], + "links": [], + "timestamp": null +} +---- + +== Recording sessions + +e.g. `smart-tests record session --build --test-suite=unit-test --flavor os=ubuntu-latest` + +`POST` body sent to the {PRODUCT} API: + +[source] +---- +{ + "flavors": { + "os": "ubuntu-latest" + }, + "isObservation": false, + "noBuild": false, + "testSuite": "unit-test", + "timestamp": null, + "links": [] +} +---- + +== Subsetting tests + +e.g. `find ./tests/**/*_test.py | smart-tests subset --session $(cat session.txt) file > subset.txt` + +`POST` body sent to the {PRODUCT} API: + +[source] +---- +{ + "testPaths": [ + [ + { + "type": "file", + "name": "tests/__init__.py" + } + ], + [ + { + "type": "file", + "name": "tests/args4p/__init__.py" + } + ] + ... + ], + "testRunner": "file", + "session": { + "id": "411" + }, + "ignoreNewTests": false, + "getTestsFromPreviousSessions": false, + "getTestsFromGuess": false, + "useServerSideOptimizationTarget": true, + "changesUnderTest": "one-commit" +} +---- + +== Recording test results + +e.g. 
`smart-tests record tests --session $(cat session.txt) file test-results/*.xml ` + +`POST` body sent to the {PRODUCT} API: + +[source] +---- +{ + "events": [ + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "tests/commands/compare/test_subsets.py" + }, + { + "type": "testsuite", + "name": "tests.commands.compare.test_subsets.SubsetsTest" + }, + { + "type": "testcase", + "name": "test_subsets" + } + ], + "duration": 0.011, + "status": 1, + "stdout": "", + "stderr": "", + "createdAt": "2025-10-22T15:58:36+09:00", + "data": { + "lineNumber": 9 + } + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "tests/commands/compare/test_subsets.py" + }, + { + "type": "testsuite", + "name": "tests.commands.compare.test_subsets.SubsetsTest" + }, + { + "type": "testcase", + "name": "test_subsets_when_deleted_tests" + } + ], + "duration": 0.01, + "status": 1, + "stdout": "", + "stderr": "", + "createdAt": "2025-10-22T15:58:36+09:00", + "data": { + "lineNumber": 103 + } + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "tests/commands/compare/test_subsets.py" + }, + { + "type": "testsuite", + "name": "tests.commands.compare.test_subsets.SubsetsTest" + }, + { + "type": "testcase", + "name": "test_subsets_when_new_tests" + } + ], + "duration": 0.01, + "status": 1, + "stdout": "", + "stderr": "", + "createdAt": "2025-10-22T15:58:36+09:00", + "data": { + "lineNumber": 55 + } + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "tests/commands/record/test_attachment.py" + }, + { + "type": "testsuite", + "name": "tests.commands.record.test_attachment.AttachmentTest" + }, + { + "type": "testcase", + "name": "test_attachment" + } + ], + "duration": 0.041, + "status": 1, + "stdout": "", + "stderr": "", + "createdAt": "2025-10-22T15:58:36+09:00", + "data": { + "lineNumber": 13 + } + }, + { + "type": "case", + "testPath": [ + { + "type": "file", + "name": "tests/commands/record/test_build.py" + }, + { 
"type": "testsuite", + "name": "tests.commands.record.test_build.BuildTest" + }, + { + "type": "testcase", + "name": "test_build_name_validation" + } + ], + "duration": 0.019, + "status": 1, + "stdout": "", + "stderr": "", + "createdAt": "2025-10-22T15:59:01+09:00", + "data": { + "lineNumber": 302 + } + }, + ... + ], + "testRunner": "file", + "group": "", + "metadata": {}, + "noBuild": false, + "testSuite": "", + "flavors": {} +} +---- diff --git a/smart_tests/docs/modules/resources/pages/policies/data-privacy-and-protection.adoc b/smart_tests/docs/modules/resources/pages/policies/data-privacy-and-protection.adoc new file mode 100644 index 000000000..d6cc1d049 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/policies/data-privacy-and-protection.adoc @@ -0,0 +1,87 @@ += Data privacy and protection +:slug: data-privacy-and-protection + +[NOTE] +-- +Your data is our highest priority +-- + +== Purpose and use of collected information + +{PRODUCT}’s predictive test selection service learns the relationship between code changes and the test cases those changes impact. + +=== Does {PRODUCT} use personal information for any purpose outside of providing the services? + +No. + +=== Does {PRODUCT} use any anonymized or aggregate data for any independent purpose outside of providing the services? + +No. + +== Specifics on the data sent to {PRODUCT} + +=== Does {PRODUCT} need access to my source code? + +Yes, with Predictive Test Selection (PTS) v2, {PRODUCT} temporarily processes the content of your source code files to generate embeddings — vectorized representations that allow our AI models to understand and analyze code and test file relationships more effectively. 
+ +Here’s what that means in practice: + +- The full source code content is sent securely to OpenAI to create these embeddings +- The raw source code itself is not stored or retained by {PRODUCT} or OpenAI after the embeddings are created +- Only the embeddings are stored in the {PRODUCT} system +- These are mathematical representations of the repo's file structure and content — they cannot be used to reconstruct the original source code + +=== What data is sent to {PRODUCT}? + +The key inputs to enable Predictive Test Selection (PTS) and AI-powered insights are: + +* *Source code files* — The full content of source code files is temporarily processed to generate *embeddings*, which are mathematical representations of your code. +** These embeddings allow our AI models to understand the relationships between code and tests. +** The raw source code itself is *not stored* by {PRODUCT} or OpenAI after embeddings are generated. +** Only the embeddings — which cannot be reverse-engineered into your original source — are retained for analysis and prediction. + +* *Metadata about the code changes being tested*, including: +** Names and paths of files added, removed, or modified +** Number of lines changed +** Git commit hashes +** Git author details (hashed using SHA-256) + +* *Metadata about the test cases that were run*, including: +** Names and paths of test files and test cases +** Pass/fail/skipped status of each test case +** Duration of each test case +** Associations between test cases and test suites (for example, `unit`, `integration`) + +== Data storage and retention + +=== Does {PRODUCT} encrypt personal information? + +We encrypt data in transit and at rest. + +=== How does {PRODUCT} store customer data? + +{PRODUCT} is a multi-tenant SaaS product. Each customer’s data is kept separate from each other. + +=== Where is the customer data stored specifically? + +{PRODUCT} is hosted on AWS' US-West region. + +=== How long is customer data retained by {PRODUCT}? 
Will customer data be deleted or returned at the end of the engagement? + +The customer has the option to have their data deleted. We will delete data based on a customer request to do so. + +== Removing personal information from {PRODUCT} + +{PRODUCT} stores user email addresses for the purpose of authentication. + +=== Can {PRODUCT} Support provide customer data in a readable and easily transferable format when required by the customer? + +Yes. A customer has to contact support to request this information. + +=== Does {PRODUCT} delete an individual's information for removal? + +Since the service needs user email addresses to provide login functionality, we require you to unsubscribe from the service to delete this data. + +=== Can {PRODUCT} stop processing personal information when requested? + +Since the service needs user email addresses to provide login functionality, we require you to unsubscribe from the service to delete this data. diff --git a/smart_tests/docs/modules/resources/pages/policies/security-policies.adoc b/smart_tests/docs/modules/resources/pages/policies/security-policies.adoc new file mode 100644 index 000000000..7f3eaa6e0 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/policies/security-policies.adoc @@ -0,0 +1,68 @@ += Security policies +:slug: security-policies + +{PRODUCT} is a multi-tenant SaaS. Data is sent from a customer's site to {PRODUCT}. +This document covers frequently asked questions related to information security (infosec). + +== Frequently asked questions + +=== Does {PRODUCT} maintain a written security plan or other security governance? + +Yes, we do. We can furnish details upon request. + +=== Does {PRODUCT} hold any security certifications? + +{PRODUCT} is SOC 2 Type 1 compliant as of August 2023. + +We also utilize AWS as our cloud provider, who maintains SOC 2 Type 2, ISO 27001, and ISO 27017. 
+ +=== Is access to personal information limited only to those individuals who need access to the information to perform the services? + +Yes. Access controls are specifically granted only to individuals whose job function requires it. + +=== Where do you run your AI models? +We currently use OpenAI as our LLM provider. Their privacy policy and data protection practices are in line with ours. +OpenAI’s models are used to summarize test failures, group failures together, and for some customers, to select tests that are relevant to code changes. + +We also train and run various regression models on our own compute infrastructure, which currently operates on AWS. + +=== Are there logging capabilities within {PRODUCT} that capture all AI-related events? +{PRODUCT} includes robust AI event logging and traceability capabilities. +We are continuously innovating to incorporate the latest standards related to AI logging and telemetry. + +=== Do we provide human oversight and validation of the results that AI models are accurate? +Yes. {PRODUCT} employs a *human-in-the-loop* model for oversight and validation. +Our engineers and QA teams review model outputs and recommendations before deployment to production. + +=== Are there processes to minimize hallucination? +Yes. {PRODUCT} implements multiple layers of safeguards to minimize hallucination and non-deterministic behavior. + +=== Is there a summative description of all information used to develop, train, validate, test, or improve the AI System? +Yes. CloudBees maintains documentation describing the datasets, data sources, and usage context for AI model development and evaluation. + +*Links:* + +* link:https://example.com/data-examples[Data examples] +* link:https://example.com/data-privacy-protection[Data Privacy & Protection] + +=== Are inputs into the AI model validated? +Yes. Inputs into {PRODUCT} AI models undergo *multi-stage validation* before processing. 
+We are continuously enhancing safeguards—following industry best practices—for AI models before public deployment. + +=== Does the AI System provide an explanation of the prediction, recommendation, or decision generated by such AI System as an output? +Where relevant and applicable, test execution history, titles, and logs explain or inform recommendations generated. +Additionally, we are continuously evaluating product feedback to provide more helpful tips and explanations to end users. + +=== Do you maintain a DPIA covering your AI/LLM processing activities? Are there any internal mechanisms to monitor bias, transparency, or explainability of your AI models? +Given that our AI processing does not involve high risk to the rights and freedom of individuals and limited personal data, we do not maintain a DPIA as such. +However, upon request, we can complete one. +Yes, we have manual checks such as linters, code runs, and other algorithms. + +=== You mention that OpenAI is used as a subprocessor. Please confirm whether SCCs are in place for transfers to the US and whether a TIA has been completed for OpenAI. +SCCs are included within OpenAI’s link:https://openai.com/policies/data-processing-addendum[Data Processing Agreement], which {CB} has agreed to. + +=== Are all AI-related vendors listed in your subprocessors list? +All AI-related vendors that process personal data and are relevant to the services provided are listed within our Trust Center. + +=== Are OpenAI or other AI subprocessors covered under your SOC 2 / ISO 27001 certification scopes? +Yes. ISO 27001 contains specific third-party security controls, such as annual security assessments and access reviews, which apply to OpenAI and other subprocessors within this scope. 
diff --git a/smart_tests/docs/modules/resources/pages/supported-languages.adoc b/smart_tests/docs/modules/resources/pages/supported-languages.adoc new file mode 100644 index 000000000..f9c4b4ccb --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages.adoc @@ -0,0 +1,15 @@ += Supported languages +:slug: supported-languages + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in various language ecosystems, including the list below. + +*However, {PRODUCT} is not a language-specific tool! It can work with any language.* + +* xref:resources:supported-languages/c-plus-plus.adoc[C++] +* xref:resources:supported-languages/dotnet.adoc[.NET] +* xref:resources:supported-languages/go.adoc[Go] +* xref:resources:supported-languages/java.adoc[Java] +* xref:resources:supported-languages/javascript.adoc[JavaScript] +* xref:resources:supported-languages/perl.adoc[Perl] +* xref:resources:supported-languages/python.adoc[Python] +* xref:resources:supported-languages/ruby.adoc[Ruby] \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/c-plus-plus.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/c-plus-plus.adoc new file mode 100644 index 000000000..63aa91ec8 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/c-plus-plus.adoc @@ -0,0 +1,11 @@ += C++ +:slug: c-plus-plus + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the C++ ecosystem: + +* xref:resources:integrations/bazel.adoc[Bazel] +* xref:resources:integrations/ctest.adoc[CTest] +* xref:resources:integrations/googletest.adoc[GoogleTest] +* xref:resources:integrations/gradle.adoc[Gradle] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/dotnet.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/dotnet.adoc new file mode 100644 index 000000000..270fad9ce --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/dotnet.adoc @@ -0,0 +1,8 @@ += .NET +:slug: dotnet + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the .NET ecosystem: + +* xref:resources:integrations/dotnet-test.adoc[dotnet test] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/go.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/go.adoc new file mode 100644 index 000000000..f1bd945c6 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/go.adoc @@ -0,0 +1,9 @@ += Go +:slug: go + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the Golang ecosystem: + +* xref:resources:integrations/bazel.adoc[Bazel] +* xref:resources:integrations/go-test.adoc[Go Test] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/java.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/java.adoc new file mode 100644 index 000000000..3298eb5bb --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/java.adoc @@ -0,0 +1,11 @@ += Java +:slug: java + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the Java/JVM ecosystem: + +* xref:resources:integrations/ant.adoc[Ant] +* xref:resources:integrations/bazel.adoc[Bazel] +* xref:resources:integrations/gradle.adoc[Gradle] +* xref:resources:integrations/maven.adoc[Maven] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/javascript.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/javascript.adoc new file mode 100644 index 000000000..f736d8fec --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/javascript.adoc @@ -0,0 +1,10 @@ += JavaScript +:slug: javascript + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the JavaScript ecosystem: + +* xref:resources:integrations/cypress.adoc[Cypress] +* xref:resources:integrations/jest.adoc[Jest] +* xref:resources:integrations/karma.adoc[Karma] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/perl.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/perl.adoc new file mode 100644 index 000000000..22f8ba7d3 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/perl.adoc @@ -0,0 +1,8 @@ += Perl +:slug: perl + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the Perl ecosystem: + +* xref:resources:integrations/prove.adoc[prove for Perl] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/python.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/python.adoc new file mode 100644 index 000000000..6989b63f2 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/python.adoc @@ -0,0 +1,8 @@ += Python +:slug: python + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the Python ecosystem: + +* xref:resources:integrations/nose.adoc[nose (Integration)] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-languages/ruby.adoc b/smart_tests/docs/modules/resources/pages/supported-languages/ruby.adoc new file mode 100644 index 000000000..ec5528517 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-languages/ruby.adoc @@ -0,0 +1,9 @@ += Ruby +:slug: ruby + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that are popular in the Ruby ecosystem: + +* xref:resources:integrations/minitest.adoc[minitest] +* xref:resources:integrations/rspec.adoc[RSpec] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks.adoc new file mode 100644 index 000000000..89a0dbff4 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks.adoc @@ -0,0 +1,20 @@ += Supported test frameworks +:slug: supported-test-frameworks + +The {PRODUCT} CLI includes built-in integrations for test runners and build tools that execute tests built using various frameworks, including: + +* xref:resources:supported-test-frameworks/appium.adoc[Appium] +* xref:resources:supported-test-frameworks/cucumber.adoc[Cucumber] +* xref:resources:supported-test-frameworks/junit.adoc[JUnit] +* xref:resources:integrations/googletest.adoc[GoogleTest] +* xref:resources:integrations/jest.adoc[Jest] +* xref:resources:integrations/minitest.adoc[minitest] +* xref:resources:supported-test-frameworks/nose.adoc[nose] +* xref:resources:supported-test-frameworks/nunit.adoc[NUnit] +* xref:resources:integrations/prove.adoc[prove for Perl] +* xref:resources:integrations/robot.adoc[Robot] +* xref:resources:integrations/rspec.adoc[RSpec] +* xref:resources:supported-test-frameworks/selenium.adoc[Selenium] +* xref:resources:supported-test-frameworks/testng.adoc[TestNG] + +Note that the primary integration point for {PRODUCT} is your team's build tool or test runner (i.e. whatever CLI you invoke to actually kick off your tests), not the test framework itself. However, some frameworks have their own test runners. View the full list at xref:resources:integrations.adoc[Integrations] . 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/appium.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/appium.adoc new file mode 100644 index 000000000..635c53626 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/appium.adoc @@ -0,0 +1,11 @@ += Appium +:slug: appium + +The primary integration point for {PRODUCT} is your team's build tool or test runner (i.e. whatever CLI you invoke to actually kick off your tests), not the test framework itself. Therefore, if you use the Appium framework, check which tool you use to kick off your tests, such as: + +* xref:resources:integrations/adb.adoc[Android Debug Bridge (adb)] +* xref:resources:integrations/gradle.adoc[Gradle] +* xref:resources:integrations/maven.adoc[Maven] +* xref:resources:integrations/pytest.adoc[pytest] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/cucumber.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/cucumber.adoc new file mode 100644 index 000000000..b293eb80c --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/cucumber.adoc @@ -0,0 +1,4 @@ += Cucumber +:slug: cucumber + +The Cucumber framework has its own test runner. Check out the xref:resources:integrations/cucumber.adoc[cucumber] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/googletest.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/googletest.adoc new file mode 100644 index 000000000..a34d64f53 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/googletest.adoc @@ -0,0 +1,4 @@ += GoogleTest +:slug: googletest + +The GoogleTest framework has its own test runner. 
Check out the xref:resources:integrations/googletest.adoc[GoogleTest] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/jest.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/jest.adoc new file mode 100644 index 000000000..60b2f6391 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/jest.adoc @@ -0,0 +1,4 @@ += Jest +:slug: jest + +The Jest framework has its own test runner. Check out the xref:resources:integrations/jest.adoc[Jest] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/junit.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/junit.adoc new file mode 100644 index 000000000..d5b0e1bbd --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/junit.adoc @@ -0,0 +1,9 @@ += JUnit +:slug: junit + +The primary integration point for {PRODUCT} is your team's build tool or test runner (i.e. whatever CLI you invoke to actually kick off your tests), not the test framework itself. Therefore, if you use the JUnit framework, check which tool you use to kick off your tests, such as: + +* xref:resources:integrations/gradle.adoc[Gradle] +* xref:resources:integrations/maven.adoc[Maven] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/minitest.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/minitest.adoc new file mode 100644 index 000000000..7c77857b6 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/minitest.adoc @@ -0,0 +1,4 @@ += minitest +:slug: minitest + +The minitest framework has its own test runner. Check out the xref:resources:integrations/minitest.adoc[minitest] page for more info. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/nose.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/nose.adoc new file mode 100644 index 000000000..b514ac1b0 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/nose.adoc @@ -0,0 +1,4 @@ += nose +:slug: nose + +The nose framework has its own test runner. Check out the xref:resources:integrations/nose.adoc[nose (Integration)] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/nunit.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/nunit.adoc new file mode 100644 index 000000000..25af71f92 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/nunit.adoc @@ -0,0 +1,4 @@ += NUnit +:slug: nunit + +The nunit framework has its own test runner. Check out the xref:resources:integrations/dotnet-test.adoc[dotnet test] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/prove.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/prove.adoc new file mode 100644 index 000000000..77ab40510 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/prove.adoc @@ -0,0 +1,4 @@ += prove for Perl +:slug: prove + +The prove has its own test runner. Check out the xref:resources:integrations/prove.adoc[prove for Perl] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/robot.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/robot.adoc new file mode 100644 index 000000000..5b5b626cb --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/robot.adoc @@ -0,0 +1,4 @@ += Robot +:slug: robot + +The Robot framework has its own test runner. 
Check out the xref:resources:integrations/robot.adoc[Robot] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/rspec.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/rspec.adoc new file mode 100644 index 000000000..edf3066f1 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/rspec.adoc @@ -0,0 +1,4 @@ += RSpec +:slug: rspec + +The RSpec framework has its own test runner. Check out the xref:resources:integrations/rspec.adoc[RSpec] page for more info. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/selenium.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/selenium.adoc new file mode 100644 index 000000000..bb91cc917 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/selenium.adoc @@ -0,0 +1,11 @@ += Selenium +:slug: selenium + +The primary integration point for {PRODUCT} is your team's build tool or test runner (i.e. whatever CLI you invoke to actually kick off your tests), not the test framework itself. Therefore, if you use the Selenium framework, check which tool you use to kick off your tests, such as: + +* xref:resources:integrations/ant.adoc[Ant] +* xref:resources:integrations/gradle.adoc[Gradle] +* xref:resources:integrations/maven.adoc[Maven] +* xref:resources:integrations/pytest.adoc[pytest] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. 
\ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/supported-test-frameworks/testng.adoc b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/testng.adoc new file mode 100644 index 000000000..b0b9d3641 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/supported-test-frameworks/testng.adoc @@ -0,0 +1,9 @@ += TestNG +:slug: testng + +The primary integration point for {PRODUCT} is your team's build tool or test runner (i.e. whatever CLI you invoke to actually kick off your tests), not the test framework itself. Therefore, if you use the TestNG framework, check which tool you use to kick off your tests, such as: + +* xref:resources:integrations/gradle.adoc[Gradle] +* xref:resources:integrations/maven.adoc[Maven] + +See each integration's page for instructions to add {PRODUCT} to your CI pipeline. \ No newline at end of file diff --git a/smart_tests/docs/modules/resources/pages/troubleshooting.adoc b/smart_tests/docs/modules/resources/pages/troubleshooting.adoc new file mode 100644 index 000000000..397c0c138 --- /dev/null +++ b/smart_tests/docs/modules/resources/pages/troubleshooting.adoc @@ -0,0 +1,75 @@ += Troubleshooting +:slug: troubleshooting + +== "smart-tests verify" failure + +=== Firewalls and static IP addresses + +If you receive an error like this one, then you will need to configure your firewall to allow traffic to `+api-static.mercury.launchableinc.com+` : + +[source] +---- +$ smart-tests verify +unable to post to https://api.mercury.launchableinc.com/... + +$ smart-tests record build +... +Exception in thread "main" java.net.UnknownHostException: api.mercury.launchableinc.com: No address associated with hostname +---- + +If you need to interact with the API via static IPs, first set the `SMART_TESTS_BASE_URL` environment variable to `+https://api-static.mercury.launchableinc.com+` . 
+ +The IP for this hostname will be either `13.248.185.38` or `76.223.54.162` which you can add to your firewall settings. + +=== Proxies + +If your CI server sits behind a proxy, you can tell the CLI to use it by setting the `HTTP_PROXY` and/or `HTTPS_PROXY` environment variables. For example: + +[source] +---- +export HTTP_PROXY="http://10.10.1.10:3128" +export HTTPS_PROXY="http://10.10.1.10:1080" +---- + +=== SSL certificate verification error + +If you get an error like this: + +`SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate ` + +It is an indication that your system is lacking the root CA certificates. See the https://requests.readthedocs.io/en/master/user/advanced/#proxies[documentation for Requests], which the CLI uses under the hood, for how it looks up certificates. Also check out Stack Overflow posts like https://stackoverflow.com/questions/50422136/python-requests-with-wincertstore/57053415[this] where people discuss various remedies. + +If all else fails, use the `--skip-cert-verification` option of the {PRODUCT} CLI to altogether bypass the SSL certificate check process. This means you are susceptible to MITM attacks, so use it with caution. + +== Missing Git object during commit collection + +During `record build` or `record commit` , you might encounter errors like this: + +`Warning: 5730667 is missing. Skipping diff calculation for f2244ec -> 20630e5.` + +This warning indicates that a git object (typically a "blob", which stores a file) is missing from the local clone, and that prevented us from calculating a diff between the two commits listed. This is typically a result of https://git-scm.com/docs/shallow[Git shallow clone] or https://git-scm.com/docs/partial-clone[Git partial clone] .
+ +{PRODUCT} will continue to function without this information, hence this is a warning; Predictive test selection will run with a limited set of information, which will hurt the performance by an unknown amount. If you are only seeing this sporadically, we suggest you simply ignore this warning. + +Getting rid of this warning involves tweaking the `git-clone` or `git-fetch` operations to ensure enough of the relevant Git objects are made available locally. For example, + +* CI systems often use `--depth 1` to just fetch the very latest commit and that alone. Increasing this number should help, say `--depth 32` +* GitHub Action `actions/checkout` uses `--filter=blob:none` when you choose to do a sparse checkout. Use `filter: none` to override this. + +== Recording branch of build + +During `record build` , {PRODUCT} tries to automatically determine the Git branch being built. However, this can fail, depending on how you check out the code. When this happens, our webapp will ask you to configure this explicitly. + +If your build involves just one repository, you can do this by using the `--branch NAME` option: + +`smart-tests record build --build $BUILD_NAME --source . --branch $GIT_BRANCH` + +If you have multiple repositories, you have to use the `--branch REPONAME=BRANCHNAME` format: + +[source] +---- +smart-tests record build --build $BUILD_NAME \ + --source product=. --source test=../test --branch product=$GIT_BRANCH +---- + +You can specify branches for every repository in this manner, but the first repository is the most important, as that is used as the branch of this build as a whole. We recommend choosing the primary repository as the first repository for this reason.
diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/getting-started.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/getting-started.adoc new file mode 100644 index 000000000..90ba6db23 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/getting-started.adoc @@ -0,0 +1,147 @@ += Getting started +include::ROOT:partial$abbr.adoc[] +:slug: getting-started + +[#create-account-and-workspace] +== Create your account and workspace + +After creating a user account and verifying your email address (if necessary), the system will prompt you to create an xref:concepts:organization.adoc[Organization] for your company and a xref:concepts:workspace.adoc[Workspace] for your test suite. + +[#create-and-set-your-api-key] +== Create and set your API key + +This authentication token lets the CLI talk to your {PRODUCT} workspace. Create an API key by doing the following: + +. Navigate to menu:Settings[]. +. Select btn:[Create your API key] and copy the generated key. + ++ +.Settings tab in {PRODUCT} dashboard +image::ROOT:sending-data/create-api-key.png[create-api-key,role="screenshot"] ++ +[NOTE] +-- +Make sure to copy the API key somewhere secure, as you won't be able to see it again after you close the dialog. +-- ++ +. Make that API key available as the `SMART_TESTS_TOKEN` environment variable in your CI process. This allows the CLI to authenticate with your {PRODUCT} workspace when you run it in your CI pipeline. ++ +[TIP] +-- +If using a different CI system, check its documentation for a section about setting environment variables. {PRODUCT} works with any CI system. 
+-- ++ +.CI System documentation for setting environment variables +[cols="1,2", options="header"] + +|=== +| CI system | Docs +| Azure DevOps Pipelines | link:https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables[Azure DevOps Pipelines] +| Bitbucket Pipelines | link:https://support.atlassian.com/bitbucket-cloud/docs/variables-and-secrets/[Bitbucket Pipelines] +| CircleCI | link:https://circleci.com/docs/guides/security/env-vars//[CircleCI] +| GitHub Actions | link:https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets[GitHub Actions] +| GitLab CI | link:https://docs.gitlab.com/ee/ci/variables/[GitLab CI] +| GoCD | link:https://docs.gocd.org/current/faq/dev_use_current_revision_in_build.html#setting-variables-on-an-environment[GoCD] + +| Jenkins +|link:https://docs.cloudbees.com/docs/cloudbees-ci/latest/cloud-secure-guide/injecting-secrets[{JOSS}] + +NOTE: Create a global secret text in Jenkins and reference it as an environment variable in your pipeline. For more information, refer to the link above. + + +| Travis CI | link:https://docs.travis-ci.com/user/environment-variables/[Travis CI] +|=== + +[#install-the-cli] +== Install the {PRODUCT} CLI in your CI pipeline + +The {PRODUCT} CLI is a Python3 package that you can install from link:https://pypi.org/project/smart-tests/[PyPI]. The CLI connects your build tool/test runner to {PRODUCT}. + +=== Recommended: Use uv (fastest) + +{PRODUCT} recommends using link:https://docs.astral.sh/uv/[uv], a fast Python package installer, for the best installation experience. + +. Install uv: ++ +[source,bash] +---- +curl -LsSf https://astral.sh/uv/install.sh | sh +export PATH="$HOME/.local/bin:$PATH" +---- ++ +For more information about installation methods (including Windows, GitHub Actions, etc.), refer to link:https://docs.astral.sh/uv/getting-started/installation/[uv installation guide]. ++ +. 
Install {PRODUCT} CLI: ++ +[source,bash] +---- +uv tool install smart-tests-cli~=2.0 +---- + +=== Alternative: Use pip + +You can also install the CLI using pip by adding the following command to the part of your CI script where you install dependencies. +[IMPORTANT] +-- +If your build and test process is split into different pipelines or machines, you must do this in both places. +-- +---- +pip3 install --user --upgrade smart-tests-cli +---- + +[#verify-connectivity] +== Verify connectivity + +After setting your API key and installing the CLI, add `smart-tests verify || true` to your CI script to verify that everything is set up correctly. If successful, you should receive an output such as: + +[source] +---- +# We recommend including `|| true` after smart-tests verify so that the exit status from the command is always 0 +$ smart-tests verify || true + +Organization: +Workspace: +Platform: +Python version: +Java command: +smart-tests version: +Your CLI configuration is successfully verified. +---- + +For more information on errors, refer to xref:resources:troubleshooting.adoc[Troubleshooting]. + +Once all of the above steps are complete, you can start sending data to {PRODUCT} and using its features! + +[#cli-execution-order] +== CLI execution order + +The following is the {PRODUCT} CLI execution order, with corresponding commands. CloudBees recommends these commands be executed in this order to enable Predictive test selection. 
+ +[source,default=expand] +---- +#1: Record builds +smart-tests record build --build mychange1 + +#2: Start a new test session +smart-tests record session --test-suite e2e --build mychange1 > session.txt + +#3: Decide which tests to run (Predictive Test Selection) +find tests -name 'test_*.py' | grep -v tests/data > test_list.txt +cat test_list.txt | smart-tests subset file --confidence 90% --session @session.txt > subset.txt + +#4: Execute selected tests +uv run poe test-xml $(cat subset.txt) + +#5: Record test results +smart-tests record tests file --session @session.txt test-results/*.xml +---- + +. Start with xref:send-data-to-smart-tests:record-builds/record-builds.adoc[record builds]. +. Then xref:send-data-to-smart-tests:record-sessions/record-sessions.adoc[record sessions]. +. For those using Predictive test selection, arrange and run xref:send-data-to-smart-tests:subset/subset-predictive-test-selection.adoc[subsets]. +. Finally, xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[record test results]. + +[NOTE] +-- +If only recording test results to generate insights the subset command can be skipped. +-- \ No newline at end of file diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/migration-to-github-oidc-auth.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/migration-to-github-oidc-auth.adoc new file mode 100644 index 000000000..33e16ff1a --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/migration-to-github-oidc-auth.adoc @@ -0,0 +1,117 @@ +:slug: migration-to-github-oidc-auth += Update tokenless authentication to use GitHub OIDC + +*Tokenless authentication* is {PRODUCT}'s specialized authentication method for public repositories that use GitHub Actions for CI. + +We are updating this method to use OpenID Connect (OIDC) to make it more scalable and secure. + +[NOTE] +-- +This change requires action on your part. 
+-- + +== OIDC implementation overview + +GitHub now provides a short-lived signed token for each GitHub Actions run (link:https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect[About security hardening with OpenID Connect]). This token is signed by GitHub's private key, and we can verify its validity via its public key. This makes the token a security credential that major Cloud providers such as AWS, Azure, and Google Cloud can use as an authentication token. {PRODUCT} implemented the same mechanism as these Cloud providers, allowing it to be accepted as a credential. + +The new implementation of tokenless authentication provides the same benefit as the previous one: no API key is needed. + +However, on top of that, the new implementation provides more security as it uses a verifiable short-lived token. Because the previous implementation involved calling GitHub APIs, the authentication process occasionally hit its API limit, resulting in request failures. With the new implementation, we no longer need to hit the GitHub API, which makes the service more stable. + +== Migration process + +The new OIDC-based authentication is supported by CLI v1.52.0. If you have installed the latest CLI using `pip3 install --upgrade`, you will have the necessary version automatically. Otherwise, upgrade to the latest version. + +=== Steps + +To migrate to the new implementation, do the following: + +. Add/update the `permissions` section of your GitHub Actions YAML file. (Refer to <<github-actions-permissions>>). +. Add a new `EXPERIMENTAL_GITHUB_OIDC_TOKEN_AUTH` environment variable. +.. Set this to `1` to enable the new auth implementation. +. Remove the `GITHUB_PR_HEAD_SHA` environment variable. It is no longer needed. +. Keep the `LAUNCHABLE_ORGANIZATION` and `LAUNCHABLE_WORKSPACE` environment variables that were already set.
+ +=== Environment variables summary + +[cols="1,1", options="header"] +|=== +|API implementation (original) |OIDC implementation (new) + +|`LAUNCHABLE_ORGANIZATION`: {PRODUCT} organization name +|`LAUNCHABLE_ORGANIZATION`: {PRODUCT} organization name + +|`LAUNCHABLE_WORKSPACE`: {PRODUCT} workspace name +|`LAUNCHABLE_WORKSPACE`: {PRODUCT} workspace name + +|`GITHUB_PR_HEAD_SHA` +|`EXPERIMENTAL_GITHUB_OIDC_TOKEN_AUTH`: Set this to `1` to enable the new auth. +|=== + +[#github-actions-permissions] +=== GitHub Actions Permissions + +To use the OIDC token in GitHub Actions, configure permissions to retrieve it. As described in the link:https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings[GitHub Help Article] , `id-token: write` permission needs to be added. + +This permission can be added per job or to the entire workflow. + +==== Examples + +[source,role="default=expand"] +---- +name: Verify {PRODUCT} tokenless authentication + +on: + pull_request: + paths: + - gradle/** + +env: + LAUNCHABLE_ORGANIZATION: "examples" + LAUNCHABLE_WORKSPACE: "gradle" + EXPERIMENTAL_GITHUB_OIDC_TOKEN_AUTH: 1 + +permissions: + id-token: write + contents: read + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - uses: actions/setup-python@v2 + - name: Set up JDK 1.8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + + - name: {PRODUCT} + run: | + pip3 install --user smart-tests~=1.0 + export PATH=~/.local/bin:$PATH + smart-tests verify + working-directory: ./gradle +---- + +== Frequently Asked Questions + +=== What is included in the OIDC token? + +GitHub provides a detailed explanation and example of the OIDC token. 
For more information, refer to link:https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#understanding-the-oidc-token[Understanding the OIDC token]. + +=== How does {PRODUCT} verify the OIDC token? + +When you apply for tokenless authentication, we associate your GitHub repository with your {PRODUCT} workspace in our internal database. + +When you run the CLI, the {PRODUCT} API server verifies the OIDC token and checks that the `repository` claim in it matches the stored association. + +=== Can I see how the CLI handles the OIDC token? + +Yes. Review these commits in the public CLI repository: + +* https://github.com/launchableinc/cli/commit/945597f3fffeb49cd5968ba29054de78505aab61[https://github.com/launchableinc/cli/commit/945597f3fffeb49cd5968ba29054de78505aab61] +* https://github.com/launchableinc/cli/commit/68b06c01607b43bed33b2f774f424f7a8c220af6[https://github.com/launchableinc/cli/commit/68b06c01607b43bed33b2f774f424f7a8c220af6] diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/use-the-cli-with-a-public-repository.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/use-the-cli-with-a-public-repository.adoc new file mode 100644 index 000000000..53f74a4e0 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/use-the-cli-with-a-public-repository.adoc @@ -0,0 +1,94 @@ +:slug: use-the-cli-with-a-public-repository += Use the CLI with a public repository + +Authentication between the {PRODUCT} CLI and {PRODUCT} API typically requires an API key. However, API keys cannot be used for open-source projects in public repositories since anyone can retrieve an API key by opening a pull request. + +To solve this problem for open-source projects, {PRODUCT} offers another authentication mechanism called _tokenless authentication_. 
Instead of using a static token, tokenless authentication uses a CI/CD service provider's public API to verify if tests are actually running in CI. With this feature, OSS contributors can analyze and optimize test execution without a static {PRODUCT} API token. + +[NOTE] +-- +In November 2022 {PRODUCT} added support for OpenID Connect for authentication. + +If tokenless authentication was implemented before November 2022, refer to xref:send-data-to-smart-tests:getting-started/migration-to-github-oidc-auth.adoc[Update tokenless authentication to use GitHub OIDC]. +-- +[#setup-tokenless-authentication] +== Set up tokenless authentication + +=== Prerequisites + +If your project is open source, and you want to use {PRODUCT}; + +* The open-source project needs to be hosted in a public GitHub repository. +* The open-source project needs to use GitHub Actions for {CI}. + +=== Preparation + +. http://app.launchableinc.com/signup[Sign up] and create your xref:concepts:organization.adoc[Organization] and xref:concepts:workspace.adoc[Workspace]. +. https://www.launchableinc.com/support[Contact us] to enable tokenless authentication for your project. We need to know your {PRODUCT} organization, {PRODUCT} workspace, and GitHub repository URL. +. Update your GitHub Actions YAML. +. Start using {PRODUCT} in your open-source project. + +==== GitHub Actions YAML configuration + +Once tokenless authentication is enabled for your project, do the following: + +. Configure environment variables in your CI pipeline: + +.. `SMART_TESTS_ORGANIZATION`: {PRODUCT} organization name +.. `SMART_TESTS_WORKSPACE`: {PRODUCT} workspace name +.. `EXPERIMENTAL_GITHUB_OIDC_TOKEN_AUTH`: Set this to 1 + +. Add the `permissions` section of your GitHub Actions YAML file. + +Tokenless authentication relies on OpenID Connect (OIDC) tokens. To use an OIDC token in GitHub Actions, you need to configure permissions to retrieve it. 
As described in the https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings[GitHub Help Article], `id-token: write` permission needs to be added. + +This permission can be added for one job or to the entire workflow: + +[source,role="default=expand"] +---- +name: Verify Smart Tests tokenless authentication + +on: + pull_request: + paths: + - gradle/** + +env: + SMART_TESTS_ORGANIZATION: "examples" + SMART_TESTS_WORKSPACE: "gradle" + EXPERIMENTAL_GITHUB_OIDC_TOKEN_AUTH: 1 + +permissions: + id-token: write + contents: read + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - uses: actions/setup-python@v2 + - name: Set up JDK 1.8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + + - name: {PRODUCT} + run: | + pip3 install --user smart-tests~=1.0 + export PATH=~/.local/bin:$PATH + smart-tests verify + working-directory: ./gradle +---- + +== About OpenID Connect (OIDC) + +[NOTE] +-- +In November 2022 {PRODUCT} added support for OpenID Connect for authentication. + +If tokenless authentication was implemented before November 2022, refer to xref:send-data-to-smart-tests:getting-started/migration-to-github-oidc-auth.adoc[Update tokenless authentication to use GitHub OIDC]. 
+-- diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/use-the-cli-with-docker-image.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/use-the-cli-with-docker-image.adoc new file mode 100644 index 000000000..51a8cba5f --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/getting-started/use-the-cli-with-docker-image.adoc @@ -0,0 +1,5 @@ +include::ROOT:partial$abbr.adoc[] +:slug: use-the-cli-with-docker-image += Using the CLI with Docker image + +The following link:https://hub.docker.com/r/cloudbees/launchable[CloudBees/launchable] is the official Docker image published by CloudBees for integrating the Launchable CLI into your CI/CD pipeline. diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/choose-a-value-for-build-name.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/choose-a-value-for-build-name.adoc new file mode 100644 index 000000000..398033ab4 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/choose-a-value-for-build-name.adoc @@ -0,0 +1,59 @@ +:slug: choose-a-value-for-build-name += Choose a value for build name + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-builds/record-builds-from-multiple-repositories.adoc[Record builds from multiple repositories]. +-- + +Your CI process probably already relies on some identifier to distinguish different builds. Such an identifier might be called a build number, build ID, etc. Most CI systems automatically make these values available via built-in environment variables. 
This makes it easy to pass this value into `record build`: + +[cols="1,1,1", options="header"] +.System-specific build name values +|=== +|CI system |Suggested value |Documentation + +|Azure DevOps Pipelines +|`Build.BuildId` +|https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables[Azure DevOps Pipelines variables] + +|Bitbucket Pipelines +|`BITBUCKET_BUILD_NUMBER` +|https://support.atlassian.com/bitbucket-cloud/docs/variables-and-secrets/[Bitbucket Pipelines variables] + +|CircleCI +|`CIRCLE_BUILD_NUM` +|https://circleci.com/docs/reference/variables/#built-in-environment-variables[CircleCI environment variables] + +|GitHub Actions +|`GITHUB_RUN_ID` +|https://docs.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables#default-environment-variables[GitHub Actions environment variables] + +|GitLab CI +|`CI_JOB_ID` +|https://docs.gitlab.com/ee/ci/variables/predefined_variables.html[GitLab CI predefined variables] + +|GoCD +|`GO_PIPELINE_LABEL` +|https://docs.gocd.org/current/faq/dev_use_current_revision_in_build.html#standard-gocd-environment-variables[GoCD environment variables] + +|Jenkins +|`BUILD_TAG` +|https://www.jenkins.io/doc/book/pipeline/jenkinsfile/#using-environment-variables[{JOSS}] + +|Travis CI +|`TRAVIS_BUILD_NUMBER` +|https://docs.travis-ci.com/user/environment-variables/#default-environment-variables[Travis CI environment variables] +|=== + +Other examples: + +* If your build produces an artifact or file that is later retrieved for testing, then the `sha1sum` of the file itself would be a good build name as it is unique. +* If you are building a Docker image, its content hash can be used as the unique identifier of a build: `docker inspect -f "{{.Id}}"` . + +[WARNING] +-- +{PRODUCT} discourages using a Git commit hash (or the output of `git-describe` ) if there is only one source code repository. 
+ +It's not uncommon for teams to produce multiple builds from the same commit that are still considered different builds. +-- diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/deal-with-shallow-clones.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/deal-with-shallow-clones.adoc new file mode 100644 index 000000000..62b752160 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/deal-with-shallow-clones.adoc @@ -0,0 +1,32 @@ +:slug: deal-with-shallow-clones += Deal with shallow clones + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-builds/record-builds.adoc[Record builds with {PRODUCT}]. +-- + +== Git shallow clone & problem it creates + +Git shallow clone is a technique often used in build/test environments to speed up the cloning of Git repositories. For more information, refer to link:https://www.perforce.com/blog/vcs/git-beyond-basics-using-shallow-clones[https://www.perforce.com/blog/vcs/git-beyond-basics-using-shallow-clones]. + +When you are recording builds from a shallowly cloned workspace, we won't be able to collect all the commits we can use to make predictions. So it's worth fixing this problem, although this is not a show-stopper for {PRODUCT}. + +== Solution: Set up the collection process + +In this approach, we set up a dedicated process with the sole purpose of collecting commits. Keep this separate from your {CI} jobs that use shallow clones, to keep those nimble. + +. Create a recurring job on your CI system with connected persistent storage. Initially, you run `git clone` to set up a full clone of the repository, then when it runs again later, you execute `git fetch --all` , which incrementally fetches all the commits from the server. + +. 
Once the local Git repository is populated with the updated commits from the server, run `smart-tests record commit --source DIR` (where `DIR` points to the local workspace, so probably just `.`), which processes the additional commits obtained. This operation should be pretty fast. + +The frequency of this recurring job should be higher than the time lag between the commit getting pushed to the repository and starting the test process. For example, if your build is a C++ project that takes 1 hour to build before it gets to testing, then the frequency of the commit collection jobs can be every 30 minutes. If you are unsure, every 5 minutes is probably a good start. + +The persistent storage makes incremental `git fetch` fast, but this setup will survive the occasional loss of the persistent storage. It's just that the first run after such a loss will be slow. + +Instead of or in addition to the periodic execution, you can execute this job whenever new builds happen. Doing so will reduce the chance that some of the commits do not make it into {PRODUCT} by the time the test suite runs. + +[WARNING] +-- +Persistent storage is not always a viable option, for example with cloud CI providers. If this is not an adequate solution in your situation, please contact support so that we can improve this further. +-- diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/record-builds-from-multiple-repositories.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/record-builds-from-multiple-repositories.adoc new file mode 100644 index 000000000..0f26747ae --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/record-builds-from-multiple-repositories.adoc @@ -0,0 +1,120 @@ +:slug: record-builds-from-multiple-repositories += Record builds from multiple repositories + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-builds/record-builds.adoc[Record builds with {PRODUCT}]. 
+-- + +== Multiple repositories combined in one build then tested + +If you produce a build by combining code from several repositories (like the diagram below), invoke `smart-tests record build` with multiple `--source` options to denote them. + +.Record build with multiple repositories +image::ROOT:recording-from-multiple-repos.png[Record build with multiple repositories,role="screenshot"] + +To differentiate them, provide a label for each repository in the form of `LABEL=PATH`: + +[source] +---- +# record the build +smart-tests record build --build $BUILD_NAME --source main=./main_repo --source lib=./lib_repo + +# create the build +bundle install +---- + +[NOTE] +-- +The `record build` will automatically recognize link:https://www.git-scm.com/book/en/v2/Git-Tools-Submodules[Git submodules], so explicitly declaring them is not needed. +-- + +== Multiple repositories built/deployed separately then tested together (for example, microservices) + +Some teams run regression tests against an environment where several services have been deployed. Each service is built from code from its own repository (or set of repositories). + +.Recording multiple repositories built/deployed separately then tested together +image::ROOT:recording-from-multiple-repos-built-separately.png[Recording multiple repositories built separately,role="screenshot"] + +The intent of recording a build is to capture the version of the software being tested. In this scenario, the version of the software being tested is effectively the combination of all versions of the components deployed to the test environment.
+ +For example, if the currently deployed version of service 1 is `d7bf8b7c` (from repo 1) and the currently deployed version of service 2 is `c39b86a1` (from repo 2), then the effective version of the software being tested can be interpreted as: + +[source] +---- +[ + { + repository: "repo1", + commit: "d7bf8b7c" + }, + { + repository: "repo2", + commit: "c39b86a1" + } +] +---- + +This interpretation is no different from the diagram above. However, because you want to capture the versions of all the deployed software being tested, you need to run `smart-tests record build` right before running tests — for example, in the green box in the diagram above. + +This presents a challenge because the repos for each service are _usually_ not available at this stage (and cloning them just for this purpose would be inefficient). Luckily, when you run `smart-tests record build` , the CLI actually performs two functions that we can split up to support this use case: + +* Recording all new commits from included repositories, and +* Recording the build itself, 'tagged' with the HEAD commit from each included repository. + +The CLI further provides options to separate these: + +* Record commits in each component's build process. +* Record the "combined" build itself right before you run tests. + +The commands and options that enable this are: + +. `smart-tests record commit --source /PATH/TO/REPO`, can record commits separately in each component's build process. + +. Two `smart-tests record build` options: +.. `--no-commit-collection` disables commit collection (since you're doing it separately), and +.. `--commit REPO=HASH` allows you to 'tag' the build with each repository. ++ +[NOTE] +-- +This means that the deployed version of each service needs to be available to the process where you run tests. +-- + +These commands and steps are shown in the white boxes in the expanded diagram below. 
+ +.Recording multiple repositories built separately then tested together, with commit collection separated +image::ROOT:recording-from-multiple-repos-built-separately-detailed.png[Recording multiple repositories built separately then tested together, with commit collection separated,role="screenshot"] + +== Incremental build over multiple repositories + +Some teams have a software project that spans a large number of repositories, with a build system that supports the incremental build with object cache. Only the "changed" repositories are cloned locally and built, and the reamining artifacts for the "unchanged" repositories get pulled from a remote/distributed object cache. The end result is that each build clones a different subset of repositories. + +One such example is link:https://cs.android.com/android/platform/superproject/+/master:build/soong/docs/rbe.md[Android + RBE]. +Two repositories (zooma & fft2d) are "changed" and thus cloned locally to be built. One repository (av framework) is "unchanged" and its build artifacts will be pulled from a remote cache, so there's no local clone of this repository. Even though this repository is not cloned, there's still a commit hash (d3427bd) associated with it. + +.Commit hashes for multiple repositories in an incremental build +image::ROOT:507088cf-209d-4856-be3e-60477c166d92.png[Commit hashes,role="screenshot"] + +The intent of recording a build is to capture the version of software being assembled, which includes portions that are not built and simply cached. To achieve this; + +* For the "changed" repositories, let {PRODUCT} collect new commits from a local clone. +* For the "unchanged" repositories, tell {PRODUCT} that the build will use a certain commit that is not provided locally. + +The commands and options that achieve this are: + +. For each "changed" repository, there should be a local clone, +.. Run `smart-tests record commit --source path/to/repo`, which lets you record commits from this repository. +. 
Make one `smart-tests record build` invocation with the following:
+.. `--commit REPO=HASH` for each repository, changed or unchanged. If the repository was changed, the `record commit` command earlier should have collected this commit hash. If the repository was not changed, there should have been some earlier builds that cloned this repository and recorded the hash, for which the `record commit` command had run.
+ +== Record builds + +To record a build, run `smart-tests record build` before creating a build in either your {CI} or organizations CI script: + +[source,bash] +---- +smart-tests record build --build --source src= +---- +For more information on the `record build` command, refer to xref:resources:cli-reference.adoc[CLI reference - Command:record build]. + +* With the `--build` option, assign a unique identifier to this build. This value will be used later when test results are recorded. For tips on choosing this value, refer to xref:send-data-to-smart-tests:record-builds/choose-a-value-for-build-name.adoc[Choose a value for build name]. +* The `--source` option points to the local copy of the Git repository (or repositories) used to produce this build, such as `.` or `src`. +** Make sure to point to a full clone of each repository, not a partial clone. +** *GitHub Actions users:* if using `actions/checkout` to check out the current repo, set the `fetch-depth: 0`. +** Refer to xref:send-data-to-smart-tests:record-builds/record-builds-from-multiple-repositories.adoc[Record builds from multiple repositories]. + +View recorded builds on the menu:Builds[] dashboard. + +.Builds dashboard +image::ROOT:sending-data/builds-dashboard.png[Builds dashboard,role="screenshot"] + +== Record test results against your builds + +After you record a build, you can record test results against it by updating the `smart-tests record tests` invocation(s) created after following xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with the {PRODUCT} CLI]: + +[source] +---- +# before +smart-tests record tests --no-build + +# after +smart-tests record tests --build --session +---- + +If done correctly, you will see the corresponding build name for each test session on the Test session section on the {PRODUCT} dashboard. 
diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/run-under-restricted-networks.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/run-under-restricted-networks.adoc new file mode 100644 index 000000000..d900a7554 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-builds/run-under-restricted-networks.adoc @@ -0,0 +1,86 @@ +:slug: run-under-restricted-networks += Run under restricted networks + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-builds/record-builds.adoc[Record builds with {PRODUCT}]. +-- + +The {PRODUCT} CLI requires an Internet connection, but your environment might have a limited Internet reachability. This page explains how to deal with it in common cases. + +== Outbound traffic control + +Networks can have a network policy that limits the connections to certain IP addresses and ports. In this case, you need to allowlist {PRODUCT}'s API server IP addresses. We have an API endpoint that has a stable IP address. + +* Domain: `api-static.mercury.launchableinc.com` +* IP Addresses: `13.248.185.38` and `76.223.54.162` +* Port: `443` (HTTPS) + +Once your network is configured to allow traffic to the IP addresses above, configure the {PRODUCT} CLI to use that endpoint: + +[source] +---- +export SMART_TESTS_BASE_URL="https://api-static.mercury.launchableinc.com" +smart-tests verify +---- + +== Git repository in an Internet unreachable environment + +Your repository might not have internet access for security reasons. For example, the {CI} workflow would look like: + +. Check out the Git repository in a machine with no Internet connection. +. Build the artifacts in the machine. +. Transfer the artifacts to a test environment that has the Internet connection. +. Run tests against the artifacts in the test environment. 
+ +In this case, you can save the `git log` output to a file and transfer that file to the test environment so that {PRODUCT} CLI can see the changes under test. + +In the build machine, you will need to run a `git log` command with: + +`--pretty='format:{"commit": "%H", "parents": "%P", "authorEmail": "%ae", "authorTime": "%aI", "committerEmail": "%ce", "committerTime": "%cI"}' --numstat` . + +Be sure to limit the number of commits to be written out. Use the `--max-count` option. Typically, 10 to 20 commits should be enough. Advanced users can adjust the ranges, as needed, to limit the amount of data. + +[source] +---- +# Check out the repository and build artifacts +git clone https://git.internal.example.com/repo +cd repo +make all + +# Save git log data to a file. Using --max-count to limit the number of commits. +git log --pretty='format:{"commit": "%H", "parents": "%P", "authorEmail": "%ae", "authorTime": "%aI", "committerEmail": "%ce", "committerTime": "%cI"}' --numstat --max-count 10 > git_log_output +# Save the current commit hash +git rev-parse HEAD > git_commit_hash + +# Save the artifacts and git outputs +scp server_test_bin git_log_output git_commit_hash fileserver.internal.example.com: +---- + +The git log output (`git_log_output` file shown above) should contain content like below: + +[source] +---- +{"commit": "1f0c18ea3df6575b4132b311d52a339af34c90ba", "parents": "b068a8a515e6cbbb2d6673ddb2c421939bd618b7", "authorEmail": "example1@example.com", "authorTime": "2022-09-21T15:59:21-07:00", "committerEmail": "example1@example.com", "committerTime": "2022-09-21T16:34:35-07:00"} +24 4 main.cc +24 0 main_test.cc + +{"commit": "b068a8a515e6cbbb2d6673ddb2c421939bd618b7", "parents": "cb1d1b797726fe16e661d8377bd807f2508e9df4", "authorEmail": "example2@example.com", "authorTime": "2022-09-16T17:03:52+00:00", "committerEmail": "example3@example.com", "committerTime": "2022-09-16T17:03:52+00:00"} +1 1 docs/README.md +---- + +In the test machine, you will need to 
use these files to upload the commit data. + +[source] +---- +scp -R fileserver.internal.example.com: . + +# Record commits and build +BUILD_NAME=build-$(cat git_commit_hash) +smart-tests record commit --import-git-log-output git_log_output +smart-tests record build --no-commit-collection --commit .=$(cat git_commit_hash) --build $BUILD_NAME --branch $BRANCH_NAME +smart-tests record session --build $BUILD_NAME --test-suite $TEST_SUITE_NAME + +# Run tests +./server_test_bin +---- diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-sessions/record-sessions.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-sessions/record-sessions.adoc new file mode 100644 index 000000000..fd6bcabaa --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-sessions/record-sessions.adoc @@ -0,0 +1,96 @@ +include::ROOT:partial$abbr.adoc[] + += Record sessions +:slug: record-sessions + +[NOTE] +-- +Before you start recording test sessions, consider your xref:concepts:test-session.adoc[Test Session Layouts]. +-- + +{PRODUCT} uses your test results from each test session to provide testing insights and recommendations. + +After running the `record build` command, execute the `record session` command to create test sessions in your workspace. + +The general structure of this command is as follows: + +[source,bash] +---- +smart-tests record session +--build +--test-suite +> smart-tests-session.txt +---- + +* `--build ` +** Use if you recorded a build before running tests (_for Predictive test selection only_). For more information, refer to xref:record-builds/record-builds.adoc[Record builds with the {PRODUCT} CLI]. + +* `--test-suite ` +** Use to set test suites. For more information, refer to xref:concepts:test-suite.adoc[Test suite]. + +[#record-session] +== Record a session + +. Run the `record session` command in your terminal. 
For example: ++ +[source,bash] +---- +{PRODUCT} record session +--build "Build 1" +--test-suite "e2e" +> smart-tests-session.txt +---- ++ +. After running the `record session` command, view your test sessions in the {PRODUCT} dashboard. ++ +[NOTE] +-- +The dashboard will display empty test session(s), until the `record tests` command is run. Once that command is executed, the dashboard will populate with test sessions, test results, and insights. +-- ++ +.Test Session tab on dashboard +image::ROOT:sending-data/record-session.png[Test session tab on dashboard,role="screenshot"] ++ +. Select a test session to view the session details. ++ +.. View session details using the dashboard. Such as; ++ +... Test session summary (Build name, flavors, test suite, session duration, and more.) + +... Test session details (Filters specific to the session, such as issues, failed tests, and more.) + +... (Optional) Select btn:[View in Jenkins] to export and view test details in a Jenkins file. + +... (Optional) Select the image:ROOT:icons/icon-vertical-three-dots-dark.png[three-dots] to download the test session details to a CSV file. ++ +.. View test details using the filters. Such as; ++ +... Select menu:Issues[] to view the issues related to the test session. ++ +For more information, refer to xref:features:intelligent-test-failure-diagnostics.adoc[Intelligent Test Failure Diagnostics]. + +... Select menu:Failed tests[] to view the failed tests in the test session. + +... Select menu:Flakes[] to view which tests were flaky in the test session. ++ +For more information, refer to <<#test-session-status,Test status definitions>>. + +... Select menu:Commits[] to view the details and number of commits related to the test session. ++ +. Select btn:[View Logs] to view the session logs. The Log Search tab on the dashboard displays. ++ +.. Select the desired log file to view the session logs related to the test session. 
For more information, refer to xref:record-test-results/attach-log-files.adoc[Attach log files to test sessions]. + +[#test-session-status] +== Test status definitions + +The following define the status criteria used in the test session details: + +* Success: A test the passed. +* Failure: A test that failed in all executions. +* Flake: A test failure that eventually passed after several retries within the same test session. +* Skip: A test that was not executed. + + +For more information about test sessions, refer to xref:concepts:test-session.adoc[Test session layouts]. + diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/attach-log-files.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/attach-log-files.adoc new file mode 100644 index 000000000..df60b92e2 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/attach-log-files.adoc @@ -0,0 +1,12 @@ +:slug: attach-log-files += Attach log files + +Logs produced during the test execution can be submitted as attachments. {PRODUCT} uses these log files to further analyze test results and assists users to diagnose test failures. + +The command is as follows: + +`smart-tests record attachment ...` + +* `--session ` , which you can use if you have a complex test session layout. For more information, refer to xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc[Manage complex test session layouts]. + +File names (excluding directories) are used as an identity of the log files. These identities should stay the same from one test session to next. If you are submitting multiple log files, each of them should have different identities. 
diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/convert-test-reports-to-junit-format.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/convert-test-reports-to-junit-format.adoc new file mode 100644 index 000000000..6fbf4c3c1 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/convert-test-reports-to-junit-format.adoc @@ -0,0 +1,62 @@ +:slug: convert-test-reports-to-junit-format += Convert test reports to JUnit format + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Recording test results with {PRODUCT}]. +-- + +== Deal with custom test report formats + +The {PRODUCT} CLI typically expects test reports to use the JUnit report format when passed into the `smart-tests record tests` command. This is the de facto test report format that is supported by some build/test/CI tools. However, if yours does not support this format, convert your reports into the JUnit report format. + +The best-annotated examples of the JUnit report format are; + +* link:https://llg.cubic.org/docs/junit/[This page] and +* link:https://help.catchsoftware.com/display/ET/JUnit+Format[this page] + +{PRODUCT} uses the following information: + +=== Must have + +* `` , `` , `` are the structural elements that matter * Their `name` and `classname` attributes are used to identify test names +* For a failed/errored/skipped test case, `` element must have a nested `` , `` , or `` child element, respectively +* While not documented in the pages linked above, `file` or `filepath` attributes on structural elements that point to the test source file path *are required* for file-based test runner support, most notably the xref:resources:integrations/use-the-generic-file-based-runner-integration.adoc[`file` profile for unsupported test runners] one you will most likely use if you are on this page! 
+* `time` attribute on structural elements that indicates how long a test took to run (in seconds) + +=== Nice to have + +* `` , `` that captures output from tests, preferably at the level of `` +* `timestamp` attribute on structural elements that indicate when a test has run, preferably on `` + +== Examples + +Here's a bare-bones example of a test report that works with {PRODUCT}: + +[source] +---- + + + + + + + + +---- + +Another one from Maven+Java+JUnit. This is not a file-based test runner, so the format is slightly different: + +[source] +---- + + + + java.lang.RuntimeException + at foo.FooTest.test3(FooTest.java:7) + + + + + +---- diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/ensure-record-tests-always-runs.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/ensure-record-tests-always-runs.adoc new file mode 100644 index 000000000..3e42c7e87 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/ensure-record-tests-always-runs.adoc @@ -0,0 +1,87 @@ +:slug: ensure-record-tests-always-runs += Ensure record tests always runs + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with {PRODUCT}]. +-- + +The `smart-tests record tests` command must be executed after you run tests. +However, some tools exit the build process as soon as the test process finishes, preventing this from happening. + +The way to fix this depends on your CI tool: + +== Jenkins + +For declarative Pipeline jobs, use the link:https://www.jenkins.io/doc/book/pipeline/syntax/#post[`post { always { ... } }`] option: + +[source] +---- +pipeline { + ... + sh 'bundle exec rails test -v $(cat smart-tests-subset.txt)' + ... 
+ post { + always { + sh 'smart-tests record tests [OPTIONS]' + } + } +} +---- + +For scripted pipeline jobs, the `catchError` step should be used as described here: link:https://www.jenkins.io/doc/pipeline/steps/workflow-basic-steps/#catcherror-catch-error-and-set-build-result-to-failure[https://www.jenkins.io/doc/pipeline/steps/workflow-basic-steps/#catcherror-catch-error-and-set-build-result-to-failure]. + +[NOTE] +-- +This is unnecessary for Maven builds that use the `-Dmaven.test.failure.ignore` option. +-- + +== CircleCI + +CircleCI has the `when: always` option, described here: link:https://circleci.com/docs/reference/configuration-reference/#the-when-attribute[]. + +[source] +---- +- jobs: + - test: + ... + - run: + name: Run tests + command: bundle exec rails test -v $(cat smart-tests-subset.txt) + - run: + name: Record test results + command: smart-tests record tests [OPTIONS] + when: always +---- + +== GitHub Actions + +GitHub Action has the `if: ${{ always() }}` option, described here: link:https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#always[]. + +[source] +---- +jobs: + test: + steps: + ... 
+ - name: Run tests + run: bundle exec rails test -v $(cat smart-tests-subset.txt) + - name: Record test result + run: smart-tests record tests [OPTIONS] + if: always() +---- + +== Bash + +If you run tests on your local or other CI, you can use `trap`: + +[source] +---- +function record() { + smart-tests record tests [OPTIONS] +} +# set a trap to send test results to {PRODUCT} for this build either tests succeed/fail +trap record EXIT SIGHUP + +bundle exec rails test +---- diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/manage-complex-test-session-layouts.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/manage-complex-test-session-layouts.adoc new file mode 100644 index 000000000..986a4fdeb --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/manage-complex-test-session-layouts.adoc @@ -0,0 +1,112 @@ +:slug: manage-complex-test-session-layouts += Manage complex test session layouts + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with {PRODUCT}]. +-- + +The xref:concepts:test-session.adoc[Test Session] is one of {PRODUCT}'s core concepts. When you record test results, those results are recorded to a test session. When you request a subset of tests from {PRODUCT}, the subset is linked to a test session, too. This concept is useful because tests might run several times against the same build; it helps disambiguate those runs. + +Normally, the {PRODUCT} CLI manages session creation and usage in the background. However, if your build and test processes are split across multiple machines, or if your tests are parallelized across multiple machines, you will need to create test sessions yourself. + +== Build and test processes happen on different machines + +Normally, the {PRODUCT} CLI handles creating, saving, and retrieving a session ID in the background. 
When you run `smart-tests subset` or `smart-tests record tests`, the CLI checks for an existing file in `~/.launchable`. This file is written when you run `smart-tests record build`. + +However, if you need to record tests (`smart-tests record tests`) or request a subset (`smart-tests subset`) on a *different* machine than the one where `smart-tests record build` ran, the `~/.launchable` file won't be present. You will need to manually create a test session using the `smart-tests record session` command at the beginning of your test. + +This command outputs a string that you can store and then pass into the `--session` option in `smart-tests subset` and `smart-tests record tests`. + +Here is an example: + +[source,role="default-expanded"] +---- +# machine 1 + + # build step + smart-tests record build --build --branch [OPTIONS] + + # build software + bundle install + +# machine 2 + + # before running tests, create a session (mandatory) + # you'll use this session name again later + smart-tests record session --build --session [OPTIONS] + + # get a subset, if applicable + smart-tests subset --build --session [OPTIONS] + + # run tests + bundle exec rails test [OPTIONS] + +# machine 3 + + # record test results + smart-tests record tests --session [OPTIONS] +---- + +[[combine-test-reports-from-multiple-runs]] +== Combine test reports from multiple runs + +Some pipelines execute multiple test runs against a build, outputting distinct test report(s) across several machines. Depending on your layout, you may want to combine these into a single test session. For more information, refer to xref:concepts:test-session.adoc[Test Session]. + +[NOTE] +-- +This may also be the case if you execute tests of a _single_ type across several parallel runs, but usually the test runner can combine reports from parallel runs for consumption from a single place. + +If all the test reports for a session can be collected from a single machine, you don't need to use this method. 
+-- + +=== Tie multiple `smart-tests record tests` invocations to a single test session: + +. Use the `smart-tests record session` command to create a session ID +. Pass this ID into the corresponding `--session` parameter in `smart-tests record tests` _(note: pseudocode)_ : + +[source,role="default-expanded"] +---- +## build step + +# before building software, send commit and build info +# to {PRODUCT} +smart-tests record build --build [OPTIONS] + +# build software the way you normally do, for example +bundle install + +## test step + +# before running tests, create a session (mandatory) +# you'll use this session name to group the test reports together later +smart-tests record session --build --session [OPTIONS] + + # start multiple test runs + + # machine 1 + + # run tests + bundle exec rails test + + # send test results to {PRODUCT} from machine 1 + # Note: You need to configure the line to always run whether test run succeeds/fails. + # See each integration page. + smart-tests record tests --build --session [OPTIONS] + + # machine 2 + + # run tests + bundle exec rails test + + # send test results to {PRODUCT} from machine 2 + # Note: You need to configure the line to always run whether test run succeeds/fails. + # See each integration page. + smart-tests record tests --build --session [OPTIONS] + + ## repeat as needed... + +## finish multiple test runs +---- + +For more information, refer to `smart-tests record session` in the xref:resources:cli-reference.adoc[CLI reference]. 
diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/record-test-results.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/record-test-results.adoc new file mode 100644 index 000000000..611bfa12b --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/record-test-results.adoc @@ -0,0 +1,615 @@ += Record test results + +[NOTE] +-- +Before you start recording test results to test sessions, consider your xref:concepts:test-session.adoc[Test Session Layouts]. +-- + +{PRODUCT} uses the test results from each xref:concepts:test-session.adoc[test session] to provide features. +After running tests, run the `smart-tests record tests` command to record test results in your workspace. + +The general structure of this command is as follows: + +`smart-tests record tests ` + +* ``: Use one of the following: +** `--no-build`, where no build information is captured with the test results. This is the best option for getting started quickly. *The examples on this page all use this option.* +** `--build `, is used if you recorded a build before running tests (for *predictive test selection* only). Refer to xref:send-data-to-smart-tests:record-builds/record-builds.adoc[Record builds with the {PRODUCT} CLI]. ++ +Even if you plan to use Predictive test selection, you can always start recording tests without recording builds and then record builds later. + +** `--session `, is used if you have a complex test session layout. Refer to xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc[Manage complex test session layouts]. + +* ``: This command varies slightly based on your test runner/build tool. Refer to <> for specific tool instructions. + +* ``: After recording tests, view recorded test sessions on the menu:[Test Session] dashboard of the {PRODUCT}. + +Select a session to view details and failed tests. 
+ +.Test session dashboard +image::ROOT:sending-data/test-session-interface.png[Test session dashboard,role="screenshot"] + +The CLI will also output a link to view each session's details in the web app. + +[#instructions-for-test-runners] +== Instructions for test runners/build tools + +The {PRODUCT} CLI supports many popular test runners and build tools. Follow the instructions for the tool used to record test results. + +[cols="1,1,1"] +.System-specific test runners/build tools +|=== + +|<> |<> |<> + +|<> |<> |<> + +|<> |<> |<> + +|<> |<> |<> + +|<> |<> |<> + +|<> |<> |<> + +|<> |<> |<> + +|<> |<> |<> + +|<> |<> +| +|=== + +[#android-cts] +=== Android Compatibility Test Suite (CTS) + +After running tests, point the CLI to your test report file(s): + +`smart-tests record tests cts --no-build android-cts/results/**/*.xml` + +[#android-adb] +=== Android Debug Bridge (ADB) + +Currently, the CLI doesn't have a `record tests` command for ADB. Use the command for <> instead. + +[#ant] +=== Ant + +. After running tests, point the CLI to your test report file(s): ++ +`smart-tests record tests ant --no-build ` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#bazel] +=== Bazel + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests bazel --no-build` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#behave] +=== Behave + +. Generate reports that {PRODUCT} can consume, add the `--junit` option to your existing `behave` command: ++ +[source] +---- +# run the tests however you usually do +behave --junit +---- + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests behave --no-build ./reports/*.xml` + +. 
Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#ctest] +=== CTest + +. Run your tests with `ctest -T test --no-compress-output` . These options ensure test results are written to the `Testing` directory. + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests ctest --no-build "Testing/**/Test.xml"` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#cucumber] +=== cucumber + +{PRODUCT} supports JSON and JUnit XML reports generated by cucumber (using `-f junit` or `-f json` . Follow the instructions for whichever one being used. JUnit XML is the default. + +[.multilanguage-custom-table, options="header", cols="2"] +|=== +| JUnit XML +| JSON + +a| If you run cucumber with the `-f junit` option, like this: + +`bundle exec cucumber -f junit -o reports` + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests cucumber --no-build "./reports/**/*.xml"` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +a| If you run cucumber with the `-f json` option, like this: + +`bundle exec cucumber -f json -o reports` + +. After running tests, point the CLI to your test report files to record test results (note the `--json` option): ++ +`smart-tests record tests cucumber --no-build --json "./reports/**/*.json"` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. 
+ +|=== + +[NOTE] +-- +If you receive a warning message such as `Cannot find test file Test-feature-example.xml` set the project's root directory path with the `--base` option: + +`smart-tests record tests cucumber --no-build --base /example/project "/example/project/reports/**/*.xml"` +-- + +[#cypress] +=== Cypress + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests cypress --no-build ./report/*.xml` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#dotnet] +=== dotnet test + +. Configure `dotnet test` to product NUnit report files. Install the https://www.nuget.org/packages/NUnitXml.TestLogger[NunitXml.TestLogger] package from NuGet via your preferred method, then the `-l` option to your `dotnet test` command to enable the logger: ++ +`dotnet test ... -l nunit` + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests dotnet --no-build "**/*.xml"` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[[flutter-test-results]] +=== Flutter + +. Run tests with the `--machine` option and produce a test report as JSON (NDJSON) format. ++ +[source,bash] +---- +flutter test --machine > report.json +---- + +. After running tests, point the CLI to your test report file to record test results: ++ +[source,bash] +---- +smart-tests record tests \ + --session $(cat smart-tests-session.txt) \ + flutter \ + report.json +---- + +[#googletest] +=== GoogleTest + +. Configure GoogleTest to produce JUnit-compatible report files. See https://github.com/google/googletest/blob/main/docs/advanced.md#generating-an-xml-report[their documentation] for how to do this. 
You'll end up with a command something like this: ++ +[source] +---- +# run the tests however you normally do +./my-test --gtest_output=xml:./report/my-test.xml +---- + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests googletest --no-build ./report` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#go-test] +=== Go Test + +. Generate reports that {PRODUCT} can consume, use https://github.com/jstemmer/go-junit-report[go-junit-report] to generate a JUnit XML file after you run tests: ++ +[source] +---- +# install JUnit report formatter +go get -u github.com/jstemmer/go-junit-report + +# run the tests however you usually do, then produce a JUnit XML file +go test -v ./... | go-junit-report -set-exit-code > report.xml +---- + +. Then point the CLI to your test report file to record test results: ++ +`smart-tests record tests go-test --no-build report.xml` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#gradle] +=== Gradle + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests gradle --no-build ./build/test-results/test/` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[NOTES] +-- +* By default, Gradle's report files are saved to `build/test-results/test/` , but that might differ depending on how your Gradle project is configured. +* You can specify multiple directories if you do a multi-project build. +* A dedicated Gradle task to list all report directories might be convenient for a large project. 
See https://docs.gradle.org/current/userguide/java_testing.html#test_reporting[the upstream documentation] for more details and insights. +* Alternatively, you can specify a glob pattern for directories or individual test report files (this pattern might already be set in your pipeline script for easy copy-pasting), e.g., `gradle **/build/**/TEST-*.xml` . +-- + +[#jest] +=== Jest + +. Generate reports that {PRODUCT} can consume, use https://www.npmjs.com/package/jest-junit[jest-junit] to generate a JUnit XML file after you run tests. ++ +[source] +---- +# install jest-junit reporter +npm install jest-junit --save-dev +# or +yarn add --dev jest-junit +---- ++ +You will need to configure jest-junit to include file paths in reports. ++ +You can do this using environment variables: ++ +[.multilanguage-custom-table, options="header", cols="2"] +|=== +| Using environment variables +| Using package.json + +a| Recommended config: + +[source] +---- +export JEST_JUNIT_CLASSNAME="{classname}" +export JEST_JUNIT_TITLE="{title}" +export JEST_JUNIT_SUITE_NAME="{filepath}" +---- + +Minimum config: + +`export JEST_JUNIT_SUITE_NAME="+{filepath}+"` + +a| Add the following lines to your `package.json` . The detail is the https://www.npmjs.com/package/jest-junit[jest-junit] section. + +Recommended config: + +[source] +---- +// package.json +"jest-junit": { + "suiteNameTemplate": "{filepath}", + "classNameTemplate": "{classname}", + "titleTemplate": "{title}" +} +---- + +Minimum config: + +[source] +---- +// package.json +"jest-junit": { + "suiteNameTemplate": "{filepath}" +} +---- + +|=== + +. Run `jest` using jest-junit: ++ +[source] +---- +# run tests with jest-junit +jest --ci --reporters=default --reporters=jest-junit +---- + +. After running tests, point the CLI to your test report files to record test results: ++ +[source] +---- +# send test results to {PRODUCT} +smart-tests record tests jest --no-build your-junit.xml +---- + +. 
Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#karma] +=== Karma + +. Generate reports that {PRODUCT} can consume, use link:https://www.npmjs.com/package/karma-json-reporter[karma-json-reporter] to generate JSON test reports. Refer to the linked page for more details, but your `karma.conf.js` change should look something like this: ++ +[source,javascript,role="default=expand"] +---- +module.exports = function (config) { + config.set({ + ... + plugins: [ + ... + require('karma-json-reporter') + ], + jsonReporter: { + outputFile: require('path').join(__dirname, 'test-results.json'), + stdout: false + }, + reporters: [..., 'json'] + }); +}; +---- + +. After running tests, point the CLI to your test report file to record test results: ++ +[source,bash] +---- +smart-tests record tests karma --session $(cat session.txt) test-results.json +---- + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#maven] +=== Maven + +[NOTE] +-- +{PRODUCT} supports test reports generated using https://maven.apache.org/surefire/maven-surefire-plugin/[Surefire] , the default report plugin for https://maven.apache.org/[Maven]. +-- + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests maven --no-build "./**/target/surefire-reports"` ++ +_You can pass multiple directories/patterns, e.g. _ `smart-tests record tests maven --no-build '' ''` _._ + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#minitest] +=== minitest + +. Use https://github.com/circleci/minitest-ci[minitest-ci] to output test results to a file. It may already be installed if you store your test results on your CI server. + +. 
After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests minitest --no-build "$CIRCLE_TEST_REPORTS/reports"` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#nunit] +=== NUnit Console Runner + +[NOTE] +-- +{PRODUCT} CLI accepts https://docs.nunit.org/articles/nunit/technical-notes/usage/XML-Formats.html[NUnit3 style test report XML files] produced by NUnit. +-- + +. After running tests, point the CLI to your test report file(s) to record test results: ++ +`smart-tests record tests nunit --no-build path/to/TestResult.xml` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#perl] +=== prove for Perl + +{PRODUCT} supports JUnit XML report generated by https://github.com/bleargh45/TAP-Formatter-JUnit[TAP::Formatter::JUnit]. + +. Follow the instructions for generating a report. ++ +`prove -Ilib --formatter TAP::Formatter::JUnit-r t` ++ +The command will generate a JUnit XML report with the name `junit_output.xml` . + +. After running tests, point the CLI to your test report file to record test results: ++ +`smart-tests record tests prove --no-build junit_output.xml` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#playwright] +=== Playwright + +{PRODUCT} supports JSON and JUnit XML reports generated by Playwright official https://playwright.dev/docs/test-reporters[reporters] (using `--json` ). + +Follow the instructions for whichever one being used. JUnit XML is the default but {PRODUCT} recommends to use JSON reports and option. Because JUnit XML reports will consolidate multiple same test case results into one. 
+ +[.multilanguage-custom-table, options="header", cols="2"] +|=== +| JSON +| JUnit XML + +a| First, run tests with the `--reporter=json` option: + +`PLAYWRIGHT_JSON_OUTPUT_NAME=results.json npx playwright test --reporter=json` + +or pass options to the configuration file directly: + +[source] +---- +import { defineConfig } from '@playwright/test'; + +export default defineConfig({ +reporter: [['json', { outputFile: 'results.json' }]], +}); +---- + +ref: https://playwright.dev/docs/test-reporters#json-reporter[Playwright Official Document] + +. After running tests, point the CLI to your test report file(s) to record tests results ++ +`smart-tests record tests playwright --no-build --json results.json` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +a| If you use https://playwright.dev/docs/test-reporters#junit-reporter[JUnit Reporter] to generate reports, like this: + +`PLAYWRIGHT_JUNIT_OUTPUT_NAME=results.xml npx playwright test --reporter=junit` + +or pass options to the configuration file directly + +[source] +---- +import { defineConfig } from '@playwright/test'; + +export default defineConfig({ + reporter: [['junit', { outputFile: 'results.xml' }]], +}); +---- + +. After running tests, point the CLI to your test report files(s) to record test results: ++ +`smart-tests record tests playwright --no-build results.xml` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +|=== + +[#pytest] +=== pytest + +{PRODUCT} supports JSON and JUnit XML reports generated by pytest (using `--junit-xml` or `--report-log` ). + +Follow the instructions for whichever one being used. JUnit XML is the default. 
+ +[.multilanguage-custom-table, options="header", cols="2"] +|=== +| JUnit XML +| JSON + +a| First, run tests with the `--junit-xml` option: + +`pytest --junit-xml=test-results/results.xml` + +[WARNING] +-- +pytest changed its default test report format from `xunit1` to `xunit2` in version 6. Unfortunately, the new `xunit2` format does not include file paths, which {PRODUCT} needs. + +Therefore, if using pytest 6 or newer, you must also specify `junit_family=legacy` as the report format. See https://docs.pytest.org/en/latest/deprecations.html#junit-family-default-value-change-to-xunit2[Deprecations and Removals — pytest documentation] for instructions. +-- + +. After running tests, point the CLI to your test report file(s) to record test results (note the `--json` option): ++ +`smart-tests record tests pytest --no-build ./test-results/` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +a| If you use the https://github.com/pytest-dev/pytest-reportlog[pytest-dev/pytest-reportlog] plugin to generate reports, like this: + +`pytest --report-log=test-results/results.json` + +. After running tests, point the CLI to your test report file(s) to record test results: ++ +`smart-tests record tests pytest --no-build --json ./test-results/` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +|=== + +[#robot] +=== Robot + +. After running tests with `robot` , point the CLI to your test report files to record test results: ++ +`smart-tests record tests robot --no-build output.xml` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#rspec] +=== RSpec + +. 
Use link:https://github.com/sj26/rspec_junit_formatter[rspec_junit_formatter] to output test results to a file in RSpec. If you already have a CI server storing your test results, it may already be installed: ++ +`bundle exec rspec --format RspecJunitFormatter --out report/rspec.xml` + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests rspec --no-build ./report` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#ruby-test-unit] +=== Ruby test-unit + +. Use link:https://github.com/launchableinc/test-unit-launchable[test-unit-launchable] to output test results to a file in test-unit. ++ +[source] +---- +ruby test/example_test.rb --runner=launchable --launchable-test-report-json=report.json +---- + +. After running tests, point the CLI to your test report files to record test results: ++ +`smart-tests record tests raw --no-build report.json` + +. Then, follow the instructions to xref:send-data-to-smart-tests:record-test-results/ensure-record-tests-always-runs.adoc[Ensure record tests always runs]. + +[#testng] +=== TestNG + +If using the TestNG framework, check whether the tests are run with Gradle or Maven. Then, use the appropriate command for the corresponding tool. + +[[vitest-record-tests]] +=== Vitest + +. Run tests with the `--reporter junit` option and the `--outputFile` option to produce a test report in JUnit format. ++ +[source,bash] +---- +vitest run --reporter junit --outputFile report.xml +---- + +. After running tests, point the CLI to your test report file to record test results: ++ +[source,bash] +---- +smart-tests record tests \ + --session $(cat smart-tests-session.txt) \ + vitest \ + report.json +---- + +[[xctest-recording-tests]] +=== XCTest + +. 
To generate reports that {PRODUCT} can consume, use link:https://github.com/supermarin/xcpretty[xcpretty] to generate a JUnit XML file after you run tests:
The _when_ and _where_ they run (e.g., nightly vs every change, in the CI system vs in the staging environment.) + +If thinking about test suites this way, the definition will be more straightforward: for example, all the Maven unit tests become one test suite, and all the Cypress UI tests become another test suite. + +== How do you benefit from test suites? + +When you record test suites, you can focus on test sessions & issues from a particular test suite. + +== How do you record test suite? + +. Invoke the `smart-tests record tests` command, then specify the additional `--test-suite` option and give it a test suite name. ++ +`$ smart-tests record tests --test-suite "ui tests" ...` + +. If using the `smart-tests record session` command explicitly as per xref:send-data-to-smart-tests:record-test-results/manage-complex-test-session-layouts.adoc[Managing complex test session layout], then the `--test-suite` option should be used with that command instead of `record tests`. diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/use-flavors-to-run-the-best-tests-for-an-environment.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/use-flavors-to-run-the-best-tests-for-an-environment.adoc new file mode 100644 index 000000000..19782766c --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/record-test-results/use-flavors-to-run-the-best-tests-for-an-environment.adoc @@ -0,0 +1,78 @@ +:slug: use-flavors-to-run-the-best-tests-for-an-environment += Use 'flavors' to run the best tests for an environment + +Capturing test reports and selecting tests to run in multiple environments (for example, browser tests, mobile tests, etc.) + +[NOTE] +-- +This page relates to xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results with {PRODUCT}]. +-- + +Lots of teams run the same tests across several different environments. 
For example, a UI test suite might be run in several browsers in parallel. Or perhaps you need to build a slightly different version of a mobile app for different locales and need to run the same tests across all of them. + +In these scenarios, a test result is not just a test result: it is the combination of the test _and_ the environment that it was run in. A test might pass in one environment but fail in another. + +{PRODUCT} supports these scenarios with a concept called *flavors* . + +.Flavors +image::ROOT:flavors.png[Flavors,role="screenshot"] + +When submitting test results using `smart-tests record tests`, additional metadata in the form of key-value pairs using the `--flavor` option can also be submitted. + +For example: + +[source,role="default-expanded"] +---- +# run tests in Chrome and report results +cypress run --reporter junit --reporter-options "mochaFile=report/test-output-chrome.xml" + +smart-tests record tests cypress --build [BUILD NAME] --session [SESSION NAME] --flavor browser=chrome report/test-output-chrome.xml + +# run tests in Firefox and report results +cypress run --reporter junit --reporter-options "mochaFile=report/test-output-firefox.xml" + +smart-tests record tests cypress --build [BUILD NAME] --session [SESSION NAME] --flavor browser=firefox report/test-output-firefox.xml +---- + +And so on. (You can submit multiple key-value pairs, too: `--flavor key=value --flavor key2=value2` ) + +Later, when you want to request a subset of tests, include the same key-value pairs to get a subset of tests specifically selected for that flavor. 
+ +For example: + +[source,role="default-expanded"] +---- +# get a subset for Chrome, run it, then report results +find ./cypress/integration | smart-tests subset cypress --build [BUILD NAME] --session [SESSION NAME] --confidence 90% --flavor browser=chrome > subset-chrome.txt + +cypress run --spec "$(cat subset-chrome.txt)" --reporter junit --reporter-options "mochaFile=report/test-output-chrome.xml" + +smart-tests record tests cypress --build [BUILD NAME] --session [SESSION NAME] --flavor browser=chrome report/test-output-chrome.xml + +# get a subset for Firefox, run it, then report results +find ./cypress/integration | smart-tests subset --build [BUILD NAME] --session [SESSION NAME] --confidence 90% --flavor browser=firefox cypress > subset-firefox.txt + +cypress run --spec "$(cat subset-firefox.txt)" --reporter junit --reporter-options "mochaFile=report/test-output-firefox.xml" + +smart-tests record tests cypress --build [BUILD NAME] --session [SESSION NAME] --flavor browser=firefox report/test-output-firefox.xml +---- + +This feature lets you select the right tests to run based on the changes being tested _and_ the environment they are being run in. + +[NOTE] +-- +If your workflow involves creating a session externally using `smart-tests record session` , you should set `--flavor` in that command. Using `smart-tests subset` or `smart-tests record tests` will be ignored. 
+-- + +For example: + +[source,role="default-expanded"] +---- +smart-tests record session --build [BUILD NAME] --session chrome-session --flavor browser=chrome + +find ./cypress/integration | smart-tests subset cypress --session chrome-session --confidence 90% > subset-chrome.txt + +cypress run --spec "$(cat subset-chrome.txt)" --reporter junit --reporter-options "mochaFile=report/test-output-chrome.xml" + +smart-tests record tests cypress --session chrome-session report/test-output-chrome.xml +---- diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/send-data-to-smart-tests.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/send-data-to-smart-tests.adoc new file mode 100644 index 000000000..257e8fef2 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/send-data-to-smart-tests.adoc @@ -0,0 +1,36 @@ +include::ROOT:partial$abbr.adoc[] + += Send data to {PRODUCT} +:slug: send-data-to-smart-tests + +To start using {PRODUCT}'s features, you must send build and test data from your CI pipeline to your {PRODUCT} workspace. + +The following diagram shows the high-level data flow: + +.Predictive test selection data flow +image::ROOT:pts-v2-working.png[Predictive test selection data flow,role="screenshot"] + +== Integration options + +=== {PRODUCT} CLI + +We support a variety of tools to integrate with, all through the {PRODUCT} CLI. Tools supported include: Android Debug Bridge, Ant, Bazel, Behave, CTest, cucumber, Cypress, GoogleTest, Go Test, Gradle, Jasmin, Jest, Maven, minitest, pytest, Robot, Rspec, and other/custom test runners. + +First, follow the steps in xref:send-data-to-smart-tests:getting-started/getting-started.adoc[Getting Started] to create your account, set up your API key, install the {PRODUCT}, and verify your connection. + +Then complete the steps for sending your test suite’s data to your {PRODUCT} workspace: + +1. xref:send-data-to-smart-tests:record-builds/record-builds.adoc[Record builds] +2. 
xref:send-data-to-smart-tests:record-test-results/record-test-results.adoc[Record test results] + +== Next steps + +Once you start sending builds and test results to {PRODUCT}, you will see the benefits of {PRODUCT} in your test sessions. Here are some next steps to take to get the most out of {PRODUCT}: + +* Set up xref:features:test-notifications/test-notifications-via-slack.adoc[Test Notifications via Slack] or xref:features:test-notifications/github-app-for-test-sessions.adoc[GitHub app for test sessions] to get notified about test results without having to check email or navigate to a pull request. + +* View xref:features:trends.adoc[Trends] in your test sessions. + +* Find xref:features:unhealthy-tests.adoc[Unhealthy tests] in your test suite and fix them to run tests more reliably. + +* Begin saving time running tests and run tests earlier with xref:features:predictive-test-selection.adoc[Predictive test selection]. diff --git a/smart_tests/docs/modules/send-data-to-smart-tests/pages/subset/subset-predictive-test-selection.adoc b/smart_tests/docs/modules/send-data-to-smart-tests/pages/subset/subset-predictive-test-selection.adoc new file mode 100644 index 000000000..84ae2a113 --- /dev/null +++ b/smart_tests/docs/modules/send-data-to-smart-tests/pages/subset/subset-predictive-test-selection.adoc @@ -0,0 +1,13 @@ +include::ROOT:partial$abbr.adoc[] + += Subset (for Predictive test selection) +:slug: subset-predictive-test-selection + +[NOTE] +-- +Subsets are only generated using the Predictive test selection service. +-- + +Subset is a feature in {PRODUCT} that allows you to create a subset of tests from your test suite based on specific criteria. This feature is useful for running a smaller set of tests that are relevant to a particular change or for prioritizing tests based on historical data. This means that the tests included in a subset can change over time as new data is collected. 
+ +For more information on using subsets, refer to xref:features:predictive-test-selection/request-and-run-a-subset-of-tests/subset-with-the-smart-tests-cli/subset-with-the-smart-tests-cli.adoc[Subset with the {PRODUCT} CLI]. \ No newline at end of file