forked from NVIDIA/Model-Optimizer
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtox.ini
More file actions
138 lines (114 loc) · 4.24 KB
/
tox.ini
File metadata and controls
138 lines (114 loc) · 4.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
# Global tox settings: default env list, no sdist build, shared work dir.
[tox]
envlist =
    pre-commit-all
    py312-torch28-tf_latest-unit
    py312-cuda12-gpu
# Package is installed via `-e .[...]` in each env's deps; no sdist needed.
skipsdist = True
# Per-user work dir outside the repo to avoid polluting checkouts.
toxworkdir = /tmp/{env:USER}-modelopt-tox
############################
# CPU Unit test environments
############################
[testenv:{py310,py311,py312}-torch{26,27,28}-tf_{min,latest}-unit]
deps =
    # torch version auto-selected based on torchvision version
    torch26: torchvision~=0.21.0
    torch27: torchvision~=0.22.0
    torch28: torchvision~=0.23.0
    # Build onnxsim from sdists for Python 3.12 until https://github.com/daquexian/onnx-simplifier/pull/353
    py312: onnxsim
    # Install megatron-core for special unit tests
    megatron-core
    -e .[all,dev-test]
    # Should match setup.py
    tf_min: transformers~=4.48.0
commands =
    python -m pytest tests/unit {env:COV_ARGS:}
#####################################################################
# Environment to run unit tests with subset of dependencies installed
#####################################################################
[testenv:{py310,py311,py312}-partial-unit-{onnx,torch,torch_deploy}]
allowlist_externals =
    bash, rm
deps =
    # Build onnxsim from sdists for Python 3.12 until https://github.com/daquexian/onnx-simplifier/pull/353
    py312: onnxsim
    # ONNX unit tests heavily rely on torch / torchvision
    onnx: .[onnx,dev-test]
    onnx: torchvision
    # Install megatron-core to test torch-only install can still import plugins
    torch: megatron-core
    torch: .[dev-test]
    torch_deploy: .[onnx,torch,dev-test]
commands =
    onnx: python -m pytest tests/unit/onnx
    torch: python -m pytest tests/unit/torch --ignore tests/unit/torch/deploy
    torch_deploy: python -m pytest tests/unit/torch/deploy
########################################################
# GPU test environments (Can be used with --current-env)
########################################################
[testenv:{py310,py311,py312}-cuda12-gpu]
setenv =
    MAMBA_FORCE_BUILD=TRUE
commands_pre =
    # Install deps here so that it gets installed even in --current-env
    pip install -U megatron-core
    pip install git+https://github.com/Dao-AILab/fast-hadamard-transform.git
    # Install Mamba model dependencies (takes 8-10mins!)
    # Triton 3.4.0 causes some real quant tests to fail
    pip install "triton<3.4"
    pip install --no-build-isolation git+https://github.com/state-spaces/mamba.git
    # Install Eagle-3 test dependencies
    pip install tiktoken blobfile sentencepiece
    # Build onnxsim from sdists for Python 3.12 until https://github.com/daquexian/onnx-simplifier/pull/353
    py312: pip install onnxsim
    # NOTE: User is expected to have correct torch-cuda version pre-installed if using --current-env
    # to avoid possible CUDA version mismatch
    pip install -e .[all,dev-test]
commands =
    # Coverage fails with "Can't combine line data with arc data" error so not using "--cov"
    python -m pytest tests/gpu
#############################################
# Code quality checks on all files or on diff
#############################################
[testenv:{pre-commit}-{all,diff}]
deps =
    -e .[all,dev-lint]
commands =
    all: pre-commit run --all-files --show-diff-on-failure {posargs}
    diff: pre-commit run --from-ref origin/main --to-ref HEAD {posargs}
#########################
# Run documentation build
#########################
[testenv:{build,debug}-docs]
allowlist_externals =
    rm
passenv =
    SETUPTOOLS_SCM_PRETEND_VERSION
deps =
    -e .[all,dev-docs]
changedir = docs
commands_pre =
    # Clean previous build outputs and autogenerated API stubs.
    rm -rf build
    rm -rf source/reference/generated
commands =
    sphinx-build source build/html --fail-on-warning --show-traceback --keep-going
    # debug variant additionally serves the docs with live rebuild.
    debug: sphinx-autobuild source build/html --host 0.0.0.0
#################
# Run wheel build
#################
[testenv:build-wheel]
allowlist_externals =
    bash, cd, rm
passenv =
    SETUPTOOLS_SCM_PRETEND_VERSION
deps =
    twine
commands =
    # Clean build directory to avoid any stale files getting into the wheel
    rm -rf build
    # Build and check wheel
    pip wheel --no-deps --wheel-dir=dist .
    twine check dist/*
    # Install and test the wheel
    bash -c "find dist -name 'nvidia_modelopt-*.whl' | xargs pip install -f dist"
    bash -c "cd dist; python -c 'import modelopt; print(modelopt.__version__);'"