Skip to content

Commit 6c7a8b6

Browse files
add --no-deps for tests/py/requirements.txt (#3569)
1 parent: dd06bd8 · commit: 6c7a8b6

File tree

3 files changed

+15
-6
lines changed

3 files changed

+15
-6
lines changed

.github/workflows/build-test-linux-x86_64.yml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,15 @@ jobs:
138138
pushd .
139139
cd tests/py
140140
python -m pip install -r requirements.txt
141+
major=${PYTHON_VERSION%%.*}
142+
minor=${PYTHON_VERSION#*.}
143+
minor=${minor%%.*}
144+
if (( major > 3 || (major == 3 && minor >= 13) )); then
145+
echo "flashinfer-python is not supported for python version 3.13 or higher"
146+
else
147+
echo "Installing flashinfer-python"
148+
python -m pip install flashinfer-python --no-deps
149+
fi
141150
cd dynamo
142151
python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml -n 4 conversion/
143152
python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml automatic_plugin/test_automatic_plugin.py

py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import logging
2-
from typing import Any
2+
from typing import Any, Set
33

44
import torch
55
from torch_tensorrt._utils import sanitized_torch_version
@@ -100,12 +100,13 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
100100
super().__init__(*args, **kwargs)
101101

102102
def is_impure(self, node: torch.fx.node.Node) -> bool:
103-
# Set of known quantization ops to be excluded from constant folding.
103+
# Set of known quantization ops to be excluded from constant folding.
104104
# Currently, we exclude all quantization ops coming from modelopt library.
105-
quantization_ops = {}
105+
quantization_ops: Set[torch._ops.OpOverload] = set()
106106
try:
107-
# modelopt import ensures torch.ops.tensorrt.quantize_op.default is registered
108-
import modelopt.torch.quantization as mtq
107+
# modelopt import ensures torch.ops.tensorrt.quantize_op.default is registered
108+
import modelopt.torch.quantization as mtq # noqa: F401
109+
109110
assert torch.ops.tensorrt.quantize_op.default
110111
quantization_ops.add(torch.ops.tensorrt.quantize_op.default)
111112
except Exception as e:

tests/py/requirements.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ pytest>=8.2.1
88
pytest-xdist>=3.6.1
99
pyyaml
1010
timm>=1.0.3
11-
flashinfer-python; python_version < "3.13"
1211
transformers==4.49.0
1312
nvidia-modelopt[all]~=0.27.0; python_version >'3.9' and python_version <'3.13'
1413
--extra-index-url https://pypi.nvidia.com

0 commit comments

Comments (0)