Skip to content

Commit ada3e08

Browse files
committed
manually fix rebase issues
1 parent 0432a23 commit ada3e08

File tree

4 files changed

+10
-14
lines changed

4 files changed

+10
-14
lines changed

.ci/docker/requirements.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ datasets
3636
transformers
3737
torchmultimodal-nightly # needs to be updated to stable as soon as it's available
3838
onnx
39-
onnxscript>=0.2.2
39+
onnxscript
4040
onnxruntime
4141
evaluate
4242
accelerate>=0.20.1
@@ -69,5 +69,5 @@ pycocotools
6969
semilearn==0.3.2
7070
torchao==0.5.0
7171
segment_anything==1.0
72-
torchrec==1.1.0; platform_system == "Linux"
72+
torchrec==1.0.0; platform_system == "Linux"
7373
fbgemm-gpu==1.1.0; platform_system == "Linux"

.jenkins/build.sh

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -22,14 +22,10 @@ sudo apt-get install -y pandoc
2222
#Install PyTorch Nightly for test.
2323
# Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html
2424
# Install 2.5 to merge all 2.4 PRs - uncomment to install nightly binaries (update the version as needed).
25-
sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict
26-
pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
27-
#sudo pip uninstall -y fbgemm-gpu
28-
#sudo pip3 install --pre fbgemm-gpu --index-url https://download.pytorch.org/whl/nightly/cu126/
29-
#pip install tensordict-nightly
30-
#pip install torchrl-nightly
31-
#sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
32-
25+
# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata
26+
# sudo pip3 install torch==2.6.0 torchvision --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
27+
# sudo pip uninstall -y fbgemm-gpu torchrec
28+
# sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
3329

3430
# Install two language tokenizers for Translation with TorchText tutorial
3531
python -m spacy download en_core_web_sm

intermediate_source/torch_export_tutorial.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -995,7 +995,7 @@ def forward(self, x):
995995
# with torch.no_grad():
996996
# pt2_path = torch._inductor.aoti_compile_and_package(ep)
997997
#
998-
# # Load and run the .pt2 file in Python.
998+
# # Load and run the .so file in Python.
999999
# # To load and run it in a C++ environment, see:
10001000
# # https://pytorch.org/docs/main/torch.compiler_aot_inductor.html
10011001
# aoti_compiled = torch._inductor.aoti_load_package(pt2_path)

recipes_source/torch_export_aoti_python.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -238,11 +238,11 @@ def timed(fn):
238238

239239
torch._dynamo.reset()
240240

241-
compiled_model = torch._inductor.aoti_load_package(model_path)
242-
example_inputs = torch.randn(1, 3, 224, 224, device=device)
241+
model = torch._inductor.aoti_load_package(model_path)
242+
example_inputs = (torch.randn(1, 3, 224, 224, device=device),)
243243

244244
with torch.inference_mode():
245-
_, time_taken = timed(lambda: compiled_model(example_inputs))
245+
_, time_taken = timed(lambda: model(example_inputs))
246246
print(f"Time taken for first inference for AOTInductor is {time_taken:.2f} ms")
247247

248248

0 commit comments

Comments
 (0)