
Commit c42bf3c

Merge remote-tracking branch 'gitlab/master'

2 parents: 79be717 + 71731f3

File tree: 2 files changed, +13 −7 lines

docker/Dockerfile (2 additions, 2 deletions)

@@ -50,9 +50,9 @@ RUN --mount=type=cache,target=/opt/ccache \
     cd intel-extension-for-pytorch && git submodule sync && \
     git submodule update --init --recursive && \
     git clone https://github.com/pytorch/pytorch && \
-    cd pytorch && git checkout v1.5.1 && git submodule sync && \
+    cd pytorch && git checkout v1.7.0 && git submodule sync && \
     git submodule update --init --recursive && \
-    git apply ../torch_patches/dpcpp-v1.5.1.patch && \
+    git apply ../torch_patches/xpu-1.7.patch && \
     USE_MKLDNN=1 USE_CUDA=0 USE_NNPACK=0 USE_CUDNN=0 \
     CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" pip install -v . && \
     cd .. && pip install -v . && rm -rf *
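This hunk is only a version-pin change: the PyTorch checkout moves from v1.5.1 to v1.7.0, with the matching torch patch swapped in. As a minimal sketch (not part of the commit), one way to sanity-check the rebuilt image is to import both packages inside the container; the `intel_pytorch_extension` module name is an assumption based on the `ipex` alias used in the tests below:

    # Hypothetical post-build check, run inside the rebuilt container.
    import torch
    import intel_pytorch_extension as ipex  # assumed module name behind the tests' `ipex` alias

    # The Dockerfile now checks out PyTorch v1.7.0, so the build should report 1.7.x.
    assert torch.__version__.startswith("1.7"), torch.__version__
    print("torch", torch.__version__, "imported with the extension OK")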

tests/cpu/test_int8.py (11 additions, 5 deletions)

@@ -28,29 +28,35 @@ def test_quantization_status(self):
         x = torch.randn((4, 5), dtype=torch.float32).to(device)
         model = torch.nn.Linear(5, 10, bias=True).float().to(device)

+        model1 = copy.deepcopy(model)
+        x1 = x.clone()
         conf = ipex.AmpConf(torch.int8)
         with ipex.AutoMixPrecision(conf, running_mode='calibration'):
-            ref = model(x)
+            ref = model1(x1)
         conf.save('configure.json')
         conf = ipex.AmpConf(torch.int8, 'configure.json')
         with ipex.AutoMixPrecision(conf, running_mode='inference'):
-            y = model(x)
+            y = model1(x1)
         self.assertTrue(ipex.core.is_int8_dil_tensor(y))
         jsonFile = open('configure.json', 'r')
         data = json.load(jsonFile)
         jsonFile.close()
         self.assertTrue(data[0]['quantized'])

         # check that a configure change works for the calibration step;
-        # need to get an fp32 tensor for quantized=False.
+        # we need to use the original model here: after running the inference
+        # step above, the model has been quantized, so once quantized is
+        # changed to False the output should be fp32, i.e. not quantized.
         data[0]['quantized'] = False
         jsonFile = open('configure.json', "w+")
         jsonFile.write(json.dumps(data))
         jsonFile.close()
         # use the user's changed configure.
+        model2 = copy.deepcopy(model)
+        x2 = x.clone()
         conf = ipex.AmpConf(torch.int8, 'configure.json')
         with ipex.AutoMixPrecision(conf, running_mode='calibration'):
-            ref = model(x)
+            ref = model2(x2)
         conf.save('configure.json')
         conf = ipex.AmpConf(torch.int8, 'configure.json')
         jsonFile = open('configure.json', 'r')
@@ -59,7 +65,7 @@ def test_quantization_status(self):
         self.assertFalse(data[0]['quantized'])

         with ipex.AutoMixPrecision(conf, running_mode='inference'):
-            y = model(x)
+            y = model2(x2)
         self.assertTrue(ipex.core.is_fp32_dil_tensor(y))
         os.remove('configure.json')
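The fix itself: the test previously reused one `model`/`x` pair across all three `AutoMixPrecision` passes, so the model reaching the second calibration had already been quantized in place by the first inference pass. Deep-copying the model and cloning the input per round keeps each round independent. A condensed sketch of that pattern, with the imports and `device` definition assumed from the surrounding test file:

    # Sketch of the per-round isolation this diff introduces; the module name
    # `intel_pytorch_extension` and `ipex.DEVICE` are assumptions taken from
    # how the test file imports and uses the extension.
    import copy, json
    import torch
    import intel_pytorch_extension as ipex

    device = ipex.DEVICE  # assumed; the real test defines `device` elsewhere
    x = torch.randn((4, 5), dtype=torch.float32).to(device)
    model = torch.nn.Linear(5, 10, bias=True).float().to(device)

    # Round 1: calibrate and save the int8 configure using a fresh copy.
    model1, x1 = copy.deepcopy(model), x.clone()
    conf = ipex.AmpConf(torch.int8)
    with ipex.AutoMixPrecision(conf, running_mode='calibration'):
        model1(x1)
    conf.save('configure.json')

    # Flip the saved entry to quantized=False ...
    with open('configure.json') as f:
        data = json.load(f)
    data[0]['quantized'] = False
    with open('configure.json', 'w') as f:
        json.dump(data, f)

    # Round 2: a second fresh copy, so round 1's in-place quantization
    # cannot leak into this calibration, and the output stays fp32.
    model2, x2 = copy.deepcopy(model), x.clone()
    conf = ipex.AmpConf(torch.int8, 'configure.json')
    with ipex.AutoMixPrecision(conf, running_mode='calibration'):
        model2(x2)
    conf.save('configure.json')
    conf = ipex.AmpConf(torch.int8, 'configure.json')
    with ipex.AutoMixPrecision(conf, running_mode='inference'):
        y = model2(x2)
    assert ipex.core.is_fp32_dil_tensor(y)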
