Commit ae5717d

add longer nightly test for ONNX upscale pipeline
1 parent: cb5de57 · commit: ae5717d

File tree

1 file changed

tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py

Lines changed: 70 additions & 1 deletion
@@ -21,10 +21,22 @@
 
 from diffusers.pipelines.stable_diffusion import OnnxStableDiffusionUpscalePipeline
 from diffusers.utils import floats_tensor
+from diffusers.utils.testing_utils import (
+    is_onnx_available,
+    load_image,
+    nightly,
+    require_onnxruntime,
+    require_torch_gpu,
+)
+
 
 from ...test_pipelines_onnx_common import OnnxPipelineTesterMixin
 
 
+if is_onnx_available():
+    import onnxruntime as ort
+
+
 class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
     # TODO: is there an appropriate internal test set?
     hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
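
Note: the `is_onnx_available()` guard above keeps the test module importable (and collectable by pytest) on machines without onnxruntime installed. A minimal sketch of the same guard pattern, using `importlib.util.find_spec` as a stand-in for the diffusers helper:

import importlib.util

def onnx_runtime_installed() -> bool:
    # Stand-in for diffusers' is_onnx_available(): report whether the
    # onnxruntime package can be resolved without actually importing it.
    return importlib.util.find_spec("onnxruntime") is not None

if onnx_runtime_installed():
    import onnxruntime as ort  # only imported when the package exists

Tests that use `ort` directly, like the integration class added below, are additionally gated with `@require_onnxruntime`, so they are skipped rather than erroring when the runtime is unavailable.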
@@ -42,7 +54,7 @@ def get_dummy_inputs(self, seed=0):
         }
         return inputs
 
-    def test_pipeline_default_ddim(self):
+    def test_pipeline_default_ddpm(self):
         pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
         pipe.set_progress_bar_config(disable=None)
 
@@ -56,3 +68,60 @@ def test_pipeline_default_ddim(self):
             [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
         )
         assert np.abs(image_slice - expected_slice).max() < 1e-1
+
+
+@nightly
+@require_onnxruntime
+@require_torch_gpu
+class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
+    @property
+    def gpu_provider(self):
+        return (
+            "CUDAExecutionProvider",
+            {
+                "gpu_mem_limit": "15000000000",  # 15GB
+                "arena_extend_strategy": "kSameAsRequested",
+            },
+        )
+
+    @property
+    def gpu_options(self):
+        options = ort.SessionOptions()
+        options.enable_mem_pattern = False
+        return options
+
+    def test_inference_default_ddpm(self):
+        init_image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
+            "/img2img/sketch-mountains-input.jpg"
+        )
+        init_image = init_image.resize((128, 128))
+        # using the PNDM scheduler by default
+        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
+            "ssube/stable-diffusion-x4-upscaler-onnx",
+            safety_checker=None,
+            feature_extractor=None,
+            provider=self.gpu_provider,
+            sess_options=self.gpu_options,
+        )
+        pipe.set_progress_bar_config(disable=None)
+
+        prompt = "A fantasy landscape, trending on artstation"
+
+        generator = torch.manual_seed(0)
+        output = pipe(
+            prompt=prompt,
+            image=init_image,
+            guidance_scale=7.5,
+            num_inference_steps=10,
+            generator=generator,
+            output_type="np",
+        )
+        images = output.images
+        image_slice = images[0, 255:258, 383:386, -1]
+
+        assert images.shape == (1, 512, 512, 3)
+        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
+        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
+
+        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
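
Note: `gpu_provider` returns a `(provider_name, provider_options)` tuple and `gpu_options` returns an `onnxruntime.SessionOptions` with memory patterns disabled; the pipeline passes both through to the ONNX Runtime sessions it creates. A minimal sketch of how the same settings map onto a plain onnxruntime session (the `model.onnx` path is a placeholder for illustration, not part of this commit):

import onnxruntime as ort

# CUDA execution provider with an explicit arena limit (~15 GB) and an
# arena that grows only as much as each request needs.
provider = (
    "CUDAExecutionProvider",
    {
        "gpu_mem_limit": "15000000000",
        "arena_extend_strategy": "kSameAsRequested",
    },
)

sess_options = ort.SessionOptions()
sess_options.enable_mem_pattern = False  # mirrors gpu_options above

# Placeholder model path, shown only to illustrate where the provider
# tuple and session options would be supplied.
# sess = ort.InferenceSession("model.onnx", sess_options=sess_options, providers=[provider])

Because the new class is marked `@nightly`, it is skipped in the default test run; assuming the usual diffusers `RUN_NIGHTLY=1` flag behind that decorator, it can be exercised locally with `RUN_NIGHTLY=1 python -m pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py -k IntegrationTests` on a host that also satisfies `@require_onnxruntime` and `@require_torch_gpu`.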
