We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 982f9b3 · commit f92e599 (Copy full SHA for f92e599)
src/diffusers/pipelines/cogview4/pipeline_cogview4.py
@@ -215,7 +215,7 @@ def _get_glm_embeds(
215
)
216
text_input_ids = torch.cat([pad_ids, text_input_ids], dim=1)
217
prompt_embeds = self.text_encoder(
218
- text_input_ids.to(self.text_encoder.model.device), output_hidden_states=True
+ text_input_ids.to(self.text_encoder.device), output_hidden_states=True
219
).hidden_states[-2]
220
221
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
0 commit comments