Update app.py
Browse files
app.py
CHANGED
|
@@ -381,7 +381,7 @@ def encode_image(pil_image):
 381        dtype = torch.bfloat16
 382        device = "cuda" if torch.cuda.is_available() else "cpu"
 383
 384 -      pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-
 385        transformer= QwenImageTransformer2DModel.from_pretrained("linoyts/Qwen-Image-Edit-Rapid-AIO",
 386        subfolder='transformer',
 387        torch_dtype=dtype,
|
@@ -479,9 +479,6 @@ def infer(
 479        print(f"Calling pipeline with prompt: '{prompt}'")
 480        print(f"Negative Prompt: '{negative_prompt}'")
 481        print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
 482 -      if rewrite_prompt and len(pil_images) > 0:
 483 -          prompt = polish_prompt_hf(prompt, pil_images)
 484 -          print(f"Rewritten Prompt: {prompt}")
 485
 486
 487        # Generate the image
|
|
|
|
After the change (new version of both hunks):

 381        dtype = torch.bfloat16
 382        device = "cuda" if torch.cuda.is_available() else "cpu"
 383
 384 +      pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2511",
 385        transformer= QwenImageTransformer2DModel.from_pretrained("linoyts/Qwen-Image-Edit-Rapid-AIO",
 386        subfolder='transformer',
 387        torch_dtype=dtype,

 479        print(f"Calling pipeline with prompt: '{prompt}'")
 480        print(f"Negative Prompt: '{negative_prompt}'")
 481        print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
 482
 483
 484        # Generate the image