Tanut committed on
Commit 4032532 · 1 Parent(s): 91d463e

Edit for displaying

Files changed (1)
  1. app.py +10 -640
app.py CHANGED
@@ -1,644 +1,14 @@
-import os, gc, random, re, inspect
-from contextlib import nullcontext
-
 import gradio as gr
-import torch, spaces
-from PIL import Image, ImageFilter, ImageOps
-import numpy as np
-import qrcode
-from qrcode.constants import ERROR_CORRECT_H
-from diffusers import (
-    StableDiffusionControlNetPipeline,
-    StableDiffusionControlNetImg2ImgPipeline,  # for Hi-Res Fix
-    ControlNetModel,
-    DPMSolverMultistepScheduler,
-)
-
-# ------------------- env / runtime -------------------
-# Quiet matplotlib cache warning on Spaces
-os.environ.setdefault("MPLCONFIGDIR", "/tmp/mpl")
-
-# Optional: faster model downloads on Spaces
-os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
-os.environ.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "1")
-
-# Hugging Face token (add it in Space Settings → Variables and secrets)
-HF_TOKEN = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
-
-# Device / dtype (CPU-safe)
-IS_CUDA = torch.cuda.is_available()
-IS_MPS = getattr(torch.backends, "mps", None) and torch.backends.mps.is_available()
-DTYPE = torch.float16 if (IS_CUDA or IS_MPS) else torch.float32
-DEV_TORCH = "cuda" if IS_CUDA else ("mps" if IS_MPS else "cpu")
-
-def autocast_ctx():
-    if IS_CUDA:
-        return torch.autocast(device_type="cuda", dtype=torch.float16)
-    if IS_MPS:
-        # MPS autocast uses fp16 path; acceptable for SD 1.5 on macOS
-        return torch.autocast(device_type="mps", dtype=torch.float16)
-    return nullcontext()  # CPU: no autocast
-
-# ------------------- models -------------------
-BASE_MODELS = {
-    "stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5",
-    "dream": "Lykon/dreamshaper-8",
-}
-
-# ControlNets
-CN_QRMON = "monster-labs/control_v1p_sd15_qrcode_monster"
-CN_BRIGHT = "latentcat/control_v1p_sd15_brightness"
-
-# ---------- helpers ----------
-def resize_like(im: Image.Image, width: int, height: int, method=Image.NEAREST) -> Image.Image:
-    if im.size == (width, height):
-        return im
-    return im.resize((int(width), int(height)), method)
-
-def ensure_rgb_img(x):
-    if isinstance(x, Image.Image):
-        return x.convert("RGB")
-    if isinstance(x, np.ndarray):
-        a = x
-        if a.dtype != np.uint8:
-            a = np.clip(a, 0, 255).astype(np.uint8)
-        if a.ndim == 2:
-            return Image.fromarray(a, mode="L").convert("RGB")
-        return Image.fromarray(a).convert("RGB")
-    if torch.is_tensor(x):
-        t = x.detach().cpu()
-        if t.ndim == 3 and t.shape[0] in (1, 3):
-            t = t.permute(1, 2, 0)
-        arr = t.numpy()
-        if arr.max() <= 1.0:
-            arr = arr * 255.0
-        arr = np.clip(arr, 0, 255).astype(np.uint8)
-        if arr.ndim == 2:
-            return Image.fromarray(arr, mode="L").convert("RGB")
-        return Image.fromarray(arr).convert("RGB")
-    raise ValueError(f"Unsupported image type for ensure_rgb_img: {type(x)}")
-
-def snap8(x: int) -> int:
-    x = max(256, min(1024, int(x)))
-    return x - (x % 8)
-
-def normalize_color(c):
-    if c is None: return "white"
-    if isinstance(c, (tuple, list)):
-        r, g, b = (int(max(0, min(255, round(float(x))))) for x in c[:3]); return (r, g, b)
-    if isinstance(c, str):
-        s = c.strip()
-        if s.startswith("#"): return s
-        m = re.match(r"rgba?\(\s*([0-9.]+)\s*,\s*([0-9.]+)\s*,\s*([0-9.]+)", s, re.IGNORECASE)
-        if m:
-            r = int(max(0, min(255, round(float(m.group(1))))))
-            g = int(max(0, min(255, round(float(m.group(2))))))
-            b = int(max(0, min(255, round(float(m.group(3))))))
-            return (r, g, b)
-        return s
-    return "white"
-
-def make_qr(url="https://example.com", size=768, border=12, back_color="#FFFFFF", blur_radius=0.0):
-    qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border))
-    qr.add_data(url.strip()); qr.make(fit=True)
-    img = qr.make_image(fill_color="black", back_color=normalize_color(back_color)).convert("RGB")
-    img = img.resize((int(size), int(size)), Image.NEAREST)
-    if blur_radius and blur_radius > 0:
-        img = img.filter(ImageFilter.GaussianBlur(radius=float(blur_radius)))
-    return img
-
-def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: float = 0.0, feather: float = 1.0) -> Image.Image:
-    if strength <= 0: return stylized
-    q = qr_img.convert("L")
-    black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(radius=float(feather)))
-    black = np.asarray(black_mask, dtype=np.float32) / 255.0
-    white = 1.0 - black
-    s = np.asarray(stylized.convert("RGB"), dtype=np.float32) / 255.0
-    s = s * (1.0 - float(strength) * black[..., None])
-    s = s + (1.0 - s) * (float(strength) * 0.85 * white[..., None])
-    s = np.clip(s, 0.0, 1.0)
-    return Image.fromarray((s * 255.0).astype(np.uint8), mode="RGB")
-
-# ----- Brightness map preprocessing & mixing -----
-def prep_brightness_map(img: Image.Image, size: int, source: str,
-                        blur_px: float = 3.0, gamma: float = 0.9, autocontrast: bool = True) -> Image.Image:
-    method = Image.NEAREST if source == "qr" else Image.LANCZOS
-    im = img.resize((size, size), method).convert("L")
-    if source != "qr":
-        if autocontrast:
-            im = ImageOps.autocontrast(im, cutoff=2)
-        if blur_px and blur_px > 0:
-            im = im.filter(ImageFilter.GaussianBlur(radius=float(blur_px)))
-        if gamma and gamma != 1.0:
-            arr = np.asarray(im, dtype=np.float32) / 255.0
-            arr = np.clip(arr ** float(gamma), 0.0, 1.0)
-            im = Image.fromarray((arr * 255.0).astype(np.uint8), "L")
-    return im.convert("RGB")
-
-def blend_brightness_maps(qr_img: Image.Image,
-                          init_img: Image.Image,
-                          size: int,
-                          alpha: float,
-                          blur_px: float = 2.5,
-                          gamma: float = 0.9,
-                          autocontrast: bool = True) -> Image.Image:
-    qr_map = prep_brightness_map(qr_img, size, "qr")
-    init_map = prep_brightness_map(init_img, size, "init",
-                                   blur_px=blur_px, gamma=gamma, autocontrast=autocontrast)
-    qa = np.asarray(qr_map, dtype=np.float32)
-    ia = np.asarray(init_map, dtype=np.float32)
-    a = float(alpha)
-    mix = np.clip((1.0 - a) * ia + a * qa, 0, 255).astype(np.uint8)
-    return Image.fromarray(mix, mode="RGB")
-
-# ---------- lazy pipelines / models ----------
-_CN_QR = None
-_CN_BR = None
-_CN_TXT2IMG = {}
-_CN_IMG2IMG = {}
-
-def _base_scheduler_for(pipe):
-    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
-        pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++"
-    )
-    pipe.enable_attention_slicing()
-    pipe.enable_vae_slicing()
-    pipe.enable_model_cpu_offload()
-    return pipe
-
-def get_qr_cn():
-    global _CN_QR
-    if _CN_QR is None:
-        _CN_QR = ControlNetModel.from_pretrained(
-            CN_QRMON, torch_dtype=DTYPE, use_safetensors=True, token=HF_TOKEN
-        )
-    return _CN_QR
-
-def get_bright_cn():
-    global _CN_BR
-    if _CN_BR is None:
-        _CN_BR = ControlNetModel.from_pretrained(
-            CN_BRIGHT, torch_dtype=DTYPE, use_safetensors=True, token=HF_TOKEN
-        )
-    return _CN_BR
-
-def get_controlnets(use_brightness: bool):
-    return [get_qr_cn(), get_bright_cn()] if use_brightness else get_qr_cn()
-
-def get_txt2img_pipe(model_id: str, use_brightness: bool):
-    key = (model_id, "2cn" if use_brightness else "1cn")
-    if key not in _CN_TXT2IMG:
-        pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            model_id,
-            controlnet=get_controlnets(use_brightness),
-            torch_dtype=DTYPE,
-            safety_checker=None,
-            use_safetensors=True,
-            low_cpu_mem_usage=True,
-            token=HF_TOKEN,
-        )
-        _CN_TXT2IMG[key] = _base_scheduler_for(pipe)
-    return _CN_TXT2IMG[key]
-
-def get_img2img_pipe(model_id: str, use_brightness: bool):
-    key = (model_id, "2cn" if use_brightness else "1cn")
-    if key not in _CN_IMG2IMG:
-        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-            model_id,
-            controlnet=get_controlnets(use_brightness),
-            torch_dtype=DTYPE,
-            safety_checker=None,
-            use_safetensors=True,
-            low_cpu_mem_usage=True,
-            token=HF_TOKEN,
-        )
-        _CN_IMG2IMG[key] = _base_scheduler_for(pipe)
-    return _CN_IMG2IMG[key]
-
-# -------- core helpers --------
-def _pick_brightness_image(mode: str,
-                           qr_img: Image.Image,
-                           init_img: Image.Image | None,
-                           custom_img: Image.Image | None) -> Image.Image:
-    if mode == "init" and init_img is not None:
-        return init_img
-    if mode == "custom" and custom_img is not None:
-        return custom_img
-    return qr_img
-
-# -------- Method 1: QR control model in text-to-image (+ optional Hi-Res Fix) --------
-def _qr_txt2img_core(model_id: str,
-                     url: str, style_prompt: str, negative: str,
-                     steps: int, cfg: float, size: int, border: int,
-                     qr_weight: float, seed: int,
-                     use_hires: bool, hires_upscale: float, hires_strength: float,
-                     repair_strength: float, feather: float,
-                     control_start: float, control_end: float,
-                     use_brightness: bool, bright_weight: float,
-                     bright_start: float, bright_end: float,
-                     bright_mode: str, bright_custom: Image.Image | None):
-
-    s = snap8(size)
-
-    # --- Build base-size control images (s x s)
-    qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF", blur_radius=0.0)
-    if use_brightness:
-        raw_bright_s = _pick_brightness_image(bright_mode, qr_img, None, bright_custom)
-        bright_img_s = prep_brightness_map(raw_bright_s, s, bright_mode)
-        control_images_s = [ensure_rgb_img(qr_img), ensure_rgb_img(bright_img_s)]
-        scales_s = [float(qr_weight), float(bright_weight)]
-        starts_s = [float(control_start), float(bright_start)]
-        ends_s = [float(control_end), float(bright_end)]
-    else:
-        control_images_s = ensure_rgb_img(qr_img)
-        scales_s = float(qr_weight)
-        starts_s = float(control_start)
-        ends_s = float(control_end)
-
-    # Seed / generator
-    if int(seed) < 0:
-        seed = random.randint(0, 2**31 - 1)
-    gen = torch.Generator(device=DEV_TORCH).manual_seed(int(seed))
-
-    # --- Stage A: txt2img at s x s
-    pipe = get_txt2img_pipe(model_id, use_brightness)
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()
-    gc.collect()
-
-    kwargs = dict(
-        prompt=str(style_prompt),
-        negative_prompt=str(negative or ""),
-        num_inference_steps=int(steps),
-        guidance_scale=float(cfg),
-        width=s, height=s,
-        generator=gen,
-        controlnet_conditioning_scale=scales_s,
-        control_guidance_start=starts_s,
-        control_guidance_end=ends_s,
-    )
-
-    # detect which argument the pipeline supports
-    sig = inspect.signature(pipe.__call__)
-    if "control_image" in sig.parameters:
-        kwargs["control_image"] = control_images_s
-    elif "image" in sig.parameters:
-        kwargs["image"] = control_images_s
-    else:
-        raise RuntimeError("Pipeline does not accept controlnet images")
-
-    with autocast_ctx():
-        out = pipe(**kwargs)
-
-    lowres = out.images[0]
-
-    # --- Stage B: optional hi-res
-    final = lowres
-    qr_for_repair = qr_img
-    if use_hires:
-        up = max(1.0, min(2.0, float(hires_upscale)))
-        W = snap8(int(s * up)); H = W
-
-        qr_img_hi = resize_like(qr_img, W, H, method=Image.NEAREST)
-        if use_brightness:
-            raw_bright_hi = _pick_brightness_image(bright_mode, qr_img_hi, None, bright_custom)
-            bright_img_hi = prep_brightness_map(raw_bright_hi, W, bright_mode)
-            control_images_hi = [ensure_rgb_img(qr_img_hi), ensure_rgb_img(bright_img_hi)]
-            scales_hi = scales_s; starts_hi = starts_s; ends_hi = ends_s
-        else:
-            control_images_hi = ensure_rgb_img(qr_img_hi)
-            scales_hi = scales_s; starts_hi = starts_s; ends_hi = ends_s
-
-        pipe2 = get_img2img_pipe(model_id, use_brightness)
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-        gc.collect()
-
-        kwargs2 = dict(
-            prompt=str(style_prompt),
-            negative_prompt=str(negative or ""),
-            image=lowres,
-            strength=float(hires_strength),
-            num_inference_steps=int(steps),
-            guidance_scale=float(cfg),
-            width=W, height=H,
-            generator=gen,
-            controlnet_conditioning_scale=scales_hi,
-            control_guidance_start=starts_hi,
-            control_guidance_end=ends_hi,
-        )
-
-        sig2 = inspect.signature(pipe2.__call__)
-        if "control_image" in sig2.parameters:
-            kwargs2["control_image"] = control_images_hi
-        elif "image" in sig2.parameters:
-            kwargs2["image"] = control_images_hi
-        else:
-            raise RuntimeError("Img2Img pipeline does not accept controlnet images")
-
-        with autocast_ctx():
-            out2 = pipe2(**kwargs2)
-
-        final = out2.images[0]
-        qr_for_repair = qr_img_hi
-
-    final = enforce_qr_contrast(final, qr_for_repair,
-                                strength=float(repair_strength),
-                                feather=float(feather))
-    return final, lowres, qr_img
-
-# ===================== helpers for img2img =====================
-def center_square(im: Image.Image) -> Image.Image:
-    w, h = im.size
-    if w == h:
-        return im
-    if w > h:
-        off = (w - h) // 2
-        return im.crop((off, 0, off + h, h))
-    else:
-        off = (h - w) // 2
-        return im.crop((0, off, w, off + w))
-
-def prep_init_image(init_img: Image.Image, target: int) -> Image.Image:
-    s = snap8(target)
-    im = center_square(init_img.convert("RGB"))
-    return im.resize((s, s), Image.LANCZOS)
-
-# ================== img2img + QR Control core ==================
-def _qr_img2img_core(model_id: str,
-                     init_image: Image.Image,
-                     url: str,
-                     style_prompt: str,
-                     negative: str,
-                     steps: int,
-                     cfg: float,
-                     size: int,
-                     border: int,
-                     qr_weight: float,
-                     seed: int,
-                     strength: float,
-                     repair_strength: float,
-                     feather: float,
-                     control_start: float, control_end: float,
-                     use_brightness: bool, bright_weight: float,
-                     bright_start: float, bright_end: float,
-                     bright_mode: str, bright_custom: Image.Image | None,
-                     bright_blur_px: float = 2.5, bright_gamma: float = 0.9, bright_autocontrast: bool = True,
-                     bright_mix_alpha: float = 0.65):
-
-    s = snap8(size)
-    init = ensure_rgb_img(prep_init_image(init_image, s))
-    qr_img = ensure_rgb_img(make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF", blur_radius=0.0))
-
-    if int(seed) < 0:
-        seed = random.randint(0, 2**31 - 1)
-    gen = torch.Generator(device=DEV_TORCH).manual_seed(int(seed))
-
-    if use_brightness:
-        if bright_mode == "mix":
-            bright_img = blend_brightness_maps(qr_img, init, s,
-                                               alpha=float(bright_mix_alpha),
-                                               blur_px=float(bright_blur_px),
-                                               gamma=float(bright_gamma),
-                                               autocontrast=bool(bright_autocontrast))
-        else:
-            raw_bright = _pick_brightness_image(bright_mode, qr_img, init, bright_custom)
-            bright_img = prep_brightness_map(raw_bright, s, bright_mode,
-                                             blur_px=float(bright_blur_px),
-                                             gamma=float(bright_gamma),
-                                             autocontrast=bool(bright_autocontrast))
-        control_images = [ensure_rgb_img(qr_img), ensure_rgb_img(bright_img)]
-        scales = [float(qr_weight), float(bright_weight)]
-        starts = [float(control_start), float(bright_start)]
-        ends = [float(control_end), float(bright_end)]
-    else:
-        control_images = ensure_rgb_img(qr_img)
-        scales = float(qr_weight)
-        starts = float(control_start)
-        ends = float(control_end)
-
-    pipe = get_img2img_pipe(model_id, use_brightness)
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()
-    gc.collect()
-
-    kwargs = dict(
-        prompt=str(style_prompt),
-        negative_prompt=str(negative or ""),
-        image=init,
-        strength=float(strength),
-        num_inference_steps=int(steps),
-        guidance_scale=float(cfg),
-        width=s, height=s,
-        generator=gen,
-        controlnet_conditioning_scale=scales,
-        control_guidance_start=starts,
-        control_guidance_end=ends,
-    )
-
-    sig = inspect.signature(pipe.__call__)
-    if "control_image" in sig.parameters:
-        kwargs["control_image"] = control_images
-    elif "image" in sig.parameters and isinstance(control_images, list):
-        kwargs["image"] = [init] + control_images
-    else:
-        raise RuntimeError("Img2Img pipeline does not accept controlnet images")
-
-    with autocast_ctx():
-        out = pipe(**kwargs)
-
-    final = out.images[0]
-    final = enforce_qr_contrast(final, qr_img, strength=float(repair_strength), feather=float(feather))
-    return final, init, qr_img
-
-# ============== wrappers for Gradio ==============
-@spaces.GPU(duration=120)
-def qr_img2img_blend(model_key: str,
-                     init_image: Image.Image,
-                     url: str, style_prompt: str, negative: str,
-                     steps: int, cfg: float, size: int, border: int,
-                     qr_weight: float, seed: int,
-                     strength: float,
-                     repair_strength: float, feather: float,
-                     control_start: float, control_end: float,
-                     use_brightness: bool, bright_weight: float,
-                     bright_start: float, bright_end: float,
-                     bright_mode: str, bright_custom: Image.Image | None,
-                     bright_blur_px: float, bright_gamma: float, bright_autocontrast: bool,
-                     bright_mix_alpha: float):
-    model_id = BASE_MODELS.get(model_key, BASE_MODELS["stable-diffusion-v1-5"])
-    return _qr_img2img_core(model_id,
-                            init_image,
-                            url, style_prompt, negative,
-                            steps, cfg, size, border,
-                            qr_weight, seed,
-                            strength,
-                            repair_strength, feather,
-                            control_start, control_end,
-                            use_brightness, bright_weight,
-                            bright_start, bright_end,
-                            bright_mode, bright_custom,
-                            bright_blur_px, bright_gamma, bright_autocontrast,
-                            bright_mix_alpha)
-
-@spaces.GPU(duration=120)
-def qr_txt2img_sd15(*args):
-    return _qr_txt2img_core(BASE_MODELS["stable-diffusion-v1-5"], *args)
-
-@spaces.GPU(duration=120)
-def qr_txt2img_dream(*args):
-    return _qr_txt2img_core(BASE_MODELS["dream"], *args)
-
-# ---------- UI ----------
-with gr.Blocks() as demo:
-    gr.Markdown("# ZeroGPU • QR Control (with optional Brightness ControlNet)")
-
-    # ---- Tab 1: stable-diffusion-v1-5 (Brightness forced ON) ----
-    with gr.Tab("stable-diffusion-v1-5"):
-        url1 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
-        s_prompt1 = gr.Textbox(label="Style prompt", value="japanese painting, elegant shrine and torii, distant mount fuji, autumn maple trees, warm sunlight, 1girl in kimono, highly detailed, intricate patterns, anime key visual, dramatic composition")
-        s_negative1 = gr.Textbox(label="Negative prompt", value="ugly, low quality, blurry, nsfw, watermark, text, low contrast, deformed, extra digits")
-        size1 = gr.Slider(384, 1024, value=640, step=64, label="Canvas (px)")
-        steps1 = gr.Slider(10, 50, value=30, step=1, label="Steps")
-        cfg1 = gr.Slider(1.0, 12.0, value=6.0, step=0.1, label="CFG")
-        border1 = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)")
-        qr_w1 = gr.Slider(0.8, 1.8, value=1.6, step=0.05, label="QR control weight")
-        seed1 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
-
-        cstart1 = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start")
-        cend1 = gr.Slider(0.4, 1.0, value=1.0, step=0.05, label="QR control end")
-
-        use_hires1 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
-        hires_up1 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)")
-        hires_str1 = gr.Slider(0.30, 0.60, value=0.45, step=0.05, label="Hi-Res denoise strength")
-
-        repair1 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
-        feather1 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
-
-        use_bright1 = gr.Checkbox(value=True, visible=False)
-        bright_w1 = gr.Slider(0.0, 0.5, value=0.15, step=0.01, label="Brightness weight")
-        bright_s1 = gr.Slider(0.0, 0.8, value=0.10, step=0.05, label="Brightness start")
-        bright_e1 = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end")
-        bright_mode1 = gr.Radio(choices=["qr","custom"], value="qr", label="Brightness source")
-        bright_ref1 = gr.Image(label="(Optional) custom brightness ref", type="pil")
-
-        final_img1 = gr.Image(label="Final (or Hi-Res) image")
-        low_img1 = gr.Image(label="Low-res (Stage A) preview")
-        ctrl_img1 = gr.Image(label="Control QR used")
-
-        gr.Button("Generate with SD 1.5").click(
-            qr_txt2img_sd15,
-            [url1, s_prompt1, s_negative1, steps1, cfg1, size1, border1, qr_w1, seed1,
-             use_hires1, hires_up1, hires_str1, repair1, feather1,
-             cstart1, cend1,
-             use_bright1, bright_w1, bright_s1, bright_e1, bright_mode1, bright_ref1],
-            [final_img1, low_img1, ctrl_img1],
-            api_name="qr_txt2img_sd15"
-        )
-
-    # ---- Tab 2: DreamShaper 8 (Brightness forced ON) ----
-    with gr.Tab("DreamShaper 8"):
-        url2 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
-        s_prompt2 = gr.Textbox(label="Style prompt", value="ornate baroque palace interior, gilded details, chandeliers, volumetric light, ultra detailed, cinematic")
-        s_negative2 = gr.Textbox(label="Negative prompt", value="lowres, low contrast, blurry, jpeg artifacts, watermark, text, bad anatomy")
-        size2 = gr.Slider(384, 1024, value=640, step=64, label="Canvas (px)")
-        steps2 = gr.Slider(10, 50, value=30, step=1, label="Steps")
-        cfg2 = gr.Slider(1.0, 12.0, value=6.5, step=0.1, label="CFG")
-        border2 = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)")
-        qr_w2 = gr.Slider(0.8, 1.8, value=1.6, step=0.05, label="QR control weight")
-        seed2 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
-
-        cstart2 = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start")
-        cend2 = gr.Slider(0.4, 1.0, value=1.0, step=0.05, label="QR control end")
-
-        use_hires2 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
-        hires_up2 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)")
-        hires_str2 = gr.Slider(0.30, 0.60, value=0.45, step=0.05, label="Hi-Res denoise strength")
-
-        repair2 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
-        feather2 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
-
-        use_bright2 = gr.Checkbox(value=True, visible=False)
-        bright_w2 = gr.Slider(0.0, 0.5, value=0.15, step=0.01, label="Brightness weight")
-        bright_s2 = gr.Slider(0.0, 0.8, value=0.10, step=0.05, label="Brightness start")
-        bright_e2 = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end")
-        bright_mode2 = gr.Radio(choices=["qr","custom"], value="qr", label="Brightness source")
-        bright_ref2 = gr.Image(label="(Optional) custom brightness ref", type="pil")
-
-        final_img2 = gr.Image(label="Final (or Hi-Res) image")
-        low_img2 = gr.Image(label="Low-res (Stage A) preview")
-        ctrl_img2 = gr.Image(label="Control QR used")
-
-        gr.Button("Generate with DreamShaper 8").click(
-            qr_txt2img_dream,
-            [url2, s_prompt2, s_negative2, steps2, cfg2, size2, border2, qr_w2, seed2,
-             use_hires2, hires_up2, hires_str2, repair2, feather2,
-             cstart2, cend2,
-             use_bright2, bright_w2, bright_s2, bright_e2, bright_mode2, bright_ref2],
-            [final_img2, low_img2, ctrl_img2],
-            api_name="qr_txt2img_dream"
-        )
-
-    # ------------------- Image Blend (img2img + QR) -------------------
-    with gr.Tab("Image Blend (img2img + QR)"):
-        m_key = gr.Dropdown(choices=list(BASE_MODELS.keys()),
-                            value="stable-diffusion-v1-5",
-                            label="Base model")
-
-        init_up = gr.Image(label="Upload base image", type="pil")
-
-        url_b = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
-        s_prompt_b = gr.Textbox(label="Style prompt", value="highly detailed, cinematic lighting, rich textures")
-        s_negative_b = gr.Textbox(label="Negative prompt", value="ugly, low quality, blurry, watermark, text")
-
-        size_b = gr.Slider(384, 1024, value=768, step=64, label="Canvas (px, target)")
-        steps_b = gr.Slider(10, 50, value=30, step=1, label="Steps")
-        cfg_b = gr.Slider(1.0, 12.0, value=6.0, step=0.1, label="CFG")
-
-        border_b = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)")
-        qr_w_b = gr.Slider(0.8, 1.8, value=1.8, step=0.05, label="QR control weight")
-        seed_b = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
-
-        strength_b = gr.Slider(0.2, 0.9, value=0.70, step=0.05, label="Img2Img denoise strength (blend amount)")
-
-        cstart_b = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start")
-        cend_b = gr.Slider(0.4, 1.0, value=0.95, step=0.05, label="QR control end")
-
-        repair_b = gr.Slider(0.0, 1.0, value=0.1, step=0.05, label="Post repair strength (optional)")
-        feather_b = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
-
-        use_bright_b = gr.Checkbox(value=True, label="Add Brightness ControlNet")
-        bright_w_b = gr.Slider(0.0, 0.5, value=0.25, step=0.01, label="Brightness weight")
-        bright_s_b = gr.Slider(0.0, 0.8, value=0.40, step=0.05, label="Brightness start")
-        bright_e_b = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end")
-        bright_mode_b = gr.Radio(choices=["mix","qr","init","custom"], value="mix", label="Brightness source")
-        bright_ref_b = gr.Image(label="(Optional) custom brightness ref", type="pil")
-
-        bright_blur_b = gr.Slider(0.0, 6.0, value=2.5, step=0.1, label="Brightness blur (px)")
-        bright_gamma_b = gr.Slider(0.6, 1.2, value=0.9, step=0.01, label="Brightness gamma")
-        bright_auto_b = gr.Checkbox(value=True, label="Brightness auto-contrast")
-
-        bright_mix_b = gr.Slider(0.0, 1.0, value=0.65, step=0.01, label="Brightness source mix")
-
-        final_b = gr.Image(label="Final blended image")
-        init_b = gr.Image(label="(Resized) init image used")
-        ctrl_b = gr.Image(label="Control QR used")
-
-        gr.Button("Blend Uploaded Image with QR").click(
-            qr_img2img_blend,
-            [m_key, init_up, url_b, s_prompt_b, s_negative_b, steps_b, cfg_b, size_b, border_b,
-             qr_w_b, seed_b, strength_b, repair_b, feather_b, cstart_b, cend_b,
-             use_bright_b, bright_w_b, bright_s_b, bright_e_b, bright_mode_b, bright_ref_b,
-             bright_blur_b, bright_gamma_b, bright_auto_b, bright_mix_b],
-            [final_b, init_b, ctrl_b],
-            api_name="qr_img2img_blend"
-        )
-
-if __name__ == "__main__":
-    demo.queue(max_size=12).launch(
-        server_name="0.0.0.0",  # 👈 required on Spaces
-        server_port=7860,       # 👈 Spaces expects 7860
-        show_api=False,         # optional: quieter UI
-        share=False,            # Spaces provides the public URL
-    )
+import spaces
+import torch
+
+zero = torch.Tensor([0]).cuda()
+print(zero.device)  # <-- 'cpu' 🤔
+
+@spaces.GPU
+def greet(n):
+    print(zero.device)  # <-- 'cuda:0' 🤗
+    return f"Hello {zero + n} Tensor"
+
+demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
+demo.launch()
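The replacement app.py is the stock ZeroGPU smoke test: on a ZeroGPU Space, a real CUDA device is only attached while a @spaces.GPU-decorated function is running, which is why the module-level print(zero.device) reports 'cpu' while the same call inside greet reports 'cuda:0'. For heavier work, the decorator also accepts a duration, as the removed wrappers used. A minimal sketch of that pattern; the generate name and the workload are illustrative, not part of this commit:

import spaces
import torch

@spaces.GPU(duration=120)  # illustrative: request a longer GPU window, as the removed @spaces.GPU(duration=120) wrappers did
def generate(n):
    # heavy CUDA work belongs here; the GPU is only guaranteed inside this call
    return f"sum = {torch.ones(int(n)).cuda().sum().item()}"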