import os
import gc
import random
import re

import gradio as gr
import torch
import spaces
import numpy as np
import qrcode
from PIL import Image, ImageFilter, ImageOps
from qrcode.constants import ERROR_CORRECT_H
from diffusers import (
    StableDiffusionControlNetPipeline,
    StableDiffusionControlNetImg2ImgPipeline,  # for Hi-Res Fix
    ControlNetModel,
    DPMSolverMultistepScheduler,
)

# Quiet matplotlib cache warning on Spaces
os.environ.setdefault("MPLCONFIGDIR", "/tmp/mpl")

# ---- base models for the two tabs ----
BASE_MODELS = {
    "stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5",
    "dream": "Lykon/dreamshaper-8",
}

# ControlNets
CN_QRMON = "monster-labs/control_v1p_sd15_qrcode_monster"
CN_BRIGHT = "latentcat/control_v1p_sd15_brightness"

# dtype / device
DTYPE = torch.float16
DEV_AUTOCAST = "cuda" if torch.cuda.is_available() else "cpu"  # mps doesn't support autocast
DEV_TORCH = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
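
# NOTE (assumption worth verifying): the "runwayml/stable-diffusion-v1-5"
# repo has been removed from the Hugging Face Hub. If loading fails, the
# community mirror "stable-diffusion-v1-5/stable-diffusion-v1-5" is a
# drop-in replacement for the BASE_MODELS entry above.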

# ---------- helpers ----------
def resize_like(im: Image.Image, width: int, height: int, method=Image.NEAREST) -> Image.Image:
    if im.size == (width, height):
        return im
    return im.resize((int(width), int(height)), method)


def snap8(x: int) -> int:
    """Clamp to [256, 1024], then round down to a multiple of 8 (e.g. 777 -> 776)."""
    x = max(256, min(1024, int(x)))
    return x - (x % 8)


def normalize_color(c):
    if c is None:
        return "white"
    if isinstance(c, (tuple, list)):
        r, g, b = (int(max(0, min(255, round(float(x))))) for x in c[:3])
        return (r, g, b)
    if isinstance(c, str):
        s = c.strip()
        if s.startswith("#"):
            return s
        m = re.match(r"rgba?\(\s*([0-9.]+)\s*,\s*([0-9.]+)\s*,\s*([0-9.]+)", s, re.IGNORECASE)
        if m:
            r = int(max(0, min(255, round(float(m.group(1))))))
            g = int(max(0, min(255, round(float(m.group(2))))))
            b = int(max(0, min(255, round(float(m.group(3))))))
            return (r, g, b)
        return s
    return "white"


def make_qr(url="https://example.com", size=768, border=12, back_color="#FFFFFF", blur_radius=0.0):
    """Give ControlNet a sharp, black-on-white QR (blur optional, off by default)."""
    qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border))
    qr.add_data(url.strip())
    qr.make(fit=True)
    img = qr.make_image(fill_color="black", back_color=normalize_color(back_color)).convert("RGB")
    img = img.resize((int(size), int(size)), Image.NEAREST)
    if blur_radius and blur_radius > 0:
        img = img.filter(ImageFilter.GaussianBlur(radius=float(blur_radius)))
    return img


def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image,
                        strength: float = 0.0, feather: float = 1.0) -> Image.Image:
    """Optional gentle repair (applied only once, at the end)."""
    if strength <= 0:
        return stylized
    q = qr_img.convert("L")
    black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(
        ImageFilter.GaussianBlur(radius=float(feather)))
    black = np.asarray(black_mask, dtype=np.float32) / 255.0
    white = 1.0 - black
    s = np.asarray(stylized.convert("RGB"), dtype=np.float32) / 255.0
    s = s * (1.0 - float(strength) * black[..., None])
    s = s + (1.0 - s) * (float(strength) * 0.85 * white[..., None])
    s = np.clip(s, 0.0, 1.0)
    return Image.fromarray((s * 255.0).astype(np.uint8), mode="RGB")
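
# Optional scanability check: a hedged sketch, not wired into the app. It
# assumes the third-party `pyzbar` package (plus the system zbar library) is
# installed; `decode_qr_check` is a helper name introduced here, not part of
# the original pipeline.
def decode_qr_check(img: Image.Image, expected: str | None = None) -> bool:
    """Return True if `img` still decodes as a QR code (optionally matching `expected`)."""
    try:
        from pyzbar.pyzbar import decode  # optional dependency
    except ImportError:
        return False  # treat "cannot check" as a failed check
    results = decode(img.convert("RGB"))
    if not results:
        return False
    if expected is None:
        return True
    return any(r.data.decode("utf-8", "ignore").strip() == expected.strip() for r in results)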

# ----- Brightness map preprocessing & mixing -----
def prep_brightness_map(img: Image.Image, size: int, source: str,
                        blur_px: float = 2.5, gamma: float = 0.9,
                        autocontrast: bool = True) -> Image.Image:
    """
    Produce a smooth, contrast-normalized brightness image to stabilize
    scanability at lower denoise.
    - For source='qr': keep NEAREST and skip blur (already high-contrast).
    - For source='init' or 'custom': LANCZOS + light blur + autocontrast + mild gamma.
    Returns RGB (diffusers expects 3-channel).
    """
    method = Image.NEAREST if source == "qr" else Image.LANCZOS
    im = img.resize((size, size), method).convert("L")
    if source != "qr":
        if autocontrast:
            im = ImageOps.autocontrast(im, cutoff=2)
        if blur_px and blur_px > 0:
            im = im.filter(ImageFilter.GaussianBlur(radius=float(blur_px)))
        if gamma and gamma != 1.0:
            arr = np.asarray(im, dtype=np.float32) / 255.0
            arr = np.clip(arr ** float(gamma), 0.0, 1.0)
            im = Image.fromarray((arr * 255.0).astype(np.uint8), "L")
    return im.convert("RGB")


def blend_brightness_maps(qr_img: Image.Image, init_img: Image.Image, size: int, alpha: float,
                          blur_px: float = 2.5, gamma: float = 0.9,
                          autocontrast: bool = True) -> Image.Image:
    """
    Mix QR luminance with init-image luminance.
    alpha=0 -> pure init brightness (prettier, weaker QR)
    alpha=1 -> pure QR brightness (strong QR)
    """
    qr_map = prep_brightness_map(qr_img, size, "qr")
    init_map = prep_brightness_map(init_img, size, "init",
                                   blur_px=blur_px, gamma=gamma, autocontrast=autocontrast)
    qa = np.asarray(qr_map, dtype=np.float32)
    ia = np.asarray(init_map, dtype=np.float32)
    a = float(alpha)
    mix = np.clip((1.0 - a) * ia + a * qa, 0, 255).astype(np.uint8)
    return Image.fromarray(mix, mode="RGB")


# ---------- lazy pipelines / models ----------
_CN_QR = None        # QR Monster
_CN_BR = None        # Brightness
_CN_TXT2IMG = {}     # per-base-model txt2img pipes
_CN_IMG2IMG = {}     # per-base-model img2img pipes


def _base_scheduler_for(pipe):
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++"
    )
    pipe.enable_attention_slicing()
    pipe.enable_vae_slicing()
    pipe.enable_model_cpu_offload()
    return pipe


def get_qr_cn():
    global _CN_QR
    if _CN_QR is None:
        _CN_QR = ControlNetModel.from_pretrained(CN_QRMON, torch_dtype=DTYPE, use_safetensors=True)
    return _CN_QR


def get_bright_cn():
    global _CN_BR
    if _CN_BR is None:
        _CN_BR = ControlNetModel.from_pretrained(CN_BRIGHT, torch_dtype=DTYPE, use_safetensors=True)
    return _CN_BR


# diffusers 0.30+: return a single ControlNetModel OR a list[ControlNetModel]
def get_controlnets(use_brightness: bool):
    return [get_qr_cn(), get_bright_cn()] if use_brightness else get_qr_cn()


def get_txt2img_pipe(model_id: str, use_brightness: bool):
    key = (model_id, "2cn" if use_brightness else "1cn")
    if key not in _CN_TXT2IMG:
        pipe = StableDiffusionControlNetPipeline.from_pretrained(
            model_id,
            controlnet=get_controlnets(use_brightness),  # single or list
            torch_dtype=DTYPE,
            safety_checker=None,
            use_safetensors=True,
            low_cpu_mem_usage=True,
        )
        _CN_TXT2IMG[key] = _base_scheduler_for(pipe)
    return _CN_TXT2IMG[key]


def get_img2img_pipe(model_id: str, use_brightness: bool):
    key = (model_id, "2cn" if use_brightness else "1cn")
    if key not in _CN_IMG2IMG:
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            model_id,
            controlnet=get_controlnets(use_brightness),  # single or list
            torch_dtype=DTYPE,
            safety_checker=None,
            use_safetensors=True,
            low_cpu_mem_usage=True,
        )
        _CN_IMG2IMG[key] = _base_scheduler_for(pipe)
    return _CN_IMG2IMG[key]


# -------- core helpers --------
def _pick_brightness_image(mode: str, qr_img: Image.Image,
                           init_img: Image.Image | None,
                           custom_img: Image.Image | None) -> Image.Image:
    if mode == "init" and init_img is not None:
        return init_img
    if mode == "custom" and custom_img is not None:
        return custom_img
    return qr_img  # default: use the QR as the brightness guide
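
# Note on the diffusers multi-ControlNet convention used by the cores below:
# when `controlnet` is a list, the control images, the conditioning scales,
# and the guidance start/end values must be parallel lists of the same length
# (index 0 = QR Monster, index 1 = Brightness). With a single
# ControlNetModel, a single image and plain floats are passed instead.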

# -------- Method 1: QR control model in text-to-image (+ optional Hi-Res Fix) --------
def _qr_txt2img_core(model_id: str, url: str, style_prompt: str,
                     negative: str, steps: int, cfg: float, size: int, border: int,
                     qr_weight: float, seed: int,
                     use_hires: bool, hires_upscale: float, hires_strength: float,
                     repair_strength: float, feather: float,
                     control_start: float, control_end: float,
                     use_brightness: bool, bright_weight: float,
                     bright_start: float, bright_end: float,
                     bright_mode: str, bright_custom: Image.Image | None):
    s = snap8(size)

    # --- Build base-size control images (s x s)
    qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF", blur_radius=0.0)
    if use_brightness:
        raw_bright_s = _pick_brightness_image(bright_mode, qr_img, None, bright_custom)
        bright_img_s = prep_brightness_map(raw_bright_s, s, bright_mode)  # Option A preprocessing
        control_images_s = [qr_img, bright_img_s]
        scales_s = [float(qr_weight), float(bright_weight)]
        starts_s = [float(control_start), float(bright_start)]
        ends_s = [float(control_end), float(bright_end)]
    else:
        control_images_s = qr_img
        scales_s = float(qr_weight)
        starts_s = float(control_start)
        ends_s = float(control_end)

    # Seed / generator
    if int(seed) < 0:
        seed = random.randint(0, 2**31 - 1)
    gen = torch.Generator(device=DEV_TORCH).manual_seed(int(seed))

    # --- Stage A: txt2img at s x s
    pipe = get_txt2img_pipe(model_id, use_brightness)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()
    with torch.autocast(device_type=DEV_AUTOCAST, dtype=DTYPE):
        out = pipe(
            prompt=str(style_prompt),
            negative_prompt=str(negative or ""),
            image=control_images_s,  # single image or list @ s x s
            controlnet_conditioning_scale=scales_s,
            control_guidance_start=starts_s,
            control_guidance_end=ends_s,
            num_inference_steps=int(steps),
            guidance_scale=float(cfg),
            width=s, height=s,
            generator=gen,
        )
    lowres = out.images[0]

    # --- Optional Stage B: Hi-Res Fix
    final = lowres
    qr_for_repair = qr_img  # default (no hires)
    if use_hires:
        up = max(1.0, min(2.0, float(hires_upscale)))
        W = snap8(int(s * up))
        H = W

        # Build hi-res control images (W x H) to match the output
        qr_img_hi = resize_like(qr_img, W, H, method=Image.NEAREST)
        if use_brightness:
            raw_bright_hi = _pick_brightness_image(bright_mode, qr_img_hi, None, bright_custom)
            bright_img_hi = prep_brightness_map(raw_bright_hi, W, bright_mode)
            control_images_hi = [qr_img_hi, bright_img_hi]
        else:
            control_images_hi = qr_img_hi
        scales_hi, starts_hi, ends_hi = scales_s, starts_s, ends_s

        pipe2 = get_img2img_pipe(model_id, use_brightness)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
        with torch.autocast(device_type=DEV_AUTOCAST, dtype=DTYPE):
            out2 = pipe2(
                prompt=str(style_prompt),
                negative_prompt=str(negative or ""),
                image=lowres,                     # init @ s x s
                control_image=control_images_hi,  # single image or list @ W x H
                strength=float(hires_strength),
                controlnet_conditioning_scale=scales_hi,
                control_guidance_start=starts_hi,
                control_guidance_end=ends_hi,
                num_inference_steps=int(steps),
                guidance_scale=float(cfg),
                width=W, height=H,
                generator=gen,
            )
        final = out2.images[0]
        qr_for_repair = qr_img_hi  # ensure the repair mask matches the final size

    # single, final gentle repair (optional)
    final = enforce_qr_contrast(final, qr_for_repair,
                                strength=float(repair_strength), feather=float(feather))
    return final, lowres, qr_img


# ===================== helpers for img2img =====================
def center_square(im: Image.Image) -> Image.Image:
    w, h = im.size
    if w == h:
        return im
    if w > h:
        off = (w - h) // 2
        return im.crop((off, 0, off + h, h))
    off = (h - w) // 2
    return im.crop((0, off, w, off + w))


def prep_init_image(init_img: Image.Image, target: int) -> Image.Image:
    s = snap8(target)
    im = center_square(init_img.convert("RGB"))
    return im.resize((s, s), Image.LANCZOS)


# ================== img2img + QR Control core ==================
def _qr_img2img_core(model_id: str, init_image: Image.Image, url: str, style_prompt: str,
                     negative: str, steps: int, cfg: float, size: int, border: int,
                     qr_weight: float, seed: int, strength: float,
                     repair_strength: float, feather: float,
                     control_start: float, control_end: float,
                     use_brightness: bool, bright_weight: float,
                     bright_start: float, bright_end: float,
                     bright_mode: str, bright_custom: Image.Image | None,
                     bright_blur_px: float = 2.5, bright_gamma: float = 0.9,
                     bright_autocontrast: bool = True, bright_mix_alpha: float = 0.65):
    s = snap8(size)
    init = prep_init_image(init_image, s)
    qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF", blur_radius=0.0)

    if int(seed) < 0:
        seed = random.randint(0, 2**31 - 1)
    gen = torch.Generator(device=DEV_TORCH).manual_seed(int(seed))

    if use_brightness:
        if bright_mode == "mix":
            bright_img = blend_brightness_maps(qr_img, init, s, alpha=float(bright_mix_alpha),
                                               blur_px=float(bright_blur_px), gamma=float(bright_gamma),
                                               autocontrast=bool(bright_autocontrast))
        else:
            raw_bright = _pick_brightness_image(bright_mode, qr_img, init, bright_custom)
            bright_img = prep_brightness_map(raw_bright, s, bright_mode,
                                             blur_px=float(bright_blur_px), gamma=float(bright_gamma),
                                             autocontrast=bool(bright_autocontrast))
        control_images = [qr_img, bright_img]
        scales = [float(qr_weight), float(bright_weight)]
        starts = [float(control_start), float(bright_start)]
        ends = [float(control_end), float(bright_end)]
    else:
        control_images = qr_img
        scales = float(qr_weight)
        starts = float(control_start)
        ends = float(control_end)

    pipe = get_img2img_pipe(model_id, use_brightness)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()
    with torch.autocast(device_type=DEV_AUTOCAST, dtype=DTYPE):
        out = pipe(
            prompt=str(style_prompt),
            negative_prompt=str(negative or ""),
            image=init,
            control_image=control_images,  # single image or list
            strength=float(strength),
            controlnet_conditioning_scale=scales,
            control_guidance_start=starts,
            control_guidance_end=ends,  # QR end often 0.85–1.0; Brightness end ~0.8–0.9
            num_inference_steps=int(steps),
            guidance_scale=float(cfg),
            width=s, height=s,
            generator=gen,
        )
    final = out.images[0]
    final = enforce_qr_contrast(final, qr_img, strength=float(repair_strength), feather=float(feather))
    return final, init, qr_img


# ============== wrappers for Gradio ==============
@spaces.GPU(duration=120)
def qr_img2img_blend(model_key: str, init_image: Image.Image, url: str, style_prompt: str,
                     negative: str, steps: int, cfg: float, size: int, border: int,
                     qr_weight: float, seed: int, strength: float,
                     repair_strength: float, feather: float,
                     control_start: float, control_end: float,
                     use_brightness: bool, bright_weight: float,
                     bright_start: float, bright_end: float,
                     bright_mode: str, bright_custom: Image.Image | None,
                     bright_blur_px: float, bright_gamma: float,
                     bright_autocontrast: bool, bright_mix_alpha: float):
    model_id = BASE_MODELS.get(model_key, BASE_MODELS["stable-diffusion-v1-5"])
    return _qr_img2img_core(model_id, init_image, url, style_prompt, negative, steps, cfg,
                            size, border, qr_weight, seed, strength, repair_strength, feather,
                            control_start, control_end, use_brightness, bright_weight,
                            bright_start, bright_end, bright_mode, bright_custom,
                            bright_blur_px, bright_gamma, bright_autocontrast, bright_mix_alpha)
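
# Local usage sketch (commented out: it downloads model weights and wants a
# GPU). Positional order follows `_qr_txt2img_core` above; the URL and the
# output filename are placeholders.
#
#   final, lowres, ctrl = _qr_txt2img_core(
#       BASE_MODELS["stable-diffusion-v1-5"],
#       "https://example.com", "baroque palace interior", "lowres, blurry",
#       30, 6.0, 640, 12,        # steps, cfg, size, border
#       1.6, -1,                 # qr_weight, seed (-1 = random)
#       False, 1.0, 0.45,        # use_hires, hires_upscale, hires_strength
#       0.0, 1.0,                # repair_strength, feather
#       0.0, 1.0,                # control_start, control_end
#       True, 0.15, 0.1, 0.8,    # brightness: use, weight, start, end
#       "qr", None,              # bright_mode, bright_custom
#   )
#   final.save("qr_art.png")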

@spaces.GPU(duration=120)
def qr_txt2img_sd15(*args):
    return _qr_txt2img_core(BASE_MODELS["stable-diffusion-v1-5"], *args)


@spaces.GPU(duration=120)
def qr_txt2img_dream(*args):
    return _qr_txt2img_core(BASE_MODELS["dream"], *args)


# ---------- UI ----------
with gr.Blocks() as demo:
    gr.Markdown("# ZeroGPU • QR Control (with optional Brightness ControlNet)")

    # ---- Tab 1: stable-diffusion-v1-5 (Brightness forced ON) ----
    with gr.Tab("stable-diffusion-v1-5"):
        url1 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
        s_prompt1 = gr.Textbox(label="Style prompt", value="japanese painting, elegant shrine and torii, distant mount fuji, autumn maple trees, warm sunlight, 1girl in kimono, highly detailed, intricate patterns, anime key visual, dramatic composition")
        s_negative1 = gr.Textbox(label="Negative prompt", value="ugly, low quality, blurry, nsfw, watermark, text, low contrast, deformed, extra digits")
        size1 = gr.Slider(384, 1024, value=640, step=64, label="Canvas (px)")
        steps1 = gr.Slider(10, 50, value=30, step=1, label="Steps")
        cfg1 = gr.Slider(1.0, 12.0, value=6.0, step=0.1, label="CFG")
        border1 = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)")
        qr_w1 = gr.Slider(0.8, 1.8, value=1.6, step=0.05, label="QR control weight")
        seed1 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
        cstart1 = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start")
        cend1 = gr.Slider(0.4, 1.0, value=1.0, step=0.05, label="QR control end")
        use_hires1 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
        hires_up1 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)")
        hires_str1 = gr.Slider(0.30, 0.60, value=0.45, step=0.05, label="Hi-Res denoise strength")
        repair1 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
        feather1 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
        # Brightness — forced ON via hidden checkbox
        use_bright1 = gr.Checkbox(value=True, visible=False)
        bright_w1 = gr.Slider(0.0, 0.5, value=0.15, step=0.01, label="Brightness weight")
        bright_s1 = gr.Slider(0.0, 0.8, value=0.10, step=0.05, label="Brightness start")
        bright_e1 = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end")
        bright_mode1 = gr.Radio(choices=["qr", "custom"], value="qr", label="Brightness source")
        bright_ref1 = gr.Image(label="(Optional) custom brightness ref", type="pil")

        final_img1 = gr.Image(label="Final (or Hi-Res) image")
        low_img1 = gr.Image(label="Low-res (Stage A) preview")
        ctrl_img1 = gr.Image(label="Control QR used")

        gr.Button("Generate with SD 1.5").click(
            qr_txt2img_sd15,
            [url1, s_prompt1, s_negative1, steps1, cfg1, size1, border1, qr_w1, seed1,
             use_hires1, hires_up1, hires_str1, repair1, feather1, cstart1, cend1,
             use_bright1, bright_w1, bright_s1, bright_e1, bright_mode1, bright_ref1],
            [final_img1, low_img1, ctrl_img1],
            api_name="qr_txt2img_sd15",
        )
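
    # Tab 2 below mirrors Tab 1 control-for-control; only the base model
    # (DreamShaper 8), the default prompts, and the default CFG (6.5 vs. 6.0)
    # differ.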

    # ---- Tab 2: DreamShaper 8 (Brightness forced ON) ----
    with gr.Tab("DreamShaper 8"):
        url2 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
        s_prompt2 = gr.Textbox(label="Style prompt", value="ornate baroque palace interior, gilded details, chandeliers, volumetric light, ultra detailed, cinematic")
        s_negative2 = gr.Textbox(label="Negative prompt", value="lowres, low contrast, blurry, jpeg artifacts, watermark, text, bad anatomy")
        size2 = gr.Slider(384, 1024, value=640, step=64, label="Canvas (px)")
        steps2 = gr.Slider(10, 50, value=30, step=1, label="Steps")
        cfg2 = gr.Slider(1.0, 12.0, value=6.5, step=0.1, label="CFG")
        border2 = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)")
        qr_w2 = gr.Slider(0.8, 1.8, value=1.6, step=0.05, label="QR control weight")
        seed2 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
        cstart2 = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start")
        cend2 = gr.Slider(0.4, 1.0, value=1.0, step=0.05, label="QR control end")
        use_hires2 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
        hires_up2 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)")
        hires_str2 = gr.Slider(0.30, 0.60, value=0.45, step=0.05, label="Hi-Res denoise strength")
        repair2 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
        feather2 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
        use_bright2 = gr.Checkbox(value=True, visible=False)
        bright_w2 = gr.Slider(0.0, 0.5, value=0.15, step=0.01, label="Brightness weight")
        bright_s2 = gr.Slider(0.0, 0.8, value=0.10, step=0.05, label="Brightness start")
        bright_e2 = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end")
        bright_mode2 = gr.Radio(choices=["qr", "custom"], value="qr", label="Brightness source")
        bright_ref2 = gr.Image(label="(Optional) custom brightness ref", type="pil")

        final_img2 = gr.Image(label="Final (or Hi-Res) image")
        low_img2 = gr.Image(label="Low-res (Stage A) preview")
        ctrl_img2 = gr.Image(label="Control QR used")

        gr.Button("Generate with DreamShaper 8").click(
            qr_txt2img_dream,
            [url2, s_prompt2, s_negative2, steps2, cfg2, size2, border2, qr_w2, seed2,
             use_hires2, hires_up2, hires_str2, repair2, feather2, cstart2, cend2,
             use_bright2, bright_w2, bright_s2, bright_e2, bright_mode2, bright_ref2],
            [final_img2, low_img2, ctrl_img2],
            api_name="qr_txt2img_dream",
        )
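
    # In the next tab, the "mix" brightness source blends luminances via
    # blend_brightness_maps: mix = (1 - alpha) * init_luma + alpha * qr_luma,
    # so the "Brightness source mix" slider trades aesthetics (0 = init image)
    # against scanability (1 = QR).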

    # ------------------- Image Blend (img2img + QR) -------------------
    with gr.Tab("Image Blend (img2img + QR)"):
        m_key = gr.Dropdown(choices=list(BASE_MODELS.keys()), value="stable-diffusion-v1-5", label="Base model")
        init_up = gr.Image(label="Upload base image", type="pil")
        url_b = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
        s_prompt_b = gr.Textbox(label="Style prompt", value="highly detailed, cinematic lighting, rich textures")
        s_negative_b = gr.Textbox(label="Negative prompt", value="ugly, low quality, blurry, watermark, text")
        size_b = gr.Slider(384, 1024, value=768, step=64, label="Canvas (px, target)")
        steps_b = gr.Slider(10, 50, value=30, step=1, label="Steps")
        cfg_b = gr.Slider(1.0, 12.0, value=6.0, step=0.1, label="CFG")
        border_b = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)")
        qr_w_b = gr.Slider(0.8, 1.8, value=1.6, step=0.05, label="QR control weight")
        seed_b = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
        # slightly higher default helps the QR emerge on img2img
        strength_b = gr.Slider(0.2, 0.9, value=0.60, step=0.05, label="Img2Img denoise strength (blend amount)")
        cstart_b = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start")
        cend_b = gr.Slider(0.4, 1.0, value=0.85, step=0.05, label="QR control end")
        repair_b = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
        feather_b = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
        # Brightness for img2img defaults ON; "mix" keeps aesthetics + scanability
        use_bright_b = gr.Checkbox(value=True, label="Add Brightness ControlNet")
        bright_w_b = gr.Slider(0.0, 0.5, value=0.15, step=0.01, label="Brightness weight")
        bright_s_b = gr.Slider(0.0, 0.8, value=0.40, step=0.05, label="Brightness start")
        bright_e_b = gr.Slider(0.2, 1.0, value=0.85, step=0.05, label="Brightness end")
        bright_mode_b = gr.Radio(choices=["mix", "qr", "init", "custom"], value="mix", label="Brightness source")
        bright_ref_b = gr.Image(label="(Optional) custom brightness ref", type="pil")
        # Preprocessing knobs for Option A / mix
        bright_blur_b = gr.Slider(0.0, 6.0, value=2.5, step=0.1, label="Brightness blur (px)")
        bright_gamma_b = gr.Slider(0.6, 1.2, value=0.9, step=0.01, label="Brightness gamma")
        bright_auto_b = gr.Checkbox(value=True, label="Brightness auto-contrast")
        # Mix balance (0=init • 1=QR)
        bright_mix_b = gr.Slider(0.0, 1.0, value=0.65, step=0.01, label="Brightness source mix")

        final_b = gr.Image(label="Final blended image")
        init_b = gr.Image(label="(Resized) init image used")
        ctrl_b = gr.Image(label="Control QR used")

        gr.Button("Blend Uploaded Image with QR").click(
            qr_img2img_blend,
            [m_key, init_up, url_b, s_prompt_b, s_negative_b, steps_b, cfg_b, size_b,
             border_b, qr_w_b, seed_b, strength_b, repair_b, feather_b, cstart_b, cend_b,
             use_bright_b, bright_w_b, bright_s_b, bright_e_b, bright_mode_b, bright_ref_b,
             bright_blur_b, bright_gamma_b, bright_auto_b, bright_mix_b],
            [final_b, init_b, ctrl_b],
            api_name="qr_img2img_blend",
        )

if __name__ == "__main__":
    demo.queue(max_size=12).launch()
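
# Remote usage sketch via the named Gradio endpoints: a hedged example
# assuming this file runs as a Hugging Face Space ("user/space-name" is a
# placeholder). gradio_client returns the three output images as file paths.
#
#   from gradio_client import Client
#   client = Client("user/space-name")
#   final_path, lowres_path, ctrl_path = client.predict(
#       "https://example.com",          # URL/Text
#       "japanese painting", "lowres",  # style / negative prompts
#       30, 6.0, 640, 12, 1.6, -1,      # steps, cfg, size, border, qr weight, seed
#       True, 2.0, 0.45,                # Hi-Res Fix settings
#       0.0, 1.0, 0.0, 1.0,             # repair, feather, control start/end
#       True, 0.15, 0.1, 0.8,           # brightness settings
#       "qr", None,                     # brightness source / custom ref
#       api_name="/qr_txt2img_sd15",
#   )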