prithivMLmods committed on
Commit
c4c937b
·
verified ·
1 Parent(s): 6076cb5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -12
app.py CHANGED
@@ -10,6 +10,7 @@ from typing import Iterable
10
  from diffusers import FluxKontextPipeline
11
  from diffusers.utils import load_image
12
  from huggingface_hub import hf_hub_download
 
13
  from gradio_imageslider import ImageSlider
14
 
15
  from gradio.themes import Soft
@@ -35,7 +36,7 @@ class OrangeRedTheme(Soft):
35
  self,
36
  *,
37
  primary_hue: colors.Color | str = colors.gray,
38
- secondary_hue: colors.Color | str = colors.orange_red,
39
  neutral_hue: colors.Color | str = colors.slate,
40
  text_size: sizes.Size | str = sizes.text_lg,
41
  font: fonts.Font | str | Iterable[fonts.Font | str] = (
@@ -83,6 +84,7 @@ class OrangeRedTheme(Soft):
83
 
84
  orange_red_theme = OrangeRedTheme()
85
 
 
86
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
87
 
88
  # --- # Device and CUDA Setup Check ---
@@ -100,17 +102,18 @@ print("Using device:", device)
100
  MAX_SEED = np.iinfo(np.int32).max
101
  pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
102
 
103
- # Load Adapters
104
  pipe.load_lora_weights("prithivMLmods/Kontext-Top-Down-View", weight_name="Kontext-Top-Down-View.safetensors", adapter_name="top-down")
105
  pipe.load_lora_weights("prithivMLmods/Kontext-Bottom-Up-View", weight_name="Kontext-Bottom-Up-View.safetensors", adapter_name="bottom-up")
106
  pipe.load_lora_weights("prithivMLmods/Kontext-CAM-Left-View", weight_name="Kontext-CAM-Left-View.safetensors", adapter_name="left-view")
107
  pipe.load_lora_weights("prithivMLmods/Kontext-CAM-Right-View", weight_name="Kontext-CAM-Right-View.safetensors", adapter_name="right-view")
108
  pipe.load_lora_weights("starsfriday/Kontext-Remover-General-LoRA", weight_name="kontext_remove.safetensors", adapter_name="kontext-remove")
109
 
 
 
110
  @spaces.GPU
111
- def infer(input_image, prompt, lora_adapter, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
112
  """
113
- Perform image editing using FLUX Kontext adapters.
114
  """
115
  if not input_image:
116
  raise gr.Error("Please upload an image for editing.")
@@ -141,6 +144,10 @@ def infer(input_image, prompt, lora_adapter, seed=42, randomize_seed=False, guid
141
  generator=torch.Generator().manual_seed(seed),
142
  ).images[0]
143
 
 
 
 
 
144
  return (original_image, image), seed, gr.Button(visible=True)
145
 
146
  @spaces.GPU
@@ -148,7 +155,7 @@ def infer_example(input_image, prompt, lora_adapter):
148
  """
149
  Wrapper function for gr.Examples to call the main infer logic for the slider.
150
  """
151
- (original_image, generated_image), seed, _ = infer(input_image, prompt, lora_adapter)
152
  return (original_image, generated_image), seed
153
 
154
  css="""
@@ -159,7 +166,7 @@ css="""
159
  #main-title h1 {font-size: 2.1em !important;}
160
  """
161
 
162
- with gr.Blocks() as demo:
163
 
164
  with gr.Column(elem_id="col-container"):
165
  gr.Markdown("# **Kontext-Photo-Mate-v2**", elem_id="main-title")
@@ -206,8 +213,7 @@ with gr.Blocks() as demo:
206
  )
207
 
208
  with gr.Column():
209
- # Use ImageSlider to display (Before, After) tuple correctly
210
- output_slider = gr.Image(label="Output Image", interactive=False, format="png")
211
  reuse_button = gr.Button("Reuse this image", visible=False)
212
 
213
  with gr.Row():
@@ -216,6 +222,9 @@ with gr.Blocks() as demo:
216
  choices=["Kontext-Top-Down-View", "Kontext-Remover", "Kontext-Bottom-Up-View", "Kontext-CAM-Left-View", "Kontext-CAM-Right-View"],
217
  value="Kontext-Top-Down-View"
218
  )
 
 
 
219
 
220
  gr.Examples(
221
  examples=[
@@ -228,22 +237,21 @@ with gr.Blocks() as demo:
228
  inputs=[input_image, prompt, lora_adapter],
229
  outputs=[output_slider, seed],
230
  fn=infer_example,
231
- cache_examples=False,
232
  label="Examples"
233
  )
234
 
235
  gr.on(
236
  triggers=[run_button.click, prompt.submit],
237
  fn=infer,
238
- inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
239
  outputs=[output_slider, seed, reuse_button]
240
  )
241
 
242
- # Logic to reuse the generated image (2nd image in the tuple)
243
  reuse_button.click(
244
  fn=lambda images: images[1] if isinstance(images, (list, tuple)) and len(images) > 1 else images,
245
  inputs=[output_slider],
246
  outputs=[input_image]
247
  )
248
 
249
- demo.launch(theme=orange_red_theme, css=css, mcp_server=True, ssr_mode=False, show_error=True)
 
10
  from diffusers import FluxKontextPipeline
11
  from diffusers.utils import load_image
12
  from huggingface_hub import hf_hub_download
13
+ from aura_sr import AuraSR
14
  from gradio_imageslider import ImageSlider
15
 
16
  from gradio.themes import Soft
 
36
  self,
37
  *,
38
  primary_hue: colors.Color | str = colors.gray,
39
+ secondary_hue: colors.Color | str = colors.orange_red, # Use the new color
40
  neutral_hue: colors.Color | str = colors.slate,
41
  text_size: sizes.Size | str = sizes.text_lg,
42
  font: fonts.Font | str | Iterable[fonts.Font | str] = (
 
84
 
85
  orange_red_theme = OrangeRedTheme()
86
 
87
+
88
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
89
 
90
  # --- # Device and CUDA Setup Check ---
 
102
  MAX_SEED = np.iinfo(np.int32).max
103
  pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
104
 
 
105
  pipe.load_lora_weights("prithivMLmods/Kontext-Top-Down-View", weight_name="Kontext-Top-Down-View.safetensors", adapter_name="top-down")
106
  pipe.load_lora_weights("prithivMLmods/Kontext-Bottom-Up-View", weight_name="Kontext-Bottom-Up-View.safetensors", adapter_name="bottom-up")
107
  pipe.load_lora_weights("prithivMLmods/Kontext-CAM-Left-View", weight_name="Kontext-CAM-Left-View.safetensors", adapter_name="left-view")
108
  pipe.load_lora_weights("prithivMLmods/Kontext-CAM-Right-View", weight_name="Kontext-CAM-Right-View.safetensors", adapter_name="right-view")
109
  pipe.load_lora_weights("starsfriday/Kontext-Remover-General-LoRA", weight_name="kontext_remove.safetensors", adapter_name="kontext-remove")
110
 
111
+ aura_sr = AuraSR.from_pretrained("fal/AuraSR-v2")
112
+
113
  @spaces.GPU
114
+ def infer(input_image, prompt, lora_adapter, upscale_image, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
115
  """
116
+ Perform image editing and optional upscaling, returning a pair for the ImageSlider.
117
  """
118
  if not input_image:
119
  raise gr.Error("Please upload an image for editing.")
 
144
  generator=torch.Generator().manual_seed(seed),
145
  ).images[0]
146
 
147
+ if upscale_image:
148
+ progress(0.8, desc="Upscaling image...")
149
+ image = aura_sr.upscale_4x(image)
150
+
151
  return (original_image, image), seed, gr.Button(visible=True)
152
 
153
  @spaces.GPU
 
155
  """
156
  Wrapper function for gr.Examples to call the main infer logic for the slider.
157
  """
158
+ (original_image, generated_image), seed, _ = infer(input_image, prompt, lora_adapter, upscale_image=False)
159
  return (original_image, generated_image), seed
160
 
161
  css="""
 
166
  #main-title h1 {font-size: 2.1em !important;}
167
  """
168
 
169
+ with gr.Blocks(css=css, theme=orange_red_theme) as demo:
170
 
171
  with gr.Column(elem_id="col-container"):
172
  gr.Markdown("# **Kontext-Photo-Mate-v2**", elem_id="main-title")
 
213
  )
214
 
215
  with gr.Column():
216
+ output_slider = ImageSlider(label="Before / After", show_label=False, interactive=False)
 
217
  reuse_button = gr.Button("Reuse this image", visible=False)
218
 
219
  with gr.Row():
 
222
  choices=["Kontext-Top-Down-View", "Kontext-Remover", "Kontext-Bottom-Up-View", "Kontext-CAM-Left-View", "Kontext-CAM-Right-View"],
223
  value="Kontext-Top-Down-View"
224
  )
225
+
226
+ with gr.Row():
227
+ upscale_checkbox = gr.Checkbox(label="Upscale the final image", value=False)
228
 
229
  gr.Examples(
230
  examples=[
 
237
  inputs=[input_image, prompt, lora_adapter],
238
  outputs=[output_slider, seed],
239
  fn=infer_example,
240
+ cache_examples="lazy",
241
  label="Examples"
242
  )
243
 
244
  gr.on(
245
  triggers=[run_button.click, prompt.submit],
246
  fn=infer,
247
+ inputs=[input_image, prompt, lora_adapter, upscale_checkbox, seed, randomize_seed, guidance_scale, steps],
248
  outputs=[output_slider, seed, reuse_button]
249
  )
250
 
 
251
  reuse_button.click(
252
  fn=lambda images: images[1] if isinstance(images, (list, tuple)) and len(images) > 1 else images,
253
  inputs=[output_slider],
254
  outputs=[input_image]
255
  )
256
 
257
+ demo.launch(mcp_server=True, ssr_mode=False, show_error=True)