Adnan committed
Commit d7e092a · verified · 1 Parent(s): 75eb378

Update api_providers.py

Files changed (1)
  1. api_providers.py +221 -118
api_providers.py CHANGED
@@ -1,7 +1,7 @@
1
  """
2
- TimeLapseForge Universal API Provider Layer v2.1
3
- All API client imports are LAZY — packages install only when needed.
4
- App will always start regardless of which API packages are installed.
5
  """
6
 
7
  import os
@@ -15,59 +15,123 @@ from typing import Optional, Dict, List, Any, Tuple
15
  from abc import ABC, abstractmethod
16
 
17
 
18
- def _safe_import(package_name, pip_name=None):
19
- """Try importing a package. Return None if not available."""
20
  try:
21
  import importlib
22
  return importlib.import_module(package_name)
23
  except ImportError:
24
- display = pip_name or package_name
25
  return None
26
 
27
 
28
  def _require_import(package_name, pip_name=None):
29
- """Import a package or raise a friendly error."""
30
  mod = _safe_import(package_name)
31
  if mod is None:
32
  pip = pip_name or package_name
33
  raise ImportError(
34
- f"📦 Package '{pip}' is not installed. "
35
- f"This provider requires it. The Space admin can add '{pip}' to requirements.txt, "
36
- f"or you can use a different provider that doesn't need it."
37
  )
38
  return mod
39
 
40
 
41
- # ═══════════════════════════════════════════
42
  # BASE PROVIDER CLASS
43
- # ═══════════════════════════════════════════
44
 
45
  class BaseProvider(ABC):
46
- name: str = "base"
47
- display_name: str = "Base Provider"
48
- website: str = ""
49
- supports_img2img: bool = False
50
- supports_negative_prompt: bool = True
51
- default_model: str = ""
52
- available_models: List[str] = []
53
- requires_package: str = "" # pip package name needed
54
-
55
- def __init__(self, api_key: str = ""):
 
56
  self.api_key = api_key.strip()
57
 
58
  @abstractmethod
59
  def generate_image(
60
- self, prompt: str, negative_prompt: str = "",
61
- width: int = 1024, height: int = 1024,
62
- seed: Optional[int] = None, model: Optional[str] = None, **kwargs,
63
- ) -> Image.Image:
64
  pass
65
 
66
  def img2img(
67
- self, prompt: str, image: Image.Image, strength: float = 0.4,
68
- negative_prompt: str = "", seed: Optional[int] = None,
69
- model: Optional[str] = None, **kwargs,
70
- ) -> Image.Image:
71
  return self.generate_image(
72
  prompt=prompt, negative_prompt=negative_prompt,
73
  width=image.width, height=image.height,
@@ -75,40 +139,41 @@ class BaseProvider(ABC):
75
  )
76
 
77
  @staticmethod
78
- def _image_to_base64(img: Image.Image, fmt: str = "PNG") -> str:
79
  buf = io.BytesIO()
80
  img.save(buf, format=fmt)
81
  return base64.b64encode(buf.getvalue()).decode("utf-8")
82
 
83
  @staticmethod
84
- def _base64_to_image(b64: str) -> Image.Image:
85
  data = base64.b64decode(b64)
86
  return Image.open(io.BytesIO(data)).convert("RGB")
87
 
88
  @staticmethod
89
- def _url_to_image(url: str) -> Image.Image:
90
  resp = requests.get(url, timeout=120)
91
  resp.raise_for_status()
92
  return Image.open(io.BytesIO(resp.content)).convert("RGB")
93
 
94
  @staticmethod
95
- def _bytes_to_image(data: bytes) -> Image.Image:
96
  return Image.open(io.BytesIO(data)).convert("RGB")
97
 
98
 
99
- # ═══════════════════════════════════════════
100
  # 1. OPENAI (DALL-E 3 / gpt-image-1)
101
- # ═══════════════════════════════════════════
102
 
103
  class OpenAIProvider(BaseProvider):
104
  name = "openai"
105
- display_name = "OpenAI (DALL·E 3 / gpt-image-1)"
106
  website = "https://platform.openai.com/api-keys"
107
  supports_img2img = False
108
  supports_negative_prompt = False
109
  default_model = "dall-e-3"
110
  available_models = ["dall-e-3", "dall-e-2", "gpt-image-1"]
111
  requires_package = "openai"
 
112
 
113
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
114
  seed=None, model=None, **kwargs):
@@ -116,6 +181,16 @@ class OpenAIProvider(BaseProvider):
116
  client = openai_mod.OpenAI(api_key=self.api_key)
117
  model = model or self.default_model
118
 
119
  size_map = {
120
  (1024, 1024): "1024x1024", (1792, 1024): "1792x1024",
121
  (1024, 1792): "1024x1792", (512, 512): "512x512",
@@ -125,14 +200,14 @@ class OpenAIProvider(BaseProvider):
125
 
126
  if model == "gpt-image-1":
127
  response = client.images.generate(
128
- model="gpt-image-1", prompt=prompt, n=1, size=size,
129
  )
130
  if hasattr(response.data[0], 'b64_json') and response.data[0].b64_json:
131
  return self._base64_to_image(response.data[0].b64_json)
132
  return self._url_to_image(response.data[0].url)
133
  else:
134
  api_kwargs = dict(
135
- model=model, prompt=prompt, n=1, size=size,
136
  response_format="b64_json",
137
  )
138
  if model == "dall-e-3":
@@ -142,9 +217,9 @@ class OpenAIProvider(BaseProvider):
142
  return self._base64_to_image(response.data[0].b64_json)
143
 
144
 
145
- # ═══════════════════════════════════════════
146
- # 2. STABILITY AI (SD3 / SDXL)
147
- # ═══════════════════════════════════════════
148
 
149
  class StabilityProvider(BaseProvider):
150
  name = "stability"
@@ -158,24 +233,26 @@ class StabilityProvider(BaseProvider):
158
  "sd3-large", "sd3-large-turbo", "sd3-medium",
159
  "stable-image-core", "stable-image-ultra",
160
  ]
161
- requires_package = "" # uses requests only
 
162
 
163
  API_BASE = "https://api.stability.ai/v2beta"
164
 
165
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
166
  seed=None, model=None, **kwargs):
167
  model = model or self.default_model
168
- headers = {"Authorization": f"Bearer {self.api_key}", "Accept": "image/*"}
169
- data = {"prompt": prompt, "output_format": "png", "width": width, "height": height}
 
170
  if negative_prompt:
171
- data["negative_prompt"] = negative_prompt
172
  if seed is not None:
173
  data["seed"] = seed
174
 
175
  if "stable-image" in model:
176
- url = f"{self.API_BASE}/stable-image/generate/{model.replace('stable-image-', '')}"
177
  else:
178
- url = f"{self.API_BASE}/stable-image/generate/sd3"
179
  data["model"] = model
180
 
181
  resp = requests.post(url, headers=headers, files={"none": ""}, data=data, timeout=120)
@@ -184,30 +261,31 @@ class StabilityProvider(BaseProvider):
184
 
185
  def img2img(self, prompt, image, strength=0.4, negative_prompt="",
186
  seed=None, model=None, **kwargs):
187
- headers = {"Authorization": f"Bearer {self.api_key}", "Accept": "image/*"}
 
188
  buf = io.BytesIO()
189
  image.save(buf, format="PNG")
190
  buf.seek(0)
191
 
192
  data = {
193
- "prompt": prompt, "strength": strength,
194
  "output_format": "png", "mode": "image-to-image",
195
  }
196
  if negative_prompt:
197
- data["negative_prompt"] = negative_prompt
198
  if seed is not None:
199
  data["seed"] = seed
200
 
201
  files = {"image": ("input.png", buf, "image/png")}
202
- url = f"{self.API_BASE}/stable-image/generate/sd3"
203
  resp = requests.post(url, headers=headers, files=files, data=data, timeout=120)
204
  resp.raise_for_status()
205
  return self._bytes_to_image(resp.content)
206
 
207
 
208
- # ═══════════════════════════════════════════
209
- # 3. REPLICATE (Flux, SDXL, any model)
210
- # ═══════════════════════════════════════════
211
 
212
  class ReplicateProvider(BaseProvider):
213
  name = "replicate"
@@ -225,16 +303,18 @@ class ReplicateProvider(BaseProvider):
225
  "bytedance/sdxl-lightning-4step:latest",
226
  ]
227
  requires_package = "replicate"
 
228
 
229
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
230
  seed=None, model=None, **kwargs):
231
  replicate_mod = _require_import("replicate")
232
  client = replicate_mod.Client(api_token=self.api_key)
233
  model_id = model or self.default_model
 
234
 
235
- input_params = {"prompt": prompt, "width": width, "height": height}
236
  if negative_prompt and "flux" not in model_id.lower():
237
- input_params["negative_prompt"] = negative_prompt
238
  if seed is not None:
239
  input_params["seed"] = seed
240
 
@@ -252,14 +332,15 @@ class ReplicateProvider(BaseProvider):
252
  replicate_mod = _require_import("replicate")
253
  client = replicate_mod.Client(api_token=self.api_key)
254
  model_id = model or "stability-ai/sdxl:latest"
 
255
 
256
  buf = io.BytesIO()
257
  image.save(buf, format="PNG")
258
  buf.seek(0)
259
 
260
- input_params = {"prompt": prompt, "image": buf, "prompt_strength": strength}
261
  if negative_prompt:
262
- input_params["negative_prompt"] = negative_prompt
263
  if seed is not None:
264
  input_params["seed"] = seed
265
 
@@ -268,9 +349,9 @@ class ReplicateProvider(BaseProvider):
268
  return self._url_to_image(url)
269
 
270
 
271
- # ═══════════════════════════════════════════
272
  # 4. TOGETHER AI
273
- # ═══════════════════════════════════════════
274
 
275
  class TogetherProvider(BaseProvider):
276
  name = "together"
@@ -286,17 +367,19 @@ class TogetherProvider(BaseProvider):
286
  "stabilityai/stable-diffusion-xl-base-1.0",
287
  ]
288
  requires_package = "together"
 
289
 
290
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
291
  seed=None, model=None, **kwargs):
292
  together_mod = _require_import("together")
293
  client = together_mod.Together(api_key=self.api_key)
294
  model_id = model or self.default_model
 
295
 
296
- params = dict(model=model_id, prompt=prompt, width=width, height=height,
297
  steps=kwargs.get("steps", 28), n=1, response_format="b64_json")
298
  if negative_prompt:
299
- params["negative_prompt"] = negative_prompt
300
  if seed is not None:
301
  params["seed"] = seed
302
 
@@ -304,9 +387,9 @@ class TogetherProvider(BaseProvider):
304
  return self._base64_to_image(response.data[0].b64_json)
305
 
306
 
307
- # ═══════════════════════════════════════════
308
  # 5. FAL.AI
309
- # ═══════════════════════════════════════════
310
 
311
  class FalProvider(BaseProvider):
312
  name = "fal"
@@ -321,16 +404,18 @@ class FalProvider(BaseProvider):
321
  "fal-ai/stable-diffusion-v35-large", "fal-ai/recraft-v3",
322
  ]
323
  requires_package = "fal_client"
 
324
 
325
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
326
  seed=None, model=None, **kwargs):
327
  fal_client = _require_import("fal_client", "fal-client")
328
  os.environ["FAL_KEY"] = self.api_key
329
  model_id = model or self.default_model
 
330
 
331
- arguments = {"prompt": prompt, "image_size": {"width": width, "height": height}, "num_images": 1}
332
  if negative_prompt:
333
- arguments["negative_prompt"] = negative_prompt
334
  if seed is not None:
335
  arguments["seed"] = seed
336
 
@@ -344,13 +429,14 @@ class FalProvider(BaseProvider):
344
  seed=None, model=None, **kwargs):
345
  fal_client = _require_import("fal_client", "fal-client")
346
  os.environ["FAL_KEY"] = self.api_key
 
347
 
348
  b64 = self._image_to_base64(image)
349
- data_uri = f"data:image/png;base64,{b64}"
350
 
351
- arguments = {"prompt": prompt, "image_url": data_uri, "strength": strength, "num_images": 1}
352
  if negative_prompt:
353
- arguments["negative_prompt"] = negative_prompt
354
  if seed is not None:
355
  arguments["seed"] = seed
356
 
@@ -362,9 +448,9 @@ class FalProvider(BaseProvider):
362
  raise ValueError("No image from Fal.ai img2img")
363
 
364
 
365
- # ═══════════════════════════════════════════
366
- # 6. GOOGLE GEMINI (Imagen 3)
367
- # ═══════════════════════════════════════════
368
 
369
  class GoogleGeminiProvider(BaseProvider):
370
  name = "google"
@@ -375,17 +461,19 @@ class GoogleGeminiProvider(BaseProvider):
375
  default_model = "imagen-3.0-generate-002"
376
  available_models = ["imagen-3.0-generate-002", "imagen-3.0-fast-generate-001"]
377
  requires_package = "google.generativeai"
 
378
 
379
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
380
  seed=None, model=None, **kwargs):
381
  genai = _require_import("google.generativeai", "google-generativeai")
382
  genai.configure(api_key=self.api_key)
383
  model_id = model or self.default_model
 
384
 
385
  imagen = genai.ImageGenerationModel(model_id)
386
- params = dict(prompt=prompt, number_of_images=1)
387
  if negative_prompt:
388
- params["negative_prompt"] = negative_prompt
389
 
390
  response = imagen.generate_images(**params)
391
  if response.images:
@@ -393,9 +481,9 @@ class GoogleGeminiProvider(BaseProvider):
393
  raise ValueError("No image returned from Imagen")
394
 
395
 
396
- # ═══════════════════════════════════════════
397
  # 7. HUGGING FACE INFERENCE API
398
- # ═══════════════════════════════════════════
399
 
400
  class HuggingFaceProvider(BaseProvider):
401
  name = "huggingface"
@@ -410,19 +498,21 @@ class HuggingFaceProvider(BaseProvider):
410
  "stabilityai/stable-diffusion-3.5-large",
411
  "runwayml/stable-diffusion-v1-5",
412
  ]
413
- requires_package = "" # uses requests only
 
414
 
415
  API_BASE = "https://api-inference.huggingface.co/models"
416
 
417
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
418
  seed=None, model=None, **kwargs):
419
  model_id = model or self.default_model
420
- url = f"{self.API_BASE}/{model_id}"
421
- headers = {"Authorization": f"Bearer {self.api_key}"}
 
422
 
423
- payload = {"inputs": prompt, "parameters": {"width": width, "height": height}}
424
  if negative_prompt:
425
- payload["parameters"]["negative_prompt"] = negative_prompt
426
  if seed is not None:
427
  payload["parameters"]["seed"] = seed
428
 
@@ -434,9 +524,9 @@ class HuggingFaceProvider(BaseProvider):
434
  return self._bytes_to_image(resp.content)
435
 
436
 
437
- # ═══════════════════════════════════════════
438
  # 8. xAI GROK
439
- # ═══════════════════════════════════════════
440
 
441
  class XAIProvider(BaseProvider):
442
  name = "xai"
@@ -447,21 +537,23 @@ class XAIProvider(BaseProvider):
447
  default_model = "grok-2-image"
448
  available_models = ["grok-2-image"]
449
  requires_package = "openai"
 
450
 
451
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
452
  seed=None, model=None, **kwargs):
453
  openai_mod = _require_import("openai")
454
  client = openai_mod.OpenAI(api_key=self.api_key, base_url="https://api.x.ai/v1")
 
455
  response = client.images.generate(
456
- model=model or self.default_model, prompt=prompt,
457
  n=1, response_format="b64_json", size="1024x1024",
458
  )
459
  return self._base64_to_image(response.data[0].b64_json)
460
 
461
 
462
- # ═══════════════════════════════════════════
463
  # 9. FIREWORKS AI
464
- # ═══════════════════════════════════════════
465
 
466
  class FireworksProvider(BaseProvider):
467
  name = "fireworks"
@@ -476,18 +568,20 @@ class FireworksProvider(BaseProvider):
476
  "accounts/fireworks/models/flux-1-dev-fp8",
477
  "accounts/fireworks/models/stable-diffusion-xl-1024-v1-0",
478
  ]
479
- requires_package = "" # uses requests only
 
480
 
481
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
482
  seed=None, model=None, **kwargs):
483
  url = "https://api.fireworks.ai/inference/v1/images/generations"
484
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
 
485
  payload = {
486
- "model": model or self.default_model, "prompt": prompt,
487
- "n": 1, "size": f"{width}x{height}", "response_format": "b64_json",
488
  }
489
  if negative_prompt:
490
- payload["negative_prompt"] = negative_prompt
491
  if seed is not None:
492
  payload["seed"] = seed
493
 
@@ -497,9 +591,9 @@ class FireworksProvider(BaseProvider):
497
  return self._base64_to_image(data["data"][0]["b64_json"])
498
 
499
 
500
- # ═══════════════════════════════════════════
501
  # 10. IDEOGRAM
502
- # ═══════════════════════════════════════════
503
 
504
  class IdeogramProvider(BaseProvider):
505
  name = "ideogram"
@@ -509,20 +603,22 @@ class IdeogramProvider(BaseProvider):
509
  supports_negative_prompt = True
510
  default_model = "V_2"
511
  available_models = ["V_2", "V_2_TURBO", "V_1", "V_1_TURBO"]
512
- requires_package = "" # uses requests only
 
513
 
514
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
515
  seed=None, model=None, **kwargs):
516
  url = "https://api.ideogram.ai/generate"
517
  headers = {"Api-Key": self.api_key, "Content-Type": "application/json"}
 
518
  payload = {
519
  "image_request": {
520
- "prompt": prompt, "model": model or self.default_model,
521
  "magic_prompt_option": "AUTO", "aspect_ratio": "ASPECT_1_1",
522
  }
523
  }
524
  if negative_prompt:
525
- payload["image_request"]["negative_prompt"] = negative_prompt
526
  if seed is not None:
527
  payload["image_request"]["seed"] = seed
528
 
@@ -532,9 +628,9 @@ class IdeogramProvider(BaseProvider):
532
  return self._url_to_image(data["data"][0]["url"])
533
 
534
 
535
- # ═══════════════════════════════════════════
536
  # 11. LEONARDO AI
537
- # ═══════════════════════════════════════════
538
 
539
  class LeonardoProvider(BaseProvider):
540
  name = "leonardo"
@@ -548,29 +644,31 @@ class LeonardoProvider(BaseProvider):
548
  "aa77f04e-3eec-4034-9c07-d0f619684628",
549
  "1e60896f-3c26-4296-8ecc-53e2afecc132",
550
  ]
551
- requires_package = "" # uses requests only
 
552
 
553
  API_BASE = "https://cloud.leonardo.ai/api/rest/v1"
554
 
555
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
556
  seed=None, model=None, **kwargs):
557
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
 
558
  payload = {
559
- "prompt": prompt, "modelId": model or self.default_model,
560
  "width": width, "height": height, "num_images": 1,
561
  }
562
  if negative_prompt:
563
- payload["negative_prompt"] = negative_prompt
564
  if seed is not None:
565
  payload["seed"] = seed
566
 
567
- resp = requests.post(f"{self.API_BASE}/generations", headers=headers, json=payload, timeout=60)
568
  resp.raise_for_status()
569
  gen_id = resp.json()["sdGenerationJob"]["generationId"]
570
 
571
  for _ in range(30):
572
  time.sleep(5)
573
- poll = requests.get(f"{self.API_BASE}/generations/{gen_id}", headers=headers, timeout=30)
574
  poll.raise_for_status()
575
  gen = poll.json().get("generations_by_pk", {})
576
  if gen.get("status") == "COMPLETE":
@@ -581,9 +679,9 @@ class LeonardoProvider(BaseProvider):
581
  raise TimeoutError("Leonardo generation timed out")
582
 
583
 
584
- # ═══════════════════════════════════════════
585
  # 12. CUSTOM OPENAI-COMPATIBLE
586
- # ═══════════════════════════════════════════
587
 
588
  class CustomOpenAIProvider(BaseProvider):
589
  name = "custom_openai"
@@ -594,6 +692,7 @@ class CustomOpenAIProvider(BaseProvider):
594
  default_model = "dall-e-3"
595
  available_models = ["dall-e-3", "dall-e-2", "custom"]
596
  requires_package = "openai"
 
597
 
598
  def __init__(self, api_key="", base_url=""):
599
  super().__init__(api_key)
@@ -606,16 +705,17 @@ class CustomOpenAIProvider(BaseProvider):
606
  if self.base_url:
607
  ck["base_url"] = self.base_url
608
  client = openai_mod.OpenAI(**ck)
 
609
  response = client.images.generate(
610
- model=model or self.default_model, prompt=prompt,
611
- n=1, size=f"{width}x{height}", response_format="b64_json",
612
  )
613
  return self._base64_to_image(response.data[0].b64_json)
614
 
615
 
616
- # ═══════════════════════════════════════════
617
  # 13. DIRECT URL API
618
- # ═══════════════════════════════════════════
619
 
620
  class DirectURLProvider(BaseProvider):
621
  name = "direct_url"
@@ -626,6 +726,7 @@ class DirectURLProvider(BaseProvider):
626
  default_model = "custom"
627
  available_models = ["custom"]
628
  requires_package = ""
 
629
 
630
  def __init__(self, api_key="", endpoint_url=""):
631
  super().__init__(api_key)
@@ -636,10 +737,11 @@ class DirectURLProvider(BaseProvider):
636
  if not self.endpoint_url:
637
  raise ValueError("No endpoint URL provided")
638
 
639
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
640
- payload = {"prompt": prompt, "width": width, "height": height}
 
641
  if negative_prompt:
642
- payload["negative_prompt"] = negative_prompt
643
  if seed is not None:
644
  payload["seed"] = seed
645
  if model and model != "custom":
@@ -662,7 +764,7 @@ class DirectURLProvider(BaseProvider):
662
  for subkey in ["b64_json", "url", "image"]:
663
  if subkey in item:
664
  val = item[subkey]
665
- if val.startswith("http"):
666
  return self._url_to_image(val)
667
  return self._base64_to_image(val)
668
  if isinstance(item, str):
@@ -673,9 +775,9 @@ class DirectURLProvider(BaseProvider):
673
  raise ValueError("Could not parse image from API response")
674
 
675
 
676
- # ═══════════════════════════════════════════
677
  # PROVIDER REGISTRY
678
- # ═══════════════════════════════════════════
679
 
680
  PROVIDERS = {
681
  "openai": OpenAIProvider,
@@ -699,7 +801,7 @@ PROVIDER_DISPLAY_NAMES = {cls.display_name: key for key, cls in PROVIDERS.items(
699
  def get_provider(provider_name, api_key, **kwargs):
700
  cls = PROVIDERS.get(provider_name)
701
  if cls is None:
702
- raise ValueError(f"Unknown provider: {provider_name}")
703
  if provider_name == "custom_openai":
704
  return cls(api_key=api_key, base_url=kwargs.get("base_url", ""))
705
  if provider_name == "direct_url":
@@ -719,6 +821,7 @@ def get_provider_info():
719
  "website": cls.website, "supports_img2img": cls.supports_img2img,
720
  "default_model": cls.default_model, "available_models": cls.available_models,
721
  "requires_package": pkg, "package_installed": installed,
 
722
  })
723
  return info
724
 
 
1
  """
2
+ TimeLapseForge - Universal API Provider Layer v2.2
3
+ All API client imports are LAZY.
4
+ Smart prompt truncation per provider.
5
  """
6
 
7
  import os
 
15
  from abc import ABC, abstractmethod
16
 
17
 
18
+ def _safe_import(package_name):
 
19
  try:
20
  import importlib
21
  return importlib.import_module(package_name)
22
  except ImportError:
 
23
  return None
24
 
25
 
26
  def _require_import(package_name, pip_name=None):
 
27
  mod = _safe_import(package_name)
28
  if mod is None:
29
  pip = pip_name or package_name
30
  raise ImportError(
31
+ "Package '" + pip + "' is not installed. "
32
+ "Add it to requirements.txt or use a different provider."
 
33
  )
34
  return mod
35
 
36
 
37
+ # ============================================
38
+ # SMART PROMPT TRUNCATOR
39
+ # ============================================
40
+
41
+ def smart_truncate(text, max_length, preserve_end=True):
42
+ """
43
+ Intelligently truncate a prompt to fit within API limits.
44
+ Preserves the most important parts: subject description and style suffix.
45
+ """
46
+ if not text or len(text) <= max_length:
47
+ return text
48
+
49
+ # Strategy: keep first part (subject) and last part (style keywords)
50
+ if preserve_end:
51
+ # Find the last comma-separated style section
52
+ parts = text.rsplit(", ", 1)
53
+ if len(parts) == 2 and len(parts[1]) < max_length // 3:
54
+ suffix = ", " + parts[1]
55
+ available = max_length - len(suffix) - 5 # 5 for " ... "
56
+ if available > 100:
57
+ return text[:available] + " ... " + suffix
58
+
59
+ # Simple truncation with clean cut at word boundary
60
+ truncated = text[:max_length - 3]
61
+ last_space = truncated.rfind(" ")
62
+ if last_space > max_length // 2:
63
+ truncated = truncated[:last_space]
64
+ return truncated + "..."
65
+
66
+
67
+ def split_prompt_parts(full_prompt):
68
+ """
69
+ Split a long prompt into core subject and style modifiers.
70
+ Returns (core, style) where style is the reusable suffix.
71
+ """
72
+ # Common style keywords that appear at the end
73
+ style_markers = [
74
+ "photorealistic", "cinematic", "4K", "8K", "detailed",
75
+ "shot on", "lens", "lighting", "consistent", "camera",
76
+ "high quality", "professional", "dramatic",
77
+ ]
78
+
79
+ # Try to find where style section starts
80
+ lower = full_prompt.lower()
81
+ best_split = len(full_prompt)
82
+
83
+ for marker in style_markers:
84
+ idx = lower.rfind(marker)
85
+ if idx > len(full_prompt) // 2:
86
+ # Find the comma before this marker
87
+ comma_idx = full_prompt.rfind(", ", 0, idx)
88
+ if comma_idx > len(full_prompt) // 3:
89
+ best_split = min(best_split, comma_idx)
90
+
91
+ if best_split < len(full_prompt):
92
+ core = full_prompt[:best_split].strip().rstrip(",")
93
+ style = full_prompt[best_split:].strip().lstrip(",").strip()
94
+ return core, style
95
+
96
+ return full_prompt, ""
97
+
98
+
99
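# --- Illustrative usage sketch (not part of this diff) -------------------------
# A minimal check of how the truncation helpers above are meant to behave,
# assuming the updated api_providers.py is importable; the prompt text is made up.
#
#   from api_providers import smart_truncate, split_prompt_parts
#
#   prompt = ("A lone lighthouse on a basalt cliff at dawn, waves crashing below, " * 6
#             + "photorealistic, cinematic lighting")
#   short = smart_truncate(prompt, 200)
#   assert len(short) <= 200                     # never exceeds the requested limit
#   assert short.endswith("cinematic lighting")  # trailing style keywords survive
#
#   core, style = split_prompt_parts(prompt)
#   # style == "photorealistic, cinematic lighting"; core keeps the subject description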
+ # ============================================
100
  # BASE PROVIDER CLASS
101
+ # ============================================
102
 
103
  class BaseProvider(ABC):
104
+ name = "base"
105
+ display_name = "Base Provider"
106
+ website = ""
107
+ supports_img2img = False
108
+ supports_negative_prompt = True
109
+ default_model = ""
110
+ available_models = []
111
+ requires_package = ""
112
+ max_prompt_length = 10000 # Default generous limit
113
+
114
+ def __init__(self, api_key=""):
115
  self.api_key = api_key.strip()
116
 
117
+ def _truncate(self, prompt, max_len=None):
118
+ """Truncate prompt to fit provider's limit."""
119
+ limit = max_len or self.max_prompt_length
120
+ return smart_truncate(prompt, limit)
121
+
122
  @abstractmethod
123
  def generate_image(
124
+ self, prompt, negative_prompt="",
125
+ width=1024, height=1024,
126
+ seed=None, model=None, **kwargs,
127
+ ):
128
  pass
129
 
130
  def img2img(
131
+ self, prompt, image, strength=0.4,
132
+ negative_prompt="", seed=None,
133
+ model=None, **kwargs,
134
+ ):
135
  return self.generate_image(
136
  prompt=prompt, negative_prompt=negative_prompt,
137
  width=image.width, height=image.height,
 
139
  )
140
 
141
  @staticmethod
142
+ def _image_to_base64(img, fmt="PNG"):
143
  buf = io.BytesIO()
144
  img.save(buf, format=fmt)
145
  return base64.b64encode(buf.getvalue()).decode("utf-8")
146
 
147
  @staticmethod
148
+ def _base64_to_image(b64):
149
  data = base64.b64decode(b64)
150
  return Image.open(io.BytesIO(data)).convert("RGB")
151
 
152
  @staticmethod
153
+ def _url_to_image(url):
154
  resp = requests.get(url, timeout=120)
155
  resp.raise_for_status()
156
  return Image.open(io.BytesIO(resp.content)).convert("RGB")
157
 
158
  @staticmethod
159
+ def _bytes_to_image(data):
160
  return Image.open(io.BytesIO(data)).convert("RGB")
161
 
162
 
163
+ # ============================================
164
  # 1. OPENAI (DALL-E 3 / gpt-image-1)
165
+ # ============================================
166
 
167
  class OpenAIProvider(BaseProvider):
168
  name = "openai"
169
+ display_name = "OpenAI (DALL-E 3 / gpt-image-1)"
170
  website = "https://platform.openai.com/api-keys"
171
  supports_img2img = False
172
  supports_negative_prompt = False
173
  default_model = "dall-e-3"
174
  available_models = ["dall-e-3", "dall-e-2", "gpt-image-1"]
175
  requires_package = "openai"
176
+ max_prompt_length = 3900 # DALL-E 3 limit is 4000
177
 
178
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
179
  seed=None, model=None, **kwargs):
 
181
  client = openai_mod.OpenAI(api_key=self.api_key)
182
  model = model or self.default_model
183
 
184
+ # Set correct limit per model
185
+ if model == "dall-e-2":
186
+ limit = 900 # DALL-E 2 limit is 1000
187
+ elif model == "gpt-image-1":
188
+ limit = 32000 # gpt-image-1 has much higher limit
189
+ else:
190
+ limit = 3900 # DALL-E 3
191
+
192
+ safe_prompt = self._truncate(prompt, limit)
193
+
194
  size_map = {
195
  (1024, 1024): "1024x1024", (1792, 1024): "1792x1024",
196
  (1024, 1792): "1024x1792", (512, 512): "512x512",
 
200
 
201
  if model == "gpt-image-1":
202
  response = client.images.generate(
203
+ model="gpt-image-1", prompt=safe_prompt, n=1, size=size,
204
  )
205
  if hasattr(response.data[0], 'b64_json') and response.data[0].b64_json:
206
  return self._base64_to_image(response.data[0].b64_json)
207
  return self._url_to_image(response.data[0].url)
208
  else:
209
  api_kwargs = dict(
210
+ model=model, prompt=safe_prompt, n=1, size=size,
211
  response_format="b64_json",
212
  )
213
  if model == "dall-e-3":
 
217
  return self._base64_to_image(response.data[0].b64_json)
218
 
219
 
220
+ # ============================================
221
+ # 2. STABILITY AI
222
+ # ============================================
223
 
224
  class StabilityProvider(BaseProvider):
225
  name = "stability"
 
233
  "sd3-large", "sd3-large-turbo", "sd3-medium",
234
  "stable-image-core", "stable-image-ultra",
235
  ]
236
+ requires_package = ""
237
+ max_prompt_length = 10000
238
 
239
  API_BASE = "https://api.stability.ai/v2beta"
240
 
241
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
242
  seed=None, model=None, **kwargs):
243
  model = model or self.default_model
244
+ safe_prompt = self._truncate(prompt)
245
+ headers = {"Authorization": "Bearer " + self.api_key, "Accept": "image/*"}
246
+ data = {"prompt": safe_prompt, "output_format": "png", "width": width, "height": height}
247
  if negative_prompt:
248
+ data["negative_prompt"] = smart_truncate(negative_prompt, 5000)
249
  if seed is not None:
250
  data["seed"] = seed
251
 
252
  if "stable-image" in model:
253
+ url = self.API_BASE + "/stable-image/generate/" + model.replace("stable-image-", "")
254
  else:
255
+ url = self.API_BASE + "/stable-image/generate/sd3"
256
  data["model"] = model
257
 
258
  resp = requests.post(url, headers=headers, files={"none": ""}, data=data, timeout=120)
 
261
 
262
  def img2img(self, prompt, image, strength=0.4, negative_prompt="",
263
  seed=None, model=None, **kwargs):
264
+ safe_prompt = self._truncate(prompt)
265
+ headers = {"Authorization": "Bearer " + self.api_key, "Accept": "image/*"}
266
  buf = io.BytesIO()
267
  image.save(buf, format="PNG")
268
  buf.seek(0)
269
 
270
  data = {
271
+ "prompt": safe_prompt, "strength": strength,
272
  "output_format": "png", "mode": "image-to-image",
273
  }
274
  if negative_prompt:
275
+ data["negative_prompt"] = smart_truncate(negative_prompt, 5000)
276
  if seed is not None:
277
  data["seed"] = seed
278
 
279
  files = {"image": ("input.png", buf, "image/png")}
280
+ url = self.API_BASE + "/stable-image/generate/sd3"
281
  resp = requests.post(url, headers=headers, files=files, data=data, timeout=120)
282
  resp.raise_for_status()
283
  return self._bytes_to_image(resp.content)
284
 
285
 
286
+ # ============================================
287
+ # 3. REPLICATE
288
+ # ============================================
289
 
290
  class ReplicateProvider(BaseProvider):
291
  name = "replicate"
 
303
  "bytedance/sdxl-lightning-4step:latest",
304
  ]
305
  requires_package = "replicate"
306
+ max_prompt_length = 10000
307
 
308
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
309
  seed=None, model=None, **kwargs):
310
  replicate_mod = _require_import("replicate")
311
  client = replicate_mod.Client(api_token=self.api_key)
312
  model_id = model or self.default_model
313
+ safe_prompt = self._truncate(prompt)
314
 
315
+ input_params = {"prompt": safe_prompt, "width": width, "height": height}
316
  if negative_prompt and "flux" not in model_id.lower():
317
+ input_params["negative_prompt"] = smart_truncate(negative_prompt, 5000)
318
  if seed is not None:
319
  input_params["seed"] = seed
320
 
 
332
  replicate_mod = _require_import("replicate")
333
  client = replicate_mod.Client(api_token=self.api_key)
334
  model_id = model or "stability-ai/sdxl:latest"
335
+ safe_prompt = self._truncate(prompt)
336
 
337
  buf = io.BytesIO()
338
  image.save(buf, format="PNG")
339
  buf.seek(0)
340
 
341
+ input_params = {"prompt": safe_prompt, "image": buf, "prompt_strength": strength}
342
  if negative_prompt:
343
+ input_params["negative_prompt"] = smart_truncate(negative_prompt, 5000)
344
  if seed is not None:
345
  input_params["seed"] = seed
346
 
 
349
  return self._url_to_image(url)
350
 
351
 
352
+ # ============================================
353
  # 4. TOGETHER AI
354
+ # ============================================
355
 
356
  class TogetherProvider(BaseProvider):
357
  name = "together"
 
367
  "stabilityai/stable-diffusion-xl-base-1.0",
368
  ]
369
  requires_package = "together"
370
+ max_prompt_length = 10000
371
 
372
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
373
  seed=None, model=None, **kwargs):
374
  together_mod = _require_import("together")
375
  client = together_mod.Together(api_key=self.api_key)
376
  model_id = model or self.default_model
377
+ safe_prompt = self._truncate(prompt)
378
 
379
+ params = dict(model=model_id, prompt=safe_prompt, width=width, height=height,
380
  steps=kwargs.get("steps", 28), n=1, response_format="b64_json")
381
  if negative_prompt:
382
+ params["negative_prompt"] = smart_truncate(negative_prompt, 5000)
383
  if seed is not None:
384
  params["seed"] = seed
385
 
 
387
  return self._base64_to_image(response.data[0].b64_json)
388
 
389
 
390
+ # ============================================
391
  # 5. FAL.AI
392
+ # ============================================
393
 
394
  class FalProvider(BaseProvider):
395
  name = "fal"
 
404
  "fal-ai/stable-diffusion-v35-large", "fal-ai/recraft-v3",
405
  ]
406
  requires_package = "fal_client"
407
+ max_prompt_length = 10000
408
 
409
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
410
  seed=None, model=None, **kwargs):
411
  fal_client = _require_import("fal_client", "fal-client")
412
  os.environ["FAL_KEY"] = self.api_key
413
  model_id = model or self.default_model
414
+ safe_prompt = self._truncate(prompt)
415
 
416
+ arguments = {"prompt": safe_prompt, "image_size": {"width": width, "height": height}, "num_images": 1}
417
  if negative_prompt:
418
+ arguments["negative_prompt"] = smart_truncate(negative_prompt, 5000)
419
  if seed is not None:
420
  arguments["seed"] = seed
421
 
 
429
  seed=None, model=None, **kwargs):
430
  fal_client = _require_import("fal_client", "fal-client")
431
  os.environ["FAL_KEY"] = self.api_key
432
+ safe_prompt = self._truncate(prompt)
433
 
434
  b64 = self._image_to_base64(image)
435
+ data_uri = "data:image/png;base64," + b64
436
 
437
+ arguments = {"prompt": safe_prompt, "image_url": data_uri, "strength": strength, "num_images": 1}
438
  if negative_prompt:
439
+ arguments["negative_prompt"] = smart_truncate(negative_prompt, 5000)
440
  if seed is not None:
441
  arguments["seed"] = seed
442
 
 
448
  raise ValueError("No image from Fal.ai img2img")
449
 
450
 
451
+ # ============================================
452
+ # 6. GOOGLE GEMINI
453
+ # ============================================
454
 
455
  class GoogleGeminiProvider(BaseProvider):
456
  name = "google"
 
461
  default_model = "imagen-3.0-generate-002"
462
  available_models = ["imagen-3.0-generate-002", "imagen-3.0-fast-generate-001"]
463
  requires_package = "google.generativeai"
464
+ max_prompt_length = 5000
465
 
466
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
467
  seed=None, model=None, **kwargs):
468
  genai = _require_import("google.generativeai", "google-generativeai")
469
  genai.configure(api_key=self.api_key)
470
  model_id = model or self.default_model
471
+ safe_prompt = self._truncate(prompt)
472
 
473
  imagen = genai.ImageGenerationModel(model_id)
474
+ params = dict(prompt=safe_prompt, number_of_images=1)
475
  if negative_prompt:
476
+ params["negative_prompt"] = smart_truncate(negative_prompt, 2000)
477
 
478
  response = imagen.generate_images(**params)
479
  if response.images:
 
481
  raise ValueError("No image returned from Imagen")
482
 
483
 
484
+ # ============================================
485
  # 7. HUGGING FACE INFERENCE API
486
+ # ============================================
487
 
488
  class HuggingFaceProvider(BaseProvider):
489
  name = "huggingface"
 
498
  "stabilityai/stable-diffusion-3.5-large",
499
  "runwayml/stable-diffusion-v1-5",
500
  ]
501
+ requires_package = ""
502
+ max_prompt_length = 10000
503
 
504
  API_BASE = "https://api-inference.huggingface.co/models"
505
 
506
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
507
  seed=None, model=None, **kwargs):
508
  model_id = model or self.default_model
509
+ url = self.API_BASE + "/" + model_id
510
+ headers = {"Authorization": "Bearer " + self.api_key}
511
+ safe_prompt = self._truncate(prompt)
512
 
513
+ payload = {"inputs": safe_prompt, "parameters": {"width": width, "height": height}}
514
  if negative_prompt:
515
+ payload["parameters"]["negative_prompt"] = smart_truncate(negative_prompt, 5000)
516
  if seed is not None:
517
  payload["parameters"]["seed"] = seed
518
 
 
524
  return self._bytes_to_image(resp.content)
525
 
526
 
527
+ # ============================================
528
  # 8. xAI GROK
529
+ # ============================================
530
 
531
  class XAIProvider(BaseProvider):
532
  name = "xai"
 
537
  default_model = "grok-2-image"
538
  available_models = ["grok-2-image"]
539
  requires_package = "openai"
540
+ max_prompt_length = 4000
541
 
542
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
543
  seed=None, model=None, **kwargs):
544
  openai_mod = _require_import("openai")
545
  client = openai_mod.OpenAI(api_key=self.api_key, base_url="https://api.x.ai/v1")
546
+ safe_prompt = self._truncate(prompt)
547
  response = client.images.generate(
548
+ model=model or self.default_model, prompt=safe_prompt,
549
  n=1, response_format="b64_json", size="1024x1024",
550
  )
551
  return self._base64_to_image(response.data[0].b64_json)
552
 
553
 
554
+ # ============================================
555
  # 9. FIREWORKS AI
556
+ # ============================================
557
 
558
  class FireworksProvider(BaseProvider):
559
  name = "fireworks"
 
568
  "accounts/fireworks/models/flux-1-dev-fp8",
569
  "accounts/fireworks/models/stable-diffusion-xl-1024-v1-0",
570
  ]
571
+ requires_package = ""
572
+ max_prompt_length = 10000
573
 
574
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
575
  seed=None, model=None, **kwargs):
576
  url = "https://api.fireworks.ai/inference/v1/images/generations"
577
+ headers = {"Authorization": "Bearer " + self.api_key, "Content-Type": "application/json"}
578
+ safe_prompt = self._truncate(prompt)
579
  payload = {
580
+ "model": model or self.default_model, "prompt": safe_prompt,
581
+ "n": 1, "size": str(width) + "x" + str(height), "response_format": "b64_json",
582
  }
583
  if negative_prompt:
584
+ payload["negative_prompt"] = smart_truncate(negative_prompt, 5000)
585
  if seed is not None:
586
  payload["seed"] = seed
587
 
 
591
  return self._base64_to_image(data["data"][0]["b64_json"])
592
 
593
 
594
+ # ============================================
595
  # 10. IDEOGRAM
596
+ # ============================================
597
 
598
  class IdeogramProvider(BaseProvider):
599
  name = "ideogram"
 
603
  supports_negative_prompt = True
604
  default_model = "V_2"
605
  available_models = ["V_2", "V_2_TURBO", "V_1", "V_1_TURBO"]
606
+ requires_package = ""
607
+ max_prompt_length = 10000
608
 
609
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
610
  seed=None, model=None, **kwargs):
611
  url = "https://api.ideogram.ai/generate"
612
  headers = {"Api-Key": self.api_key, "Content-Type": "application/json"}
613
+ safe_prompt = self._truncate(prompt)
614
  payload = {
615
  "image_request": {
616
+ "prompt": safe_prompt, "model": model or self.default_model,
617
  "magic_prompt_option": "AUTO", "aspect_ratio": "ASPECT_1_1",
618
  }
619
  }
620
  if negative_prompt:
621
+ payload["image_request"]["negative_prompt"] = smart_truncate(negative_prompt, 5000)
622
  if seed is not None:
623
  payload["image_request"]["seed"] = seed
624
 
 
628
  return self._url_to_image(data["data"][0]["url"])
629
 
630
 
631
+ # ============================================
632
  # 11. LEONARDO AI
633
+ # ============================================
634
 
635
  class LeonardoProvider(BaseProvider):
636
  name = "leonardo"
 
644
  "aa77f04e-3eec-4034-9c07-d0f619684628",
645
  "1e60896f-3c26-4296-8ecc-53e2afecc132",
646
  ]
647
+ requires_package = ""
648
+ max_prompt_length = 10000
649
 
650
  API_BASE = "https://cloud.leonardo.ai/api/rest/v1"
651
 
652
  def generate_image(self, prompt, negative_prompt="", width=1024, height=1024,
653
  seed=None, model=None, **kwargs):
654
+ headers = {"Authorization": "Bearer " + self.api_key, "Content-Type": "application/json"}
655
+ safe_prompt = self._truncate(prompt)
656
  payload = {
657
+ "prompt": safe_prompt, "modelId": model or self.default_model,
658
  "width": width, "height": height, "num_images": 1,
659
  }
660
  if negative_prompt:
661
+ payload["negative_prompt"] = smart_truncate(negative_prompt, 5000)
662
  if seed is not None:
663
  payload["seed"] = seed
664
 
665
+ resp = requests.post(self.API_BASE + "/generations", headers=headers, json=payload, timeout=60)
666
  resp.raise_for_status()
667
  gen_id = resp.json()["sdGenerationJob"]["generationId"]
668
 
669
  for _ in range(30):
670
  time.sleep(5)
671
+ poll = requests.get(self.API_BASE + "/generations/" + gen_id, headers=headers, timeout=30)
672
  poll.raise_for_status()
673
  gen = poll.json().get("generations_by_pk", {})
674
  if gen.get("status") == "COMPLETE":
 
679
  raise TimeoutError("Leonardo generation timed out")
680
 
681
 
682
+ # ============================================
683
  # 12. CUSTOM OPENAI-COMPATIBLE
684
+ # ============================================
685
 
686
  class CustomOpenAIProvider(BaseProvider):
687
  name = "custom_openai"
 
692
  default_model = "dall-e-3"
693
  available_models = ["dall-e-3", "dall-e-2", "custom"]
694
  requires_package = "openai"
695
+ max_prompt_length = 3900
696
 
697
  def __init__(self, api_key="", base_url=""):
698
  super().__init__(api_key)
 
705
  if self.base_url:
706
  ck["base_url"] = self.base_url
707
  client = openai_mod.OpenAI(**ck)
708
+ safe_prompt = self._truncate(prompt)
709
  response = client.images.generate(
710
+ model=model or self.default_model, prompt=safe_prompt,
711
+ n=1, size=str(width) + "x" + str(height), response_format="b64_json",
712
  )
713
  return self._base64_to_image(response.data[0].b64_json)
714
 
715
 
716
+ # ============================================
717
  # 13. DIRECT URL API
718
+ # ============================================
719
 
720
  class DirectURLProvider(BaseProvider):
721
  name = "direct_url"
 
726
  default_model = "custom"
727
  available_models = ["custom"]
728
  requires_package = ""
729
+ max_prompt_length = 50000
730
 
731
  def __init__(self, api_key="", endpoint_url=""):
732
  super().__init__(api_key)
 
737
  if not self.endpoint_url:
738
  raise ValueError("No endpoint URL provided")
739
 
740
+ headers = {"Authorization": "Bearer " + self.api_key, "Content-Type": "application/json"}
741
+ safe_prompt = self._truncate(prompt)
742
+ payload = {"prompt": safe_prompt, "width": width, "height": height}
743
  if negative_prompt:
744
+ payload["negative_prompt"] = smart_truncate(negative_prompt, 10000)
745
  if seed is not None:
746
  payload["seed"] = seed
747
  if model and model != "custom":
 
764
  for subkey in ["b64_json", "url", "image"]:
765
  if subkey in item:
766
  val = item[subkey]
767
+ if isinstance(val, str) and val.startswith("http"):
768
  return self._url_to_image(val)
769
  return self._base64_to_image(val)
770
  if isinstance(item, str):
 
775
  raise ValueError("Could not parse image from API response")
776
 
777
 
778
+ # ============================================
779
  # PROVIDER REGISTRY
780
+ # ============================================
781
 
782
  PROVIDERS = {
783
  "openai": OpenAIProvider,
 
801
  def get_provider(provider_name, api_key, **kwargs):
802
  cls = PROVIDERS.get(provider_name)
803
  if cls is None:
804
+ raise ValueError("Unknown provider: " + str(provider_name))
805
  if provider_name == "custom_openai":
806
  return cls(api_key=api_key, base_url=kwargs.get("base_url", ""))
807
  if provider_name == "direct_url":
 
821
  "website": cls.website, "supports_img2img": cls.supports_img2img,
822
  "default_model": cls.default_model, "available_models": cls.available_models,
823
  "requires_package": pkg, "package_installed": installed,
824
+ "max_prompt_length": cls.max_prompt_length,
825
  })
826
  return info
827
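
For context, a minimal sketch of how a caller might use the registry after this change (illustrative only, not part of the commit; the API key is a placeholder):

import api_providers

# Each provider class now carries its prompt cap; get_provider_info() reports it too.
print(api_providers.PROVIDERS["openai"].max_prompt_length)   # 3900 for DALL-E 3

provider = api_providers.get_provider("openai", api_key="sk-placeholder")
# generate_image() now truncates over-long prompts internally via smart_truncate(),
# so a prompt above the model limit is shortened instead of being rejected by the API:
# image = provider.generate_image("a red fox in fresh snow, " * 500)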