mahmoudsaber0 committed
Commit a2b4143 · verified · 1 Parent(s): b85a20c

Update app.py

Files changed (1)
  1. app.py +120 -465
app.py CHANGED
@@ -1,469 +1,124 @@
- """
- Advanced AI Humanizer - Full (heavy) and Light (CPU-friendly) versions
-
- This single-file implementation provides two modes:
- - mode='heavy' : uses transformers, sentence-transformers, spaCy when available.
- - mode='light' : CPU-friendly fallback using WordNet, simple heuristics, and minimal external deps.
-
- Usage: instantiate AdvancedAIHumanizerEnhanced(mode='heavy'|'light') and call .humanize_text(text, intensity)
-
- Notes:
- - All heavy-model loads are lazy and protected by try/except.
- - The script intentionally avoids forcing model downloads at import-time.
- - If you run locally and want heavy behavior, install: transformers, sentence-transformers, spacy, torch, lemminflect
-
- """
  
  import os
- import re
  import random
- import math
- import string
- from typing import List, Tuple, Optional
- from collections import defaultdict, Counter
-
- # NLP basics
- try:
-     import nltk
-     from nltk.tokenize import sent_tokenize, word_tokenize
-     from nltk.corpus import stopwords, wordnet
-     nltk_available = True
- except Exception:
-     nltk_available = False
-
- # Optional heavy libs
- try:
-     import spacy
-     spacy_available = True
- except Exception:
-     spacy_available = False
-
- try:
-     import torch
-     from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForMaskedLM
-     transformers_available = True
- except Exception:
-     transformers_available = False
-
- try:
-     from sentence_transformers import SentenceTransformer
-     st_available = True
- except Exception:
-     st_available = False
-
- # Optional morphological inflection
- try:
-     import lemminflect
-     lemminflect_available = True
- except Exception:
-     lemminflect_available = False
-
- # simple readability (textstat) fallback
- try:
-     from textstat import flesch_reading_ease, flesch_kincaid_grade
- except Exception:
-     def flesch_reading_ease(text):
-         return 60.0
-     def flesch_kincaid_grade(text):
-         return 8.0
-
- # Ensure NLTK data path and downloads if available
- if nltk_available:
-     try:
-         nltk.data.path.append('/tmp/nltk_data')
-         os.makedirs('/tmp/nltk_data', exist_ok=True)
-         for pkg in ("punkt", "averaged_perceptron_tagger", "stopwords", "wordnet", "omw-1.4"):
-             try:
-                 nltk.download(pkg, download_dir='/tmp/nltk_data', quiet=True)
-             except Exception:
-                 pass
-     except Exception:
-         pass
-
- # Helper: safe lower/tokenize
- def safe_word_tokenize(text: str) -> List[str]:
-     if nltk_available:
-         return word_tokenize(text)
-     return re.findall(r"\w+", text)
-
- def safe_sent_tokenize(text: str) -> List[str]:
-     if nltk_available:
-         return sent_tokenize(text)
-     # naive split
-     return [s.strip() for s in re.split(r'(?<=[.!?])\s+', text) if s.strip()]
-
- class AdvancedAIHumanizerEnhanced:
-     def __init__(self, mode: str = 'heavy'):
-         """mode: 'heavy' uses transformers + sentence-transformers + spacy when available; 'light' uses CPU-friendly heuristics."""
-         self.mode = mode
-         # basic resources
-         self.stop_words = set(stopwords.words('english')) if nltk_available else set()
-         self._init_word_groups()
-
-         # lazy model placeholders
-         self._masked_pipe = None
-         self._paraphrase_gen = None
-         self._sentence_model = None
-         self._nlp = None
-
-         if mode == 'heavy':
-             self._lazy_load_heavy()
-         else:
-             # minimal initialization for light mode
-             if spacy_available:
-                 try:
-                     self._nlp = spacy.load('en_core_web_sm')
-                 except Exception:
-                     self._nlp = None
-
-     def _init_word_groups(self):
-         self.word_groups = {
-             'analyze': ['examine', 'study', 'investigate', 'explore', 'review', 'assess'],
-             'important': ['crucial', 'vital', 'significant', 'essential', 'key', 'critical'],
-             'improve': ['enhance', 'better', 'upgrade', 'refine', 'advance', 'boost'],
-         }
-         # reverse map
-         self.synonym_map = {}
-         for base, syns in self.word_groups.items():
-             for s in syns:
-                 self.synonym_map.setdefault(s, []).append(base)
-
-     def _lazy_load_heavy(self):
-         """Load heavy models if available; done lazily at init for heavy mode."""
-         # sentence transformer
-         if st_available:
-             try:
-                 self._sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
-             except Exception:
-                 self._sentence_model = None
-         # masked LM pipeline
-         if transformers_available:
-             try:
-                 self._masked_pipe = pipeline('fill-mask', model='bert-base-uncased')
-             except Exception:
-                 self._masked_pipe = None
-             # paraphrase generator (T5-small fallback)
-             try:
-                 self._paraphrase_tok = AutoTokenizer.from_pretrained('t5-small')
-                 self._paraphrase_gen = AutoModelForSeq2SeqLM.from_pretrained('t5-small')
-             except Exception:
-                 self._paraphrase_gen = None
-         # spacy
-         if spacy_available:
-             try:
-                 self._nlp = spacy.load('en_core_web_sm')
-             except Exception:
-                 try:
-                     os.system('python -m spacy download en_core_web_sm')
-                     self._nlp = spacy.load('en_core_web_sm')
-                 except Exception:
-                     self._nlp = None
-
-     # ---------------- Lightweight utilities -----------------
-     def _light_paraphrase_simple(self, text: str) -> List[str]:
-         """Lightweight paraphrase by simple heuristics: swap synonyms from word_groups and reorder short phrases."""
-         candidates = []
-         words = safe_word_tokenize(text)
-         for _ in range(3):
-             out = []
-             for w in words:
-                 lw = w.lower()
-                 if lw in self.word_groups and random.random() < 0.5:
-                     out.append(random.choice(self.word_groups[lw]))
-                 else:
-                     out.append(w)
-             # simple reorder of clauses
-             s = ' '.join(out)
-             if ',' in s and random.random() < 0.4:
-                 parts = s.split(',', 1)
-                 s = parts[1].strip().capitalize() + '. ' + parts[0].strip()
-             candidates.append(s)
-         return list(dict.fromkeys(candidates))
-
-     # ---------------- Heavy helpers (masked LM candidates) -----------------
-     def masked_candidates(self, sentence: str, target_token: str, top_k: int = 6) -> List[Tuple[str, float]]:
-         """Return (candidate, score) from fill-mask model based on replacing first occurrence of target_token."""
-         if not self._masked_pipe:
-             return []
-         mask = self._masked_pipe.tokenizer.mask_token
-         # replace token occurrence carefully (word-boundary)
-         pattern = re.compile(r'\b' + re.escape(target_token) + r'\b', flags=re.IGNORECASE)
-         if not pattern.search(sentence):
-             return []
-         masked = pattern.sub(mask, sentence, count=1)
-         try:
-             preds = self._masked_pipe(masked, top_k=top_k)
-             candidates = []
-             for p in preds:
-                 tok = p.get('token_str','').strip()
-                 score = float(p.get('score', 0.0))
-                 if tok and tok.lower() != target_token.lower():
-                     candidates.append((tok, score))
-             # dedup preserving best score
-             uniq = {}
-             for tok, sc in candidates:
-                 if tok not in uniq or sc > uniq[tok]:
-                     uniq[tok] = sc
-             return sorted(list(uniq.items()), key=lambda x: x[1], reverse=True)
-         except Exception:
-             return []
-
-     # ---------------- Paraphrase sampling (heavy) -----------------
-     def sample_paraphrases(self, text: str, num_return: int = 4, max_length: int = 256) -> List[str]:
-         if self._paraphrase_gen is None:
-             # fallback to light paraphrase
-             return self._light_paraphrase_simple(text)
-         try:
-             inp = 'paraphrase: ' + text + ' </s>'
-             inputs = self._paraphrase_tok.encode(inp, return_tensors='pt', truncation=True, max_length=512)
-             outputs = self._paraphrase_gen.generate(
-                 inputs, do_sample=True, top_p=0.9, temperature=0.9,
-                 num_return_sequences=num_return, max_length=max_length, no_repeat_ngram_size=3
-             )
-             decoded = [self._paraphrase_tok.decode(o, skip_special_tokens=True, clean_up_tokenization_spaces=True) for o in outputs]
-             # dedupe
-             return list(dict.fromkeys(decoded))
-         except Exception:
-             return self._light_paraphrase_simple(text)
-
-     # ---------------- Scoring -----------------
-     def get_semantic_similarity(self, text1: str, text2: str) -> float:
-         """Use sentence-transformer if available, else fallback to Jaccard-like heuristic."""
-         try:
-             if self._sentence_model:
-                 emb = self._sentence_model.encode([text1, text2])
-                 # compute cosine manually (avoid sklearn dependency)
-                 a, b = emb[0], emb[1]
-                 num = sum(x*y for x,y in zip(a,b))
-                 den_a = math.sqrt(sum(x*x for x in a))
-                 den_b = math.sqrt(sum(x*x for x in b))
-                 if den_a == 0 or den_b == 0:
-                     return 0.8
-                 return max(0.0, min(1.0, num / (den_a*den_b)))
-             else:
-                 s1 = set(safe_word_tokenize(text1.lower()))
-                 s2 = set(safe_word_tokenize(text2.lower()))
-                 if not s1 or not s2:
-                     return 0.8
-                 inter = len(s1 & s2)
-                 uni = len(s1 | s2)
-                 return max(0.0, inter/uni)
-         except Exception:
-             return 0.8
-
-     def score_candidate(self, original: str, candidate: str) -> float:
-         """Combine semantic similarity and a lightweight fluency proxy to score candidates."""
-         sim = self.get_semantic_similarity(original, candidate)
-         # fluency proxy: prefer sentences with punctuation and average word length similar to original
-         def avg_word_len(s):
-             ws = [w for w in re.findall(r"\w+", s)]
-             return sum(len(w) for w in ws)/len(ws) if ws else 4
-         avg_orig = avg_word_len(original)
-         avg_cand = avg_word_len(candidate)
-         len_pen = 1 - min(0.2, abs(avg_orig-avg_cand)/10)
-         score = 0.85*sim + 0.15*len_pen
-         # small randomness to diversify
-         score += random.uniform(-0.02, 0.02)
-         return float(max(0.0, min(1.0, score)))
-
-     # ---------------- Contextual synonym replacement -----------------
-     def contextual_synonym_replace(self, sentence: str, max_replacements: int = 2, top_k: int = 6) -> str:
-         """Try masked LM suggestions for content words and pick best-scoring replacements.
-         Falls back to WordNet-based synonyms when heavy models not available.
-         """
-         if not sentence or len(sentence.split()) < 3:
-             return sentence
-
-         # choose content tokens (light heuristic)
-         tokens = safe_word_tokenize(sentence)
-         candidate_indices = [i for i, w in enumerate(tokens) if w.isalpha() and len(w) > 3 and w.lower() not in self.stop_words]
-         random.shuffle(candidate_indices)
-         replaced = ' '.join(tokens)
-         replacements = 0
-
-         for idx in candidate_indices:
-             if replacements >= max_replacements:
-                 break
-             target = tokens[idx]
-             # protect numerics or tokens with uppercase inside (possible entities)
-             if any(ch.isdigit() for ch in target) or (target[0].isupper() and not target.islower()):
-                 continue
-
-             # heavy path
-             if self.mode == 'heavy' and self._masked_pipe:
-                 cands = self.masked_candidates(replaced, target, top_k=top_k)
-                 best = None
-                 best_score = -1
-                 for cand, cand_score in cands:
-                     trial = re.sub(r"\b"+re.escape(target)+r"\b", cand, replaced, count=1)
-                     sc = self.score_candidate(sentence, trial)
-                     if sc > best_score:
-                         best = trial
-                         best_score = sc
-                 if best and best_score > 0.7:
-                     replaced = best
-                     replacements += 1
-                 continue
-             # light path (WordNet synonyms)
-             syns = []
-             try:
-                 for syn in wordnet.synsets(target.lower()):
-                     for lemma in syn.lemmas():
-                         name = lemma.name().replace('_',' ')
-                         if name.lower() != target.lower() and len(name) > 2:
-                             syns.append(name)
-             except Exception:
-                 syns = []
-
-             syns = list(dict.fromkeys(syns))
-             if syns:
-                 chosen = random.choice(syns)
-                 trial = re.sub(r"\b"+re.escape(target)+r"\b", chosen, replaced, count=1)
-                 sc = self.score_candidate(sentence, trial)
-                 if sc > 0.6:
-                     replaced = trial
-                     replacements += 1
-
-         return replaced
-
-     # ---------------- Dynamic connector generation -----------------
-     def generate_connectors(self, style: str = 'casual', n: int = 6) -> List[str]:
-         base = ["Actually,", "Honestly,", "Basically,", "Really,", "Generally,", "Usually,", "Often,", "Sometimes,"]
-         if self.mode == 'heavy' and self._paraphrase_gen:
-             try:
-                 # use paraphrase model to produce short starters
-                 prompt = f"Produce {n} short natural sentence starters in {style} English separated by |||"
-                 inp = 'paraphrase: ' + prompt + ' </s>'
-                 tokens = self._paraphrase_tok.encode(inp, return_tensors='pt', truncation=True)
-                 out = self._paraphrase_gen.generate(tokens, max_length=120)
-                 decoded = self._paraphrase_tok.decode(out[0], skip_special_tokens=True)
-                 parts = [p.strip() for p in decoded.split('|||') if p.strip()]
-                 if parts:
-                     return parts[:n]
-             except Exception:
-                 pass
-         # fallback sampling and shuffle
-         random.shuffle(base)
-         return base[:n]
-
-     # ---------------- Paraphrase-and-score pipeline -----------------
-     def paraphrase_and_select(self, sentence: str, num_return: int = 4, threshold: float = 0.72) -> str:
-         # generate candidates
-         if self.mode == 'heavy':
-             candidates = self.sample_paraphrases(sentence, num_return=num_return)
-         else:
-             candidates = self._light_paraphrase_simple(sentence)
-
-         # always include original as fallback
-         candidates = [c for c in candidates if c and c.strip()]
-         if sentence not in candidates:
-             candidates.append(sentence)
-
-         # score candidates and select highest that preserves meaning
-         scored = [(self.score_candidate(sentence, c), c) for c in candidates]
-         scored.sort(key=lambda x: x[0], reverse=True)
-         best_score, best_sent = scored[0]
-         if best_score >= threshold:
-             return best_sent
-         # try light token-level changes
-         token_changed = self.contextual_synonym_replace(sentence, max_replacements=2)
-         if self.score_candidate(sentence, token_changed) >= 0.6:
-             return token_changed
-         return sentence
-
-     # ---------------- Multi-pass humanization -----------------
-     def multiple_pass_humanization(self, text: str, intensity: int = 2) -> str:
-         sentences = safe_sent_tokenize(text)
-         out_sents = []
-         for i, s in enumerate(sentences):
-             s_clean = s.strip()
-             if not s_clean:
-                 continue
-             # pass 1: paraphrase & select
-             if len(s_clean.split()) > 6 and random.random() < (0.9 if intensity>=2 else 0.6):
-                 s_p = self.paraphrase_and_select(s_clean, num_return=4, threshold=0.7 if intensity>=2 else 0.65)
-             else:
-                 s_p = s_clean
-             # pass 2: token-level refinement
-             if random.random() < 0.4:
-                 s_p = self.contextual_synonym_replace(s_p, max_replacements=1)
-             # pass 3: occasionally add connector
-             if i>0 and random.random() < 0.25:
-                 connector = random.choice(self.generate_connectors())
-                 s_p = connector + ' ' + s_p[0].lower() + s_p[1:] if s_p else s_p
-             out_sents.append(s_p)
-         return ' '.join(out_sents)
-
-     # ---------------- Final checks -----------------
-     def calculate_perplexity(self, text: str) -> float:
-         # lightweight entropy-based proxy (keeps original approach)
-         try:
-             words = safe_word_tokenize(text.lower())
-             if not words:
-                 return 50.0
-             freq = Counter(words)
-             total = len(words)
-             entropy = 0.0
-             for w in words:
-                 p = freq[w]/total
-                 entropy -= p * math.log2(p)
-             perp = 2 ** entropy
-             if perp < 20:
-                 perp += random.uniform(20,30)
-             return perp
-         except Exception:
-             return random.uniform(45,75)
-
-     def final_quality_check(self, original: str, processed: str) -> Tuple[str, dict]:
-         metrics = {
-             'semantic_similarity': self.get_semantic_similarity(original, processed),
-             'perplexity': self.calculate_perplexity(processed),
-             'readability': flesch_reading_ease(processed)
-         }
-         # simple cleanup
-         processed = re.sub(r'\s+', ' ', processed).strip()
-         # ensure capitalization after sentence boundaries
-         sents = safe_sent_tokenize(processed)
-         fixed = []
-         for s in sents:
-             if s and s[0].islower():
-                 s = s[0].upper() + s[1:]
-             fixed.append(s)
-         processed = ' '.join(fixed)
-         return processed, metrics
-
-     # ---------------- Public API -----------------
-     def humanize_text(self, text: str, intensity: str = 'standard') -> Tuple[str, dict]:
-         """Main method. intensity in ('light','standard','heavy')"""
-         if not text or not text.strip():
-             return ("", {'error':'no input'})
-         map_level = {'light':1, 'standard':2, 'heavy':3}
-         lvl = map_level.get(intensity, 2)
-         # multi-pass
-         processed = self.multiple_pass_humanization(text, intensity=lvl)
-         processed, metrics = self.final_quality_check(text, processed)
-         # enforce semantic preservation
-         if metrics['semantic_similarity'] < 0.6:
-             # revert to token-level only
-             processed = self.contextual_synonym_replace(text, max_replacements=2)
-             processed, metrics = self.final_quality_check(text, processed)
-         return processed, metrics
-
- # ---------------- Example CLI usage -----------------
- if __name__ == '__main__':
-     import argparse
-     parser = argparse.ArgumentParser(description='Advanced AI Humanizer - heavy and light modes')
-     parser.add_argument('--mode', choices=['heavy','light'], default='light')
-     parser.add_argument('--intensity', choices=['light','standard','heavy'], default='standard')
-     parser.add_argument('--text', type=str, help='Text to humanize', default='')
-     args = parser.parse_args()
-
-     humanizer = AdvancedAIHumanizerEnhanced(mode=args.mode)
-     if args.text:
-         out, metrics = humanizer.humanize_text(args.text, intensity=args.intensity)
-         print('\n=== HUMANIZED ===\n')
-         print(out)
-         print('\n=== METRICS ===\n')
-         print(metrics)
      else:
-         print('No --text provided. Run with --text "your text here"')
+ # app.py
+ # Advanced AI Humanizer Pro (Full + Light) for Hugging Face Spaces
+ # Author: Saber (Mahmoud Saber)
  
  import os
  import random
+ import re
+ import nltk
+ import importlib
+ import gradio as gr
+
+ # Optional heavy dependencies (lazy-loaded)
+ nltk.download("wordnet", quiet=True)
+ from nltk.corpus import wordnet
+
+
+ # ========== LIGHT MODE ==========
+
+ def get_synonym(word):
+     """Return a random synonym for a word (if available)."""
+     synonyms = set()
+     for syn in wordnet.synsets(word):
+         for lemma in syn.lemmas():
+             synonyms.add(lemma.name().replace("_", " "))
+     if synonyms:
+         synonyms = list(synonyms)
+         choice = random.choice(synonyms)
+         if choice.lower() != word.lower():
+             return choice
+     return word
+
+
+ def humanize_light(text: str) -> str:
+     """Quick, CPU-safe version for humanizing AI text."""
+     text = re.sub(r"\b(however|moreover|furthermore|thus)\b", "", text, flags=re.IGNORECASE)
+     words = text.split()
+     for i in range(0, len(words), 10):
+         if random.random() < 0.3:
+             words[i] = get_synonym(words[i])
+     text = " ".join(words)
+     text = re.sub(r"\s{2,}", " ", text)
+     return text.strip().capitalize()
+
+
+ # ========== HEAVY MODE ==========
+
+ def load_heavy_dependencies():
+     """Load transformers, sentence-transformers, and spaCy only when needed."""
+     global torch, spacy, pipeline, SentenceTransformer
+     torch = importlib.import_module("torch")
+     spacy = importlib.import_module("spacy")
+     pipeline = importlib.import_module("transformers").pipeline
+     SentenceTransformer = importlib.import_module("sentence_transformers").SentenceTransformer
+
+
+ def humanize_heavy(text: str, intensity: str = "medium") -> str:
+     """Transformer-based deep rewriting for high naturalness."""
+     load_heavy_dependencies()
+     nlp = spacy.load("en_core_web_sm")
+
+     paraphraser = pipeline("text2text-generation", model="Vamsi/T5_Paraphrase_Paws")
+     sentences = [s.text for s in nlp(text).sents]
+
+     rewritten = []
+     for sent in sentences:
+         result = paraphraser(
+             f"paraphrase: {sent}",
+             max_length=128,
+             num_return_sequences=1,
+             temperature=0.8 if intensity == "heavy" else 0.5,
+         )
+         rewritten.append(result[0]["generated_text"])
+
+     if intensity == "heavy" and len(rewritten) > 2:
+         random.shuffle(rewritten)
+
+     return " ".join(rewritten).strip()
+
+
+ # ========== GRADIO UI CREATOR ==========
+
+ def run_humanizer(text, mode="light", intensity="medium"):
+     if not text.strip():
+         return "Please enter some text to humanize."
+     if mode == "light":
+         return humanize_light(text)
      else:
+         try:
+             return humanize_heavy(text, intensity)
+         except Exception as e:
+             return f"[Error in heavy mode: {str(e)}] Try switching to light mode."
+
+
+ def create_enhanced_interface():
+     """Build the Gradio UI."""
+     interface = gr.Interface(
+         fn=run_humanizer,
+         inputs=[
+             gr.Textbox(label="Enter Text", lines=8, placeholder="Paste your AI text here..."),
+             gr.Radio(["light", "heavy"], label="Mode", value="light"),
+             gr.Radio(["light", "medium", "heavy"], label="Intensity (for heavy mode only)", value="medium"),
+         ],
+         outputs=gr.Textbox(label="Humanized Text", lines=8),
+         title="🧠 Advanced AI Humanizer Pro",
+         description=(
+             "Rewrite AI-generated text into more natural, human-like language. "
+             "'Light' mode runs fast on CPU. 'Heavy' mode uses transformers for deeper rewriting."
+         ),
+         allow_flagging="never",
+     )
+     return interface
+
+
+ # ========== ORIGINAL STARTUP BLOCK (UNCHANGED) ==========
+
+ if __name__ == "__main__":
+     print("🚀 Starting Advanced AI Humanizer Pro...")
+     app = create_enhanced_interface()
+     app.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         show_error=True,
+         share=False
+     )
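
A minimal reviewer-side sketch (not part of the commit) of how the new app.py entry points can be exercised locally. It assumes app.py from this commit is importable and that the light-mode dependencies (nltk with the WordNet corpus, gradio) are installed; heavy mode additionally pulls in torch, transformers, and spaCy on first use. The sample sentence is made up.

# smoke_test.py - quick check of the new app.py API (illustrative only)
from app import run_humanizer, create_enhanced_interface

sample = "Moreover, the system utilizes advanced algorithms to analyze data."

# Light mode: WordNet-based rewriting, CPU only.
print(run_humanizer(sample, mode="light"))

# Heavy mode: lazily imports torch/transformers/spacy and downloads model
# weights on first use; run_humanizer already wraps the call in try/except.
print(run_humanizer(sample, mode="heavy", intensity="heavy"))

# Or launch the Gradio UI the same way the __main__ block does:
# create_enhanced_interface().launch(server_name="0.0.0.0", server_port=7860)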