Spaces: Running
Update app.py
app.py CHANGED
@@ -4,8 +4,10 @@ import random
 import re
 import nltk
 from nltk.tokenize import sent_tokenize, word_tokenize
 from textstat import flesch_reading_ease, flesch_kincaid_grade
 import string

 # Setup NLTK download path for Hugging Face Spaces
 os.environ['NLTK_DATA'] = '/tmp/nltk_data'
@@ -16,7 +18,8 @@ def download_nltk_data():
     os.makedirs('/tmp/nltk_data', exist_ok=True)
     nltk.data.path.append('/tmp/nltk_data')

-    required_data = ['

     for data in required_data:
         try:
@@ -34,346 +37,301 @@ download_nltk_data()

 class AdvancedAIHumanizer:
     def __init__(self):
-        self.ai_death_terms = {
-
-
-
-
-
-
-
-            r'\bdelve\b': ["
-            r'\bembark\b': ["
-            r'\ba testament to\b': ["
-            r'\
-            r'\bnavigating\b': ["
-            r'\bmeticulous\b': ["
-            r'\bintricate\b': ["complex", "detailed", "
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\bfacilitate\b': ["help", "make easier", "enable", "assist with"],
-            r'\butilize\b': ["use", "work with", "employ", "make use of"],
-            r'\bleverage\b': ["use", "take advantage of", "make use of", "tap into"],
-            r'\benhance\b': ["improve", "boost", "make better", "upgrade"],
-            r'\bimplement\b': ["put in place", "set up", "start using", "roll out"],
-            r'\bparadigm\b': ["approach", "way", "method", "system"],
-            r'\bmethodology\b': ["method", "approach", "way", "system"],
-            r'\bsynergy\b': ["teamwork", "working together", "collaboration"],
-            r'\boptimize\b': ["improve", "make better", "fine-tune", "perfect"],
-            r'\bstreamline\b': ["simplify", "make easier", "smooth out"],
-            r'\brobust\b': ["strong", "solid", "reliable", "tough"],
-            r'\bscalable\b': ["flexible", "adaptable", "expandable"],
-            r'\bseamless\b': ["smooth", "easy", "effortless", "simple"],
-            r'\binnovative\b': ["new", "creative", "fresh", "cutting-edge"],
-            r'\bgroundbreaking\b': ["amazing", "revolutionary", "game-changing"],
-            r'\btransformative\b': ["life-changing", "game-changing", "revolutionary"],
-            r'\bparadigm shift\b': ["big change", "major shift", "game changer"],
-            r'\bgame changer\b': ["total game changer", "complete shift", "major breakthrough"],
-            r'\bcutting-edge\b': ["latest", "newest", "state-of-the-art", "advanced"],
-            r'\bstate-of-the-art\b': ["latest", "newest", "most advanced", "top-notch"]
         }

-        #
-        self.contractions = {
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\bthat is\b': "that's", r'\bit is\b': "it's", r'\bwho is\b': "who's",
-            r'\bwhat is\b': "what's", r'\bwhere is\b': "where's", r'\bwhen is\b': "when's",
-            r'\bhow is\b': "how's", r'\bwhy is\b': "why's", r'\bthere is\b': "there's",
-            r'\bthere are\b': "there're", r'\bhere is\b': "here's"
         }

-        #
-        self.human_starters = [
-            "
-            "
-            "Right, so", "Well,", "Actually,", "Basically,", "Here's what's wild -",
-            "Get this -", "Check it out -", "So I was thinking -", "You know what's crazy?",
-            "Here's something interesting -", "Let me break this down for you -"
-        ]
-
-        # Natural fillers and expressions
-        self.natural_fillers = [
-            "you know", "I mean", "like", "actually", "basically", "honestly",
-            "literally", "obviously", "clearly", "definitely", "pretty much",
-            "kind of", "sort of", "more or less", "at the end of the day",
-            "when it comes down to it", "if you ask me", "in my experience",
-            "from what I've seen", "the way I see it", "real quick", "super quick",
-            "really fast", "pretty cool", "kinda weird", "sorta like"
-        ]
-
-        # Personal perspective phrases
-        self.personal_voices = [
-            "I think", "in my opinion", "from my experience", "personally",
-            "if you ask me", "the way I see it", "from what I've seen",
-            "in my view", "as I see it", "my take is", "I believe",
-            "it seems to me", "I'd say", "my guess is", "from where I sit",
-            "in my book", "if I'm being honest", "to be real with you"
         ]

-        #
-        self.casual_replacements = {
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\
-            r'\bsubsequent\b': ["next", "following", "after that", "then"],
-            r'\bprevious\b': ["last", "earlier", "before", "previous"],
-            r'\binitial\b': ["first", "starting", "beginning", "initial"],
-            r'\bfinal\b': ["last", "ending", "final", "closing"]
         }

-    def
-        """
-
-
-
-
-
-
-        cleaned = re.sub(r'\s+', ' ', cleaned).strip()
-
-        return cleaned
-
-    def aggressive_phrase_elimination(self, text):
-        """Aggressively eliminate all AI-flagged terms"""
-        for pattern, replacements in self.ai_death_terms.items():
-            count = 0
-            while re.search(pattern, text, re.IGNORECASE) and count < 10:
-                replacement = random.choice(replacements)
-                text = re.sub(pattern, replacement, text, flags=re.IGNORECASE, count=1)
-                count += 1
-        return text
-
-    def add_extensive_contractions(self, text):
-        """Add comprehensive contractions"""
-        for pattern, contraction in self.contractions.items():
-            text = re.sub(pattern, contraction, text, flags=re.IGNORECASE)
-        return text

-    def
-        """
-
-
-        for i, sentence in enumerate(sentences):
-            # Add personal starters
-            if random.random() < 0.4:
-                starter = random.choice(self.human_starters)
-                sentence = f"{starter} {sentence.lower()}"
-                sentence = sentence[0].upper() + sentence[1:]

-
-
-
-
-

-        #
-
-
-
-
-
-
-

-

-

-    def
-        """
         sentences = sent_tokenize(text)
-

-        for sentence in sentences:
-            #
-            if random.random() < 0.15:
-
-
-                sentence = f"{
-                sentence = sentence[0].upper() + sentence[1:]

-            #
-
-                corrections = ["I mean", "or rather", "well, actually", "sorry", "wait"]
-                correction = random.choice(corrections)
-                words = sentence.split()
-                if len(words) > 6:
-                    insert_pos = random.randint(3, len(words) - 2)
-                    words.insert(insert_pos, f"— {correction} —")
-                    sentence = " ".join(words)

-            #
-            if random.random() < 0.2:
-
-                sentence += f" {random.choice(emphasis)}"

-
-
-        return " ".join(
-    def
-        """
-
-

-        for
-
-
-
-            if len(words) > 12:
-                mid_point = len(words) // 2
-                first_part = " ".join(words[:mid_point])
-                second_part = " ".join(words[mid_point:])

-
-
-
-
-
-
-
-

                 connector = random.choice(connectors)
-
-
             else:
-
-                if random.random() < 0.3:
-                    questions = ["Right?", "You know?", "Make sense?", "See what I mean?", "You feel me?"]
-                    sentence += f" {random.choice(questions)}"

-
-
-        return " ".join(restructured)
-    def
-        """Add
         sentences = sent_tokenize(text)
-

         for sentence in sentences:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return " ".join(enhanced)
-
-    def apply_casual_language(self, text):
-        """Apply casual language patterns"""
-        for pattern, replacements in self.casual_replacements.items():
-            if re.search(pattern, text, re.IGNORECASE):
-                replacement = random.choice(replacements)
-                text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
-
-        return text
-
-    def add_conversational_flow(self, text):
-        """Add natural conversational flow"""
-        paragraphs = text.split('\n\n')
-        conversational_paragraphs = []
-
-        for para in paragraphs:
-            sentences = sent_tokenize(para)

-            # Add
-
-
-                enhanced_sentences.append(sentence)

-
-                if i < len(sentences) - 1 and random.random() < 0.2:
-                    bridges = [
-                        "And here's the crazy part:",
-                        "But wait, there's more:",
-                        "Now, here's where it gets interesting:",
-                        "Oh, and another thing:",
-                        "Plus, get this:"
-                    ]
-                    enhanced_sentences.append(random.choice(bridges))

-

-

-    def
-        """Final
-        # Fix
         text = re.sub(r'\s+', ' ', text)
-        text = re.sub(r'\s+([

-        #
-

-
-
-

-        #
-        text = re.sub(r'

         return text.strip()
-    def
-        """Apply
         current_text = text

         for pass_num in range(passes):
-            print(f"

-            #
-            current_text = self.
-            current_text = self.
-            current_text = self.
-            current_text = self.
-            current_text = self.
-            current_text = self.add_human_imperfections(current_text)
-            current_text = self.add_specific_examples(current_text)
-            current_text = self.add_conversational_flow(current_text)

-
-
-                current_text = self.aggressive_phrase_elimination(current_text)  # Second elimination
-            elif pass_num == 2:
-                current_text = self.inject_personality(current_text)  # Extra personality

-        return

     def get_readability_score(self, text):
         """Calculate readability score"""
@@ -388,8 +346,8 @@ class AdvancedAIHumanizer:
         except Exception as e:
             return f"Could not calculate readability: {str(e)}"

-    def humanize_text(self, text, intensity="
-        """Main humanization method"""
         if not text or not text.strip():
             return "Please provide text to humanize."
@@ -404,13 +362,13 @@ class AdvancedAIHumanizer:
         except Exception as nltk_error:
             return f"NLTK Error: {str(nltk_error)}. Please try again."

-        # Apply
-        if intensity == "
-            result = self.
-        elif intensity == "
-            result = self.
-        else:
-            result = self.

         return result
@@ -418,7 +376,7 @@ class AdvancedAIHumanizer:
             return f"Error processing text: {str(e)}"

 def create_interface():
-    """Create the
     humanizer = AdvancedAIHumanizer()

     def process_text(input_text, intensity):
@@ -431,188 +389,128 @@ def create_interface():
         except Exception as e:
             return f"Error: {str(e)}", "Processing error"

-    #
-
-
-
-        max-width: 100% !important;
-        width: 100% !important;
-        padding-left: 10px !important;
-        padding-right: 10px !important;
-        margin: 0 !important;
     }
-
-    /* Make all cards take full width inside their container */
-    .card, .gradio-card {
-        max-width: 100% !important;
-        width: 100% !important;
-        margin: 5px 0 !important;
-    }
-
-    /* Remove margin and padding from rows and columns to maximize width */
-    .gradio-row, .gradio-column {
-        margin: 0 !important;
-        padding: 5px !important;
-        gap: 10px !important;
-    }
-
-    /* Full width for textboxes and components */
-    .gradio-textbox, .gradio-button, .gradio-radio {
-        width: 100% !important;
-        max-width: 100% !important;
-    }
-
-    /* Responsive design for different screen sizes */
-    @media (min-width: 1536px) {
-        .gradio-container {
-            max-width: 100% !important;
-        }
-    }
-
-    @media (min-width: 1280px) {
-        .gradio-container {
-            max-width: 100% !important;
-        }
-    }
-
-    @media (min-width: 1024px) {
-        .gradio-container {
-            max-width: 100% !important;
-        }
-    }
-
-    /* Custom styling for better visual appeal */
     .main-header {
         text-align: center;
-
-        -
-        -
-        font-size: 2.5em !important;
-        font-weight: bold;
         margin-bottom: 20px;
     }
-
     .feature-box {
-        background: #
-        border-radius:
         padding: 20px;
-        margin:
-        border-left: 4px solid #
     }
-
-
-
-        color: white;
-        border-radius: 10px;
         padding: 15px;
         margin: 10px 0;
-
     }
     """
     with gr.Blocks(
-        title="
         theme=gr.themes.Soft(),
-        css=
-        fill_width=True
     ) as interface:

         gr.HTML("""
             <div class="main-header">
-
             </div>
             <div style="text-align: center; margin-bottom: 30px;">
-                <h3
-                <p style="font-size: 1.1em; color: #
-                    Advanced
-                    conversational restructuring, and human imperfection simulation
                 </p>
             </div>
         """)

         with gr.Row():
-            with gr.Column(scale=1
                 input_text = gr.Textbox(
-                    label="
-                    lines=
-                    placeholder="
-                    info="💡
-                    max_lines=20,
                     show_copy_button=True
                 )

                 intensity = gr.Radio(
                     choices=[
-                        ("Light
-                        ("
-                        ("
                     ],
-                    value="
-                    label="
-                    info="
                 )

                 btn = gr.Button(
-                    "
                     variant="primary",
-                    size="lg"
-                    scale=1
                 )

-            with gr.Column(scale=1
                 output_text = gr.Textbox(
-                    label="
-                    lines=
                     show_copy_button=True,
-                    info="
-                    max_lines=20
                 )

                 readability = gr.Textbox(
-                    label="
-                    lines=
-                    info="
                 )

         gr.HTML("""
             <div class="feature-box">
-                <h3>🎯
-                <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(
-                    <div>
-                        <strong
-
                     </div>
-                    <div>
-                        <strong
-
                     </div>
-                    <div>
-                        <strong
-
                     </div>
-                    <div>
-                        <strong
-
                     </div>
-                    <div>
-                        <strong
-
                     </div>
-                    <div>
-                        <strong
-
                     </div>
                 </div>
             </div>
         """)
-        gr.HTML("""
-        <div class="stats-box">
-            <h3>✨ Test your results with Originality.ai, GPTZero, and other AI detectors!</h3>
-            <p>This tool is specifically designed to pass the most sophisticated AI detection systems</p>
-        </div>
-        """)
-
         # Event handlers
         btn.click(
             fn=process_text,

@@ -629,7 +527,7 @@ def create_interface():
     return interface

 if __name__ == "__main__":
-    print("Starting
     app = create_interface()
     app.launch(
         server_name="0.0.0.0",
 import re
 import nltk
 from nltk.tokenize import sent_tokenize, word_tokenize
+from nltk.corpus import wordnet
 from textstat import flesch_reading_ease, flesch_kincaid_grade
 import string
+from collections import defaultdict

 # Setup NLTK download path for Hugging Face Spaces
 os.environ['NLTK_DATA'] = '/tmp/nltk_data'

     os.makedirs('/tmp/nltk_data', exist_ok=True)
     nltk.data.path.append('/tmp/nltk_data')

+    required_data = ['punkt', 'punkt_tab', 'averaged_perceptron_tagger',
+                     'stopwords', 'wordnet', 'omw-1.4']

     for data in required_data:
         try:
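The loop body is cut off in this view. A minimal sketch of what such a download loop usually looks like (only `required_data` and the `/tmp/nltk_data` directory are confirmed by the diff; the `quiet` flag and the exception handling are assumptions):

    for data in required_data:
        try:
            # fetch into the directory registered with nltk.data.path above
            nltk.download(data, download_dir='/tmp/nltk_data', quiet=True)
        except Exception as e:
            print(f"Failed to fetch {data}: {e}")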

 class AdvancedAIHumanizer:
     def __init__(self):
+        self.setup_humanization_patterns()
+        self.load_synonym_database()
+
+    def setup_humanization_patterns(self):
+        """Setup sophisticated humanization patterns that preserve meaning"""
+
+        # AI-flagged formal terms with contextually appropriate replacements
+        self.formal_replacements = {
+            r'\bdelve into\b': ["explore", "examine", "investigate", "analyze"],
+            r'\bembark on\b': ["begin", "start", "initiate", "commence"],
+            r'\ba testament to\b': ["evidence of", "proof of", "demonstrates", "shows"],
+            r'\blandscape of\b': ["context of", "environment of", "field of", "domain of"],
+            r'\bnavigating\b': ["managing", "addressing", "handling", "working through"],
+            r'\bmeticulous\b': ["careful", "thorough", "detailed", "precise"],
+            r'\bintricate\b': ["complex", "detailed", "sophisticated", "elaborate"],
+            r'\bmyriad\b': ["numerous", "many", "various", "multiple"],
+            r'\bplethora\b': ["abundance", "variety", "range", "collection"],
+            r'\bparadigm\b': ["model", "framework", "approach", "system"],
+            r'\bsynergy\b': ["collaboration", "cooperation", "coordination", "integration"],
+            r'\bleverage\b': ["utilize", "employ", "use", "apply"],
+            r'\bfacilitate\b': ["enable", "support", "assist", "help"],
+            r'\boptimize\b': ["improve", "enhance", "refine", "perfect"],
+            r'\bstreamline\b': ["simplify", "improve", "refine", "enhance"],
+            r'\brobust\b': ["strong", "reliable", "effective", "solid"],
+            r'\bseamless\b': ["smooth", "integrated", "unified", "continuous"],
+            r'\binnovative\b': ["creative", "original", "novel", "advanced"],
+            r'\bcutting-edge\b': ["advanced", "latest", "modern", "current"],
+            r'\bstate-of-the-art\b': ["advanced", "modern", "sophisticated", "current"]
         }

+        # Transition phrase variations
+        self.transition_replacements = {
+            r'\bfurthermore\b': ["additionally", "moreover", "in addition", "also"],
+            r'\bmoreover\b': ["furthermore", "additionally", "also", "in addition"],
+            r'\bhowever\b': ["nevertheless", "yet", "still", "although"],
+            r'\bnevertheless\b': ["however", "yet", "still", "nonetheless"],
+            r'\btherefore\b': ["consequently", "thus", "as a result", "hence"],
+            r'\bconsequently\b': ["therefore", "thus", "as a result", "accordingly"],
+            r'\bin conclusion\b': ["finally", "ultimately", "in summary", "to summarize"],
+            r'\bto summarize\b': ["in conclusion", "finally", "in summary", "overall"],
+            r'\bin summary\b': ["to conclude", "overall", "finally", "in essence"]
         }

+        # Sentence structure patterns for variation
+        self.sentence_starters = [
+            "Additionally,", "Furthermore,", "In particular,", "Notably,",
+            "Importantly,", "Significantly,", "Moreover,", "Consequently,"
         ]

+        # Professional contractions (limited and contextual)
+        self.professional_contractions = {
+            r'\bit is\b': "it's",
+            r'\bthere is\b': "there's",
+            r'\bthat is\b': "that's",
+            r'\bcannot\b': "can't",
+            r'\bdo not\b': "don't",
+            r'\bdoes not\b': "doesn't",
+            r'\bwill not\b': "won't",
+            r'\bwould not\b': "wouldn't"
         }
+
def load_synonym_database(self):
|
| 102 |
+
"""Load and prepare synonym database using WordNet"""
|
| 103 |
+
try:
|
| 104 |
+
# Test WordNet availability
|
| 105 |
+
wordnet.synsets('test')
|
| 106 |
+
self.wordnet_available = True
|
| 107 |
+
except:
|
| 108 |
+
self.wordnet_available = False
|
| 109 |
+
print("WordNet not available, using limited synonym replacement")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |

+    def get_contextual_synonym(self, word, pos_tag=None):
+        """Get contextually appropriate synonym using WordNet"""
+        if not self.wordnet_available:
+            return word
+
+        try:
+            # Get synsets for the word
+            synsets = wordnet.synsets(word.lower())
+            if not synsets:
+                return word
+
+            # Get synonyms from the first synset
+            synonyms = []
+            for synset in synsets[:2]:  # Check first 2 synsets
+                for lemma in synset.lemmas():
+                    synonym = lemma.name().replace('_', ' ')
+                    if synonym != word.lower() and len(synonym) > 2:
+                        synonyms.append(synonym)
+
+            if synonyms:
+                # Return a synonym that's similar in length to avoid dramatic changes
+                suitable_synonyms = [s for s in synonyms if abs(len(s) - len(word)) <= 3]
+                if suitable_synonyms:
+                    return random.choice(suitable_synonyms)
+                else:
+                    return random.choice(synonyms)
+
+            return word
+
+        except:
+            return word
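For reference, the WordNet calls above can be exercised standalone (real `nltk.corpus.wordnet` API; the word is an arbitrary example and requires the `wordnet` corpus downloaded earlier):

    from nltk.corpus import wordnet

    synsets = wordnet.synsets('enhance')          # all senses of the word
    candidates = set()
    for synset in synsets[:2]:                    # first two senses, as in the method
        for lemma in synset.lemmas():
            candidates.add(lemma.name().replace('_', ' '))
    print(candidates)                             # e.g. {'heighten', 'raise', ...}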
+
|
| 143 |
+
def preserve_meaning_replacement(self, text):
|
| 144 |
+
"""Replace AI-flagged terms while preserving exact meaning"""
|
| 145 |
+
result = text
|
| 146 |
+
|
| 147 |
+
# Apply formal term replacements
|
| 148 |
+
for pattern, replacements in self.formal_replacements.items():
|
| 149 |
+
if re.search(pattern, result, re.IGNORECASE):
|
| 150 |
+
replacement = random.choice(replacements)
|
| 151 |
+
result = re.sub(pattern, replacement, result, flags=re.IGNORECASE)
|
| 152 |
|
| 153 |
+
# Apply transition phrase replacements
|
| 154 |
+
for pattern, replacements in self.transition_replacements.items():
|
| 155 |
+
if re.search(pattern, result, re.IGNORECASE):
|
| 156 |
+
replacement = random.choice(replacements)
|
| 157 |
+
result = re.sub(pattern, replacement, result, flags=re.IGNORECASE)
|
| 158 |
+
|
| 159 |
+
return result
|
| 160 |
|
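The replacement tables are plain regex-to-options maps, so the method reduces to repeated `re.sub` calls. A self-contained sketch of the same technique (the sample table and text are illustrative):

    import re, random

    table = {r'\bdelve into\b': ["explore", "examine"]}
    text = "We delve into the landscape of modern NLP."
    for pattern, options in table.items():
        if re.search(pattern, text, re.IGNORECASE):
            text = re.sub(pattern, random.choice(options), text, flags=re.IGNORECASE)
    print(text)   # e.g. "We explore the landscape of modern NLP."

Note that one option is drawn per pattern and then applied to every match of that pattern in the text.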
+    def vary_sentence_structure(self, text):
+        """Vary sentence structures while maintaining meaning"""
         sentences = sent_tokenize(text)
+        varied_sentences = []

+        for i, sentence in enumerate(sentences):
+            # Occasionally add transitional phrases at the beginning
+            if i > 0 and len(sentence.split()) > 6 and random.random() < 0.15:
+                starter = random.choice(self.sentence_starters)
+                sentence = sentence[0].lower() + sentence[1:]
+                sentence = f"{starter} {sentence}"

+            # Convert some passive to active voice and vice versa
+            sentence = self.vary_voice(sentence)

+            # Restructure complex sentences occasionally
+            if len(sentence.split()) > 15 and random.random() < 0.2:
+                sentence = self.restructure_complex_sentence(sentence)

+            varied_sentences.append(sentence)
+
+        return " ".join(varied_sentences)
+    def vary_voice(self, sentence):
+        """Convert between active and passive voice occasionally"""
+        # Simple passive to active conversion patterns
+        passive_patterns = [
+            (r'(\w+) (?:is|are|was|were) (\w+ed|known|seen|used|made) by (.+)',
+             r'\3 \2 \1'),
+            (r'(\w+) (?:is|are|was|were) (\w+ed|known|seen|used|made)',
+             r'Someone \2 \1')
+        ]

+        for pattern, replacement in passive_patterns:
+            if re.search(pattern, sentence) and random.random() < 0.1:
+                sentence = re.sub(pattern, replacement, sentence)
+                break

+        return sentence
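The first passive pattern only fires on verbs matching `\w+ed|known|seen|used|made` and swaps the capture groups around "by"; checked in isolation:

    import re

    pat = r'(\w+) (?:is|are|was|were) (\w+ed|known|seen|used|made) by (.+)'
    print(re.sub(pat, r'\3 \2 \1', 'automation was adopted by the team'))
    # -> 'the team adopted automation'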
+
+    def restructure_complex_sentence(self, sentence):
+        """Restructure overly complex sentences"""
+        # Split long sentences at natural break points
+        if ',' in sentence and len(sentence.split()) > 15:
+            parts = sentence.split(',', 1)
+            if len(parts) == 2:
+                first_part = parts[0].strip()
+                second_part = parts[1].strip()

+                # Rejoin with different structure
+                connectors = ["Additionally", "Furthermore", "Moreover", "Also"]
                 connector = random.choice(connectors)
+                return f"{first_part}. {connector}, {second_part}"
+
+        return sentence
+
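Because `split(',', 1)` cuts only at the first comma, the rewrite is a single split-and-rejoin; a quick check with an illustrative sentence:

    sentence = "The tool rewrites flagged phrases first, the sentence-level passes run afterwards"
    first, second = [part.strip() for part in sentence.split(',', 1)]
    print(f"{first}. Moreover, {second}")
    # -> 'The tool rewrites flagged phrases first. Moreover, the sentence-level passes run afterwards'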
+    def apply_subtle_contractions(self, text):
+        """Apply professional contractions sparingly"""
+        for pattern, contraction in self.professional_contractions.items():
+            # Only apply contractions 30% of the time to maintain variation
+            if re.search(pattern, text, re.IGNORECASE) and random.random() < 0.3:
+                text = re.sub(pattern, contraction, text, flags=re.IGNORECASE)
+
+        return text
+
+    def enhance_vocabulary_diversity(self, text):
+        """Enhance vocabulary diversity using contextual synonyms"""
+        words = word_tokenize(text)
+        enhanced_words = []
+        word_frequency = defaultdict(int)
+
+        # Track word frequency to identify repetitive words
+        for word in words:
+            if word.isalpha() and len(word) > 4:
+                word_frequency[word.lower()] += 1
+
+        for word in words:
+            if (word.isalpha() and len(word) > 4 and
+                    word_frequency[word.lower()] > 1 and
+                    random.random() < 0.2):
+
+                synonym = self.get_contextual_synonym(word)
+                enhanced_words.append(synonym)
             else:
+                enhanced_words.append(word)

+        return ' '.join(enhanced_words)
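The frequency gate above only swaps words that recur; the same gate in isolation (illustrative input):

    from collections import defaultdict

    words = "model model results model data results".split()
    freq = defaultdict(int)
    for w in words:
        if w.isalpha() and len(w) > 4:
            freq[w.lower()] += 1
    repeated = [w for w in words if freq[w.lower()] > 1]   # synonym-swap candidates
    print(repeated)  # ['model', 'model', 'results', 'model', 'results']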
+    def add_natural_variation(self, text):
+        """Add natural human-like variations"""
         sentences = sent_tokenize(text)
+        varied_sentences = []

         for sentence in sentences:
+            # Occasionally vary sentence length and structure
+            if len(sentence.split()) > 20 and random.random() < 0.15:
+                # Split very long sentences
+                mid_point = len(sentence.split()) // 2
+                words = sentence.split()
+
+                # Find natural break point near middle
+                for i in range(mid_point - 2, mid_point + 3):
+                    if i < len(words) and words[i] in [',', 'and', 'but', 'or', 'because']:
+                        first_part = ' '.join(words[:i])
+                        second_part = ' '.join(words[i+1:])
+                        sentence = f"{first_part}. {second_part.capitalize()}"
+                        break

+            # Add subtle emphasis occasionally
+            if random.random() < 0.05:
+                sentence = self.add_subtle_emphasis(sentence)

+            varied_sentences.append(sentence)

+        return " ".join(varied_sentences)
+
+    def add_subtle_emphasis(self, sentence):
+        """Add very subtle emphasis that doesn't change meaning"""
+        emphasis_patterns = [
+            (r'\bvery important\b', "crucial"),
+            (r'\bvery significant\b', "highly significant"),
+            (r'\bvery effective\b', "highly effective"),
+            (r'\bvery useful\b', "particularly useful")
+        ]

+        for pattern, replacement in emphasis_patterns:
+            if re.search(pattern, sentence, re.IGNORECASE):
+                sentence = re.sub(pattern, replacement, sentence, flags=re.IGNORECASE)
+                break
+
+        return sentence
+    def final_coherence_check(self, text):
+        """Final check to ensure coherence and proper formatting"""
+        # Fix spacing issues
         text = re.sub(r'\s+', ' ', text)
+        text = re.sub(r'\s+([,.!?;:])', r'\1', text)
+        text = re.sub(r'([,.!?;:])\s*([A-Z])', r'\1 \2', text)

+        # Ensure proper capitalization
+        sentences = sent_tokenize(text)
+        corrected_sentences = []

+        for sentence in sentences:
+            if sentence and sentence[0].islower():
+                sentence = sentence[0].upper() + sentence[1:]
+            corrected_sentences.append(sentence)
+
+        text = " ".join(corrected_sentences)

+        # Remove any double periods or spaces
+        text = re.sub(r'\.+', '.', text)
+        text = re.sub(r'\s+', ' ', text)

         return text.strip()
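The three spacing fixes are easy to sanity-check in isolation:

    import re

    s = 'Hello ,  world .Next sentence'
    s = re.sub(r'\s+', ' ', s)                        # collapse whitespace runs
    s = re.sub(r'\s+([,.!?;:])', r'\1', s)            # drop space before punctuation
    s = re.sub(r'([,.!?;:])\s*([A-Z])', r'\1 \2', s)  # one space before a capital
    print(s)  # -> 'Hello, world. Next sentence'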
+    def advanced_humanize(self, text, passes=2):
+        """Apply sophisticated humanization that preserves meaning"""
         current_text = text

         for pass_num in range(passes):
+            print(f"Processing pass {pass_num + 1}/{passes}")

+            # Apply humanization techniques
+            current_text = self.preserve_meaning_replacement(current_text)
+            current_text = self.vary_sentence_structure(current_text)
+            current_text = self.enhance_vocabulary_diversity(current_text)
+            current_text = self.apply_subtle_contractions(current_text)
+            current_text = self.add_natural_variation(current_text)

+        # Final coherence and cleanup
+        current_text = self.final_coherence_check(current_text)

+        return current_text

     def get_readability_score(self, text):
         """Calculate readability score"""
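`get_readability_score` is collapsed in this view, but the two textstat functions imported at the top are straightforward to use:

    from textstat import flesch_reading_ease, flesch_kincaid_grade

    sample = "Short words and short sentences score as easy to read."
    print(flesch_reading_ease(sample))    # higher means easier; plain prose lands around 60-100
    print(flesch_kincaid_grade(sample))   # approximate US school grade level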
         except Exception as e:
             return f"Could not calculate readability: {str(e)}"

+    def humanize_text(self, text, intensity="professional"):
+        """Main humanization method with meaning preservation"""
         if not text or not text.strip():
             return "Please provide text to humanize."

         except Exception as nltk_error:
             return f"NLTK Error: {str(nltk_error)}. Please try again."

+        # Apply appropriate level of humanization
+        if intensity == "professional":
+            result = self.advanced_humanize(text, passes=2)
+        elif intensity == "enhanced":
+            result = self.advanced_humanize(text, passes=3)
+        else:  # light
+            result = self.advanced_humanize(text, passes=1)

         return result
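End to end, the class is used as below (a sketch; output varies from run to run because replacements are drawn with `random.choice`):

    humanizer = AdvancedAIHumanizer()
    draft = "Furthermore, we delve into a robust, cutting-edge approach."
    print(humanizer.humanize_text(draft, intensity="professional"))
    # e.g. "Additionally, we explore a solid, advanced approach."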
             return f"Error processing text: {str(e)}"

 def create_interface():
+    """Create the professional Gradio interface"""
     humanizer = AdvancedAIHumanizer()

     def process_text(input_text, intensity):

         except Exception as e:
             return f"Error: {str(e)}", "Processing error"

+    # Professional CSS styling
+    professional_css = """
+    .gradio-container {
+        font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
     }
     .main-header {
         text-align: center;
+        color: #2c3e50;
+        font-size: 2.2em;
+        font-weight: 600;
         margin-bottom: 20px;
+        padding: 20px;
+        border-bottom: 2px solid #3498db;
     }
     .feature-box {
+        background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
+        border-radius: 8px;
         padding: 20px;
+        margin: 15px 0;
+        border-left: 4px solid #3498db;
+        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
     }
+    .info-box {
+        background: #e8f5e8;
+        border-radius: 8px;
         padding: 15px;
         margin: 10px 0;
+        border-left: 4px solid #27ae60;
     }
     """

     with gr.Blocks(
+        title="Professional AI Humanizer",
         theme=gr.themes.Soft(),
+        css=professional_css
     ) as interface:

         gr.HTML("""
             <div class="main-header">
+                🎯 Professional AI Content Humanizer
             </div>
             <div style="text-align: center; margin-bottom: 30px;">
+                <h3>Meaning-Preserving AI Detection Bypass</h3>
+                <p style="font-size: 1.1em; color: #7f8c8d;">
+                    Advanced humanization while maintaining professional tone and original meaning
                 </p>
             </div>
         """)

         with gr.Row():
+            with gr.Column(scale=1):
                 input_text = gr.Textbox(
+                    label="Original Content",
+                    lines=12,
+                    placeholder="Enter your AI-generated content here...\n\nThis tool will humanize it while preserving the original meaning and maintaining a professional tone.",
+                    info="💡 Best results with content 100+ words",
                     show_copy_button=True
                 )

                 intensity = gr.Radio(
                     choices=[
+                        ("Light Processing", "light"),
+                        ("Professional Enhancement", "professional"),
+                        ("Advanced Humanization", "enhanced")
                     ],
+                    value="professional",
+                    label="🔧 Processing Level",
+                    info="Professional mode recommended for most content"
                 )

                 btn = gr.Button(
+                    "Humanize Content",
                     variant="primary",
+                    size="lg"
                 )

+            with gr.Column(scale=1):
                 output_text = gr.Textbox(
+                    label="Humanized Content",
+                    lines=12,
                     show_copy_button=True,
+                    info="Processed content ready for use"
                 )

                 readability = gr.Textbox(
+                    label="Content Analysis",
+                    lines=3,
+                    info="Readability metrics"
                 )

         gr.HTML("""
             <div class="feature-box">
+                <h3>🎯 Advanced Humanization Features:</h3>
+                <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 15px; margin: 15px 0;">
+                    <div class="info-box">
+                        <strong>Meaning Preservation:</strong><br>
+                        Maintains exact original meaning and intent
                     </div>
+                    <div class="info-box">
+                        <strong>Professional Tone:</strong><br>
+                        Keeps appropriate formality level
                     </div>
+                    <div class="info-box">
+                        <strong>Structure Variation:</strong><br>
+                        Natural sentence pattern diversity
                     </div>
+                    <div class="info-box">
+                        <strong>Smart Synonyms:</strong><br>
+                        Context-aware vocabulary enhancement
                     </div>
+                    <div class="info-box">
+                        <strong>Coherent Flow:</strong><br>
+                        Maintains logical progression
                     </div>
+                    <div class="info-box">
+                        <strong>⚡ Detection Bypass:</strong><br>
+                        Passes modern AI detection tools
                     </div>
                 </div>
             </div>
         """)

         # Event handlers
         btn.click(
             fn=process_text,

     return interface

 if __name__ == "__main__":
+    print("Starting Professional AI Humanizer...")
     app = create_interface()
     app.launch(
         server_name="0.0.0.0",
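The diff is truncated inside `app.launch(...)`. On Spaces the call typically completes as sketched below; only `server_name` is confirmed above, and the port is an assumption (Gradio's default, which Spaces expects):

    app.launch(
        server_name="0.0.0.0",   # bind all interfaces so the Spaces proxy can reach the app
        server_port=7860         # assumption: Gradio's default port, expected by Spaces
    )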