DZRobo
committed on
Commit
·
e42ce45
1
Parent(s):
42c372b
Add CWN and AGC guidance options to CADE25 nodes
Browse files

Introduces Conditioning Weight Normalization (CWN) and Adaptive Guidance Clipping (AGC) to both easy and hard CADE25 modules, with configurable parameters exposed in the code and presets. Updates mg_cade25.cfg to include default values for these new options and adjusts some scheduler and denoise settings for improved stability and control.
- mod/easy/mg_cade25_easy.py +56 -4
- mod/hard/mg_cade25.py +49 -6
- pressets/mg_cade25.cfg +32 -4
mod/easy/mg_cade25_easy.py
CHANGED
|
@@ -1322,7 +1322,9 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1322 |
mahiro_plus_enable: bool = False, mahiro_plus_strength: float = 0.5,
|
| 1323 |
eps_scale_enable: bool = False, eps_scale: float = 0.0,
|
| 1324 |
cfg_sched_type: str = "off", cfg_sched_min: float = 0.0, cfg_sched_max: float = 0.0,
|
| 1325 |
-
cfg_sched_gamma: float = 1.5, cfg_sched_u_pow: float = 1.0
|
|
|
|
|
|
|
| 1326 |
|
| 1327 |
"""Clone model and attach a cfg mixing function implementing RescaleCFG/FDG, CFGZero*/FD, or hybrid ZeResFDG.
|
| 1328 |
guidance_mode: 'default' | 'RescaleCFG' | 'RescaleFDG' | 'CFGZero*' | 'CFGZeroFD' | 'ZeResFDG'
|
|
@@ -1494,6 +1496,17 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1494 |
return cond * 0.0
|
| 1495 |
except Exception:
|
| 1496 |
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1497 |
# Project cond onto uncond subspace (batch-wise alpha)
|
| 1498 |
bsz = cond.shape[0]
|
| 1499 |
pos_flat = cond.view(bsz, -1)
|
|
@@ -1515,6 +1528,14 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1515 |
lg = _local_gain_for((cond.shape[-2], cond.shape[-1]))
|
| 1516 |
if lg is not None:
|
| 1517 |
resid = resid * lg.expand(-1, resid.shape[1], -1, -1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1518 |
noise_pred = uncond * alpha + cond_scale_eff * resid
|
| 1519 |
return noise_pred
|
| 1520 |
|
|
@@ -1536,6 +1557,14 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1536 |
cond = uncond + delta
|
| 1537 |
else:
|
| 1538 |
prev_delta["t"] = delta.detach()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1539 |
# After momentum: optionally apply FDG and rebuild cond
|
| 1540 |
if mode == "RescaleFDG":
|
| 1541 |
# Adaptive low gain if enabled
|
|
@@ -1595,8 +1624,22 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1595 |
x = x_orig / (sigma_ * sigma_ + 1.0)
|
| 1596 |
v_cond = ((x - (x_orig - cond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1597 |
v_uncond = ((x - (x_orig - uncond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1598 |
-
|
| 1599 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1600 |
ro_cfg = torch.std(v_cfg, dim=(1, 2, 3), keepdim=True).clamp_min(1e-6)
|
| 1601 |
v_rescaled = v_cfg * (ro_pos / ro_cfg)
|
| 1602 |
v_final = float(rescale_multiplier) * v_rescaled + (1.0 - float(rescale_multiplier)) * v_cfg
|
|
@@ -2198,6 +2241,13 @@ class ComfyAdaptiveDetailEnhancer25:
|
|
| 2198 |
cfg_sched_max = float(pv("cfg_sched_max", cfg))
|
| 2199 |
cfg_sched_gamma = float(pv("cfg_sched_gamma", 1.5))
|
| 2200 |
cfg_sched_u_pow = float(pv("cfg_sched_u_pow", 1.0))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2201 |
# Latent buffer (internal-only; configured via presets)
|
| 2202 |
latent_buffer = bool(pv("latent_buffer", True))
|
| 2203 |
lb_inject = float(pv("lb_inject", 0.25))
|
|
@@ -2450,7 +2500,9 @@ class ComfyAdaptiveDetailEnhancer25:
|
|
| 2450 |
mahiro_plus_enable=bool(muse_blend), mahiro_plus_strength=float(muse_blend_strength),
|
| 2451 |
eps_scale_enable=bool(eps_scale_enable), eps_scale=float(eps_scale),
|
| 2452 |
cfg_sched_type=str(cfg_sched), cfg_sched_min=float(cfg_sched_min), cfg_sched_max=float(cfg_sched_max),
|
| 2453 |
-
cfg_sched_gamma=float(cfg_sched_gamma), cfg_sched_u_pow=float(cfg_sched_u_pow)
|
|
|
|
|
|
|
| 2454 |
)
|
| 2455 |
# check once more right before the loop starts
|
| 2456 |
model_management.throw_exception_if_processing_interrupted()
|
|
|
|
| 1322 |
mahiro_plus_enable: bool = False, mahiro_plus_strength: float = 0.5,
|
| 1323 |
eps_scale_enable: bool = False, eps_scale: float = 0.0,
|
| 1324 |
cfg_sched_type: str = "off", cfg_sched_min: float = 0.0, cfg_sched_max: float = 0.0,
|
| 1325 |
+
cfg_sched_gamma: float = 1.5, cfg_sched_u_pow: float = 1.0,
|
| 1326 |
+
cwn_enable: bool = True, alpha_c: float = 1.0, alpha_u: float = 1.0,
|
| 1327 |
+
agc_enable: bool = True, agc_tau: float = 2.8):
|
| 1328 |
|
| 1329 |
"""Clone model and attach a cfg mixing function implementing RescaleCFG/FDG, CFGZero*/FD, or hybrid ZeResFDG.
|
| 1330 |
guidance_mode: 'default' | 'RescaleCFG' | 'RescaleFDG' | 'CFGZero*' | 'CFGZeroFD' | 'ZeResFDG'
|
|
|
|
| 1496 |
return cond * 0.0
|
| 1497 |
except Exception:
|
| 1498 |
pass
|
| 1499 |
+
# CWN for CFGZero branches: energy align cond/uncond before projection
|
| 1500 |
+
if bool(cwn_enable):
|
| 1501 |
+
try:
|
| 1502 |
+
_eps = 1e-6
|
| 1503 |
+
sc = (cond.pow(2).mean(dim=(1, 2, 3), keepdim=True).sqrt() + _eps)
|
| 1504 |
+
su = (uncond.pow(2).mean(dim=(1, 2, 3), keepdim=True).sqrt() + _eps)
|
| 1505 |
+
g = 0.5 * (sc + su)
|
| 1506 |
+
cond = cond * (float(alpha_c) * g / sc)
|
| 1507 |
+
uncond = uncond * (float(alpha_u) * g / su)
|
| 1508 |
+
except Exception:
|
| 1509 |
+
pass
|
| 1510 |
# Project cond onto uncond subspace (batch-wise alpha)
|
| 1511 |
bsz = cond.shape[0]
|
| 1512 |
pos_flat = cond.view(bsz, -1)
|
|
|
|
| 1528 |
lg = _local_gain_for((cond.shape[-2], cond.shape[-1]))
|
| 1529 |
if lg is not None:
|
| 1530 |
resid = resid * lg.expand(-1, resid.shape[1], -1, -1)
|
| 1531 |
+
# --- AGC for CFGZero branches ---
|
| 1532 |
+
if bool(agc_enable):
|
| 1533 |
+
try:
|
| 1534 |
+
t = float(max(0.5, agc_tau))
|
| 1535 |
+
resid = t * torch.tanh(resid / t)
|
| 1536 |
+
except Exception:
|
| 1537 |
+
pass
|
| 1538 |
+
|
| 1539 |
noise_pred = uncond * alpha + cond_scale_eff * resid
|
| 1540 |
return noise_pred
|
| 1541 |
|
|
|
|
| 1557 |
cond = uncond + delta
|
| 1558 |
else:
|
| 1559 |
prev_delta["t"] = delta.detach()
|
| 1560 |
+
# --- Adaptive Guidance Clipping (AGC) ---
|
| 1561 |
+
if bool(agc_enable):
|
| 1562 |
+
try:
|
| 1563 |
+
t = float(max(0.5, agc_tau))
|
| 1564 |
+
delta = t * torch.tanh(delta / t)
|
| 1565 |
+
except Exception:
|
| 1566 |
+
pass
|
| 1567 |
+
|
| 1568 |
# After momentum: optionally apply FDG and rebuild cond
|
| 1569 |
if mode == "RescaleFDG":
|
| 1570 |
# Adaptive low gain if enabled
|
|
|
|
| 1624 |
x = x_orig / (sigma_ * sigma_ + 1.0)
|
| 1625 |
v_cond = ((x - (x_orig - cond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1626 |
v_uncond = ((x - (x_orig - uncond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1627 |
+
|
| 1628 |
+
# CWN in v-space (more stable than eps-space)
|
| 1629 |
+
if bool(cwn_enable):
|
| 1630 |
+
try:
|
| 1631 |
+
_eps = 1e-6
|
| 1632 |
+
rc = (v_cond.pow(2).mean(dim=(1,2,3), keepdim=True).sqrt() + _eps)
|
| 1633 |
+
ru = (v_uncond.pow(2).mean(dim=(1,2,3), keepdim=True).sqrt() + _eps)
|
| 1634 |
+
v_cond_n = (v_cond / rc) * float(alpha_c)
|
| 1635 |
+
v_uncond_n = (v_uncond / ru) * float(alpha_u)
|
| 1636 |
+
except Exception:
|
| 1637 |
+
v_cond_n, v_uncond_n = v_cond, v_uncond
|
| 1638 |
+
else:
|
| 1639 |
+
v_cond_n, v_uncond_n = v_cond, v_uncond
|
| 1640 |
+
|
| 1641 |
+
v_cfg = v_uncond_n + cond_scale_eff * (v_cond_n - v_uncond_n)
|
| 1642 |
+
ro_pos = torch.std(v_cond_n, dim=(1, 2, 3), keepdim=True)
|
| 1643 |
ro_cfg = torch.std(v_cfg, dim=(1, 2, 3), keepdim=True).clamp_min(1e-6)
|
| 1644 |
v_rescaled = v_cfg * (ro_pos / ro_cfg)
|
| 1645 |
v_final = float(rescale_multiplier) * v_rescaled + (1.0 - float(rescale_multiplier)) * v_cfg
|
|
|
|
| 2241 |
cfg_sched_max = float(pv("cfg_sched_max", cfg))
|
| 2242 |
cfg_sched_gamma = float(pv("cfg_sched_gamma", 1.5))
|
| 2243 |
cfg_sched_u_pow = float(pv("cfg_sched_u_pow", 1.0))
|
| 2244 |
+
|
| 2245 |
+
# CWN + AGC defaults (hidden in Easy; controlled via presets)
|
| 2246 |
+
cwn_enable = bool(pv("cwn_enable", True))
|
| 2247 |
+
alpha_c = float(pv("alpha_c", 1.0))
|
| 2248 |
+
alpha_u = float(pv("alpha_u", 1.0))
|
| 2249 |
+
agc_enable = bool(pv("agc_enable", True))
|
| 2250 |
+
agc_tau = float(pv("agc_tau", 2.8))
|
| 2251 |
# Latent buffer (internal-only; configured via presets)
|
| 2252 |
latent_buffer = bool(pv("latent_buffer", True))
|
| 2253 |
lb_inject = float(pv("lb_inject", 0.25))
|
|
|
|
| 2500 |
mahiro_plus_enable=bool(muse_blend), mahiro_plus_strength=float(muse_blend_strength),
|
| 2501 |
eps_scale_enable=bool(eps_scale_enable), eps_scale=float(eps_scale),
|
| 2502 |
cfg_sched_type=str(cfg_sched), cfg_sched_min=float(cfg_sched_min), cfg_sched_max=float(cfg_sched_max),
|
| 2503 |
+
cfg_sched_gamma=float(cfg_sched_gamma), cfg_sched_u_pow=float(cfg_sched_u_pow),
|
| 2504 |
+
cwn_enable=bool(cwn_enable), alpha_c=float(alpha_c), alpha_u=float(alpha_u),
|
| 2505 |
+
agc_enable=bool(agc_enable), agc_tau=float(agc_tau)
|
| 2506 |
)
|
| 2507 |
# check once more right before the loop starts
|
| 2508 |
model_management.throw_exception_if_processing_interrupted()
|
mod/hard/mg_cade25.py
CHANGED
|
@@ -968,7 +968,10 @@ def _fdg_energy_fraction(delta: torch.Tensor, sigma: float = 1.0, radius: int =
|
|
| 968 |
def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: float, momentum_beta: float, cfg_curve: float, perp_damp: float, use_zero_init: bool=False, zero_init_steps: int=0, fdg_low: float = 0.6, fdg_high: float = 1.3, fdg_sigma: float = 1.0, ze_zero_steps: int = 0, ze_adaptive: bool = False, ze_r_switch_hi: float = 0.6, ze_r_switch_lo: float = 0.45, fdg_low_adaptive: bool = False, fdg_low_min: float = 0.45, fdg_low_max: float = 0.7, fdg_ema_beta: float = 0.8, use_local_mask: bool = False, mask_inside: float = 1.0, mask_outside: float = 1.0,
|
| 969 |
midfreq_enable: bool = False, midfreq_gain: float = 0.0, midfreq_sigma_lo: float = 0.8, midfreq_sigma_hi: float = 2.0,
|
| 970 |
mahiro_plus_enable: bool = False, mahiro_plus_strength: float = 0.5,
|
| 971 |
-
eps_scale_enable: bool = False, eps_scale: float = 0.0
|
|
|
|
|
|
|
|
|
|
| 972 |
|
| 973 |
"""Clone model and attach a cfg mixing function implementing RescaleCFG/FDG, CFGZero*/FD, or hybrid ZeResFDG.
|
| 974 |
guidance_mode: 'default' | 'RescaleCFG' | 'RescaleFDG' | 'CFGZero*' | 'CFGZeroFD' | 'ZeResFDG'
|
|
@@ -1073,6 +1076,17 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1073 |
return cond * 0.0
|
| 1074 |
except Exception:
|
| 1075 |
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1076 |
# Project cond onto uncond subspace (batch-wise alpha)
|
| 1077 |
bsz = cond.shape[0]
|
| 1078 |
pos_flat = cond.view(bsz, -1)
|
|
@@ -1115,6 +1129,13 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1115 |
cond = uncond + delta
|
| 1116 |
else:
|
| 1117 |
prev_delta["t"] = delta.detach()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1118 |
# After momentum: optionally apply FDG and rebuild cond
|
| 1119 |
if mode == "RescaleFDG":
|
| 1120 |
# Adaptive low gain if enabled
|
|
@@ -1197,8 +1218,20 @@ def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: flo
|
|
| 1197 |
x = x_orig / (sigma_ * sigma_ + 1.0)
|
| 1198 |
v_cond = ((x - (x_orig - cond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1199 |
v_uncond = ((x - (x_orig - uncond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1200 |
-
|
| 1201 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1202 |
ro_cfg = torch.std(v_cfg, dim=(1, 2, 3), keepdim=True).clamp_min(1e-6)
|
| 1203 |
v_rescaled = v_cfg * (ro_pos / ro_cfg)
|
| 1204 |
v_final = float(rescale_multiplier) * v_rescaled + (1.0 - float(rescale_multiplier)) * v_cfg
|
|
@@ -1465,6 +1498,13 @@ class ComfyAdaptiveDetailEnhancer25:
|
|
| 1465 |
"cfg_curve": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "S-curve shaping of cond_scale across steps (0=flat)."}),
|
| 1466 |
"perp_damp": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Remove a small portion of the component parallel to previous delta (0-1)."}),
|
| 1467 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1468 |
# NAG (Normalized Attention Guidance) toggles
|
| 1469 |
"use_nag": ("BOOLEAN", {"default": False, "tooltip": "Apply NAG inside CrossAttention (positive branch) during this node."}),
|
| 1470 |
"nag_scale": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 50.0, "step": 0.1}),
|
|
@@ -1555,8 +1595,9 @@ class ComfyAdaptiveDetailEnhancer25:
|
|
| 1555 |
Sharpnes_strenght=0.300, threshold=0.03, latent_compare=False, accumulation="default",
|
| 1556 |
reference_clean=False, reference_image=None, clip_vision=None, ref_preview=224, ref_threshold=0.03, ref_cooldown=1,
|
| 1557 |
guidance_mode="RescaleCFG", rescale_multiplier=0.7, momentum_beta=0.0, cfg_curve=0.0, perp_damp=0.0,
|
| 1558 |
-
|
| 1559 |
-
|
|
|
|
| 1560 |
use_zero_init=False, zero_init_steps=0,
|
| 1561 |
fdg_low=0.6, fdg_high=1.3, fdg_sigma=1.0, ze_res_zero_steps=2,
|
| 1562 |
ze_adaptive=False, ze_r_switch_hi=0.60, ze_r_switch_lo=0.45,
|
|
@@ -1719,7 +1760,9 @@ class ComfyAdaptiveDetailEnhancer25:
|
|
| 1719 |
fdg_low_adaptive=bool(fdg_low_adaptive), fdg_low_min=float(fdg_low_min), fdg_low_max=float(fdg_low_max), fdg_ema_beta=float(fdg_ema_beta),
|
| 1720 |
use_local_mask=False, mask_inside=1.0, mask_outside=1.0,
|
| 1721 |
mahiro_plus_enable=bool(muse_blend), mahiro_plus_strength=float(muse_blend_strength),
|
| 1722 |
-
eps_scale_enable=bool(eps_scale_enable), eps_scale=float(eps_scale)
|
|
|
|
|
|
|
| 1723 |
)
|
| 1724 |
# early interruption check before starting the loop
|
| 1725 |
try:
|
|
|
|
| 968 |
def _wrap_model_with_guidance(model, guidance_mode: str, rescale_multiplier: float, momentum_beta: float, cfg_curve: float, perp_damp: float, use_zero_init: bool=False, zero_init_steps: int=0, fdg_low: float = 0.6, fdg_high: float = 1.3, fdg_sigma: float = 1.0, ze_zero_steps: int = 0, ze_adaptive: bool = False, ze_r_switch_hi: float = 0.6, ze_r_switch_lo: float = 0.45, fdg_low_adaptive: bool = False, fdg_low_min: float = 0.45, fdg_low_max: float = 0.7, fdg_ema_beta: float = 0.8, use_local_mask: bool = False, mask_inside: float = 1.0, mask_outside: float = 1.0,
|
| 969 |
midfreq_enable: bool = False, midfreq_gain: float = 0.0, midfreq_sigma_lo: float = 0.8, midfreq_sigma_hi: float = 2.0,
|
| 970 |
mahiro_plus_enable: bool = False, mahiro_plus_strength: float = 0.5,
|
| 971 |
+
eps_scale_enable: bool = False, eps_scale: float = 0.0,
|
| 972 |
+
# NEW: CWN + AGC for Hard node too
|
| 973 |
+
cwn_enable: bool = True, alpha_c: float = 1.0, alpha_u: float = 1.0,
|
| 974 |
+
agc_enable: bool = True, agc_tau: float = 2.8):
|
| 975 |
|
| 976 |
"""Clone model and attach a cfg mixing function implementing RescaleCFG/FDG, CFGZero*/FD, or hybrid ZeResFDG.
|
| 977 |
guidance_mode: 'default' | 'RescaleCFG' | 'RescaleFDG' | 'CFGZero*' | 'CFGZeroFD' | 'ZeResFDG'
|
|
|
|
| 1076 |
return cond * 0.0
|
| 1077 |
except Exception:
|
| 1078 |
pass
|
| 1079 |
+
# CWN for CFGZero branches: align energies before projection
|
| 1080 |
+
if bool(cwn_enable):
|
| 1081 |
+
try:
|
| 1082 |
+
_eps = 1e-6
|
| 1083 |
+
sc = (cond.pow(2).mean(dim=(1, 2, 3), keepdim=True).sqrt() + _eps)
|
| 1084 |
+
su = (uncond.pow(2).mean(dim=(1, 2, 3), keepdim=True).sqrt() + _eps)
|
| 1085 |
+
g = 0.5 * (sc + su)
|
| 1086 |
+
cond = cond * (float(alpha_c) * g / sc)
|
| 1087 |
+
uncond = uncond * (float(alpha_u) * g / su)
|
| 1088 |
+
except Exception:
|
| 1089 |
+
pass
|
| 1090 |
# Project cond onto uncond subspace (batch-wise alpha)
|
| 1091 |
bsz = cond.shape[0]
|
| 1092 |
pos_flat = cond.view(bsz, -1)
|
|
|
|
| 1129 |
cond = uncond + delta
|
| 1130 |
else:
|
| 1131 |
prev_delta["t"] = delta.detach()
|
| 1132 |
+
# Adaptive Guidance Clipping on delta (Rescale path)
|
| 1133 |
+
if bool(agc_enable):
|
| 1134 |
+
try:
|
| 1135 |
+
t = float(max(0.5, agc_tau))
|
| 1136 |
+
delta = t * torch.tanh(delta / t)
|
| 1137 |
+
except Exception:
|
| 1138 |
+
pass
|
| 1139 |
# After momentum: optionally apply FDG and rebuild cond
|
| 1140 |
if mode == "RescaleFDG":
|
| 1141 |
# Adaptive low gain if enabled
|
|
|
|
| 1218 |
x = x_orig / (sigma_ * sigma_ + 1.0)
|
| 1219 |
v_cond = ((x - (x_orig - cond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1220 |
v_uncond = ((x - (x_orig - uncond)) * (sigma_ ** 2 + 1.0) ** 0.5) / (sigma_)
|
| 1221 |
+
# CWN in v-space for Rescale path (safer than eps-space)
|
| 1222 |
+
if bool(cwn_enable):
|
| 1223 |
+
try:
|
| 1224 |
+
_e = 1e-6
|
| 1225 |
+
rc = (v_cond.pow(2).mean(dim=(1,2,3), keepdim=True).sqrt() + _e)
|
| 1226 |
+
ru = (v_uncond.pow(2).mean(dim=(1,2,3), keepdim=True).sqrt() + _e)
|
| 1227 |
+
v_cond_n = (v_cond / rc) * float(alpha_c)
|
| 1228 |
+
v_uncond_n = (v_uncond / ru) * float(alpha_u)
|
| 1229 |
+
except Exception:
|
| 1230 |
+
v_cond_n, v_uncond_n = v_cond, v_uncond
|
| 1231 |
+
else:
|
| 1232 |
+
v_cond_n, v_uncond_n = v_cond, v_uncond
|
| 1233 |
+
v_cfg = v_uncond_n + cond_scale_eff * (v_cond_n - v_uncond_n)
|
| 1234 |
+
ro_pos = torch.std(v_cond_n, dim=(1, 2, 3), keepdim=True)
|
| 1235 |
ro_cfg = torch.std(v_cfg, dim=(1, 2, 3), keepdim=True).clamp_min(1e-6)
|
| 1236 |
v_rescaled = v_cfg * (ro_pos / ro_cfg)
|
| 1237 |
v_final = float(rescale_multiplier) * v_rescaled + (1.0 - float(rescale_multiplier)) * v_cfg
|
|
|
|
| 1498 |
"cfg_curve": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "S-curve shaping of cond_scale across steps (0=flat)."}),
|
| 1499 |
"perp_damp": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Remove a small portion of the component parallel to previous delta (0-1)."}),
|
| 1500 |
|
| 1501 |
+
# Conditioning Weight Normalization (CWN) + Adaptive Guidance Clipping (AGC)
|
| 1502 |
+
"cwn_enable": ("BOOLEAN", {"default": True, "tooltip": "Normalize cond/uncond energy to steady CFG mixing."}),
|
| 1503 |
+
"alpha_c": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01}),
|
| 1504 |
+
"alpha_u": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01}),
|
| 1505 |
+
"agc_enable": ("BOOLEAN", {"default": True, "tooltip": "Soft-clip residual guidance to prevent rare spikes."}),
|
| 1506 |
+
"agc_tau": ("FLOAT", {"default": 2.8, "min": 0.5, "max": 6.0, "step": 0.1}),
|
| 1507 |
+
|
| 1508 |
# NAG (Normalized Attention Guidance) toggles
|
| 1509 |
"use_nag": ("BOOLEAN", {"default": False, "tooltip": "Apply NAG inside CrossAttention (positive branch) during this node."}),
|
| 1510 |
"nag_scale": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 50.0, "step": 0.1}),
|
|
|
|
| 1595 |
Sharpnes_strenght=0.300, threshold=0.03, latent_compare=False, accumulation="default",
|
| 1596 |
reference_clean=False, reference_image=None, clip_vision=None, ref_preview=224, ref_threshold=0.03, ref_cooldown=1,
|
| 1597 |
guidance_mode="RescaleCFG", rescale_multiplier=0.7, momentum_beta=0.0, cfg_curve=0.0, perp_damp=0.0,
|
| 1598 |
+
cwn_enable=True, alpha_c=1.0, alpha_u=1.0, agc_enable=True, agc_tau=2.8,
|
| 1599 |
+
use_nag=False, nag_scale=4.0, nag_tau=2.5, nag_alpha=0.25,
|
| 1600 |
+
aqclip_enable=False, aq_tile=32, aq_stride=16, aq_alpha=2.0, aq_ema_beta=0.8, aq_attn=False,
|
| 1601 |
use_zero_init=False, zero_init_steps=0,
|
| 1602 |
fdg_low=0.6, fdg_high=1.3, fdg_sigma=1.0, ze_res_zero_steps=2,
|
| 1603 |
ze_adaptive=False, ze_r_switch_hi=0.60, ze_r_switch_lo=0.45,
|
|
|
|
| 1760 |
fdg_low_adaptive=bool(fdg_low_adaptive), fdg_low_min=float(fdg_low_min), fdg_low_max=float(fdg_low_max), fdg_ema_beta=float(fdg_ema_beta),
|
| 1761 |
use_local_mask=False, mask_inside=1.0, mask_outside=1.0,
|
| 1762 |
mahiro_plus_enable=bool(muse_blend), mahiro_plus_strength=float(muse_blend_strength),
|
| 1763 |
+
eps_scale_enable=bool(eps_scale_enable), eps_scale=float(eps_scale),
|
| 1764 |
+
cwn_enable=bool(cwn_enable), alpha_c=float(alpha_c), alpha_u=float(alpha_u),
|
| 1765 |
+
agc_enable=bool(agc_enable), agc_tau=float(agc_tau)
|
| 1766 |
)
|
| 1767 |
# early interruption check before starting the loop
|
| 1768 |
try:
|
pressets/mg_cade25.cfg
CHANGED
|
@@ -65,6 +65,13 @@ momentum_beta: 0.22
|
|
| 65 |
cfg_curve: 0.85
|
| 66 |
perp_damp: 0.80
|
| 67 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
# NAG
|
| 69 |
use_nag: true
|
| 70 |
nag_scale: 4.0
|
|
@@ -140,7 +147,7 @@ seed: 0
|
|
| 140 |
control_after_generate: randomize
|
| 141 |
steps: 10
|
| 142 |
cfg: 6.5
|
| 143 |
-
denoise: 0.
|
| 144 |
sampler_name: ddim
|
| 145 |
scheduler: MGHybrid
|
| 146 |
iterations: 2
|
|
@@ -199,6 +206,13 @@ momentum_beta: 0.15
|
|
| 199 |
cfg_curve: 0.85
|
| 200 |
perp_damp: 0.80
|
| 201 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 202 |
# NAG
|
| 203 |
use_nag: true
|
| 204 |
nag_scale: 4.0
|
|
@@ -335,6 +349,13 @@ momentum_beta: 0.15
|
|
| 335 |
cfg_curve: 0.85
|
| 336 |
perp_damp: 0.80
|
| 337 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 338 |
# NAG
|
| 339 |
use_nag: true
|
| 340 |
nag_scale: 4.0
|
|
@@ -456,9 +477,9 @@ ref_cooldown: 2
|
|
| 456 |
|
| 457 |
# cfg schedule (internal)
|
| 458 |
#cfg_sched: off | cosine | warmup | u
|
| 459 |
-
cfg_sched:
|
| 460 |
-
cfg_sched_min:
|
| 461 |
-
cfg_sched_max:
|
| 462 |
cfg_sched_gamma: 1.5
|
| 463 |
cfg_sched_u_pow: 1.2
|
| 464 |
|
|
@@ -470,6 +491,13 @@ momentum_beta: 0.15
|
|
| 470 |
cfg_curve: 0.60
|
| 471 |
perp_damp: 0.85
|
| 472 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 473 |
# NAG
|
| 474 |
use_nag: true
|
| 475 |
nag_scale: 3.6
|
|
|
|
| 65 |
cfg_curve: 0.85
|
| 66 |
perp_damp: 0.80
|
| 67 |
|
| 68 |
+
# CWN/AGC
|
| 69 |
+
cwn_enable: true
|
| 70 |
+
alpha_c: 1.0
|
| 71 |
+
alpha_u: 1.0
|
| 72 |
+
agc_enable: true
|
| 73 |
+
agc_tau: 2.8
|
| 74 |
+
|
| 75 |
# NAG
|
| 76 |
use_nag: true
|
| 77 |
nag_scale: 4.0
|
|
|
|
| 147 |
control_after_generate: randomize
|
| 148 |
steps: 10
|
| 149 |
cfg: 6.5
|
| 150 |
+
denoise: 0.44
|
| 151 |
sampler_name: ddim
|
| 152 |
scheduler: MGHybrid
|
| 153 |
iterations: 2
|
|
|
|
| 206 |
cfg_curve: 0.85
|
| 207 |
perp_damp: 0.80
|
| 208 |
|
| 209 |
+
# CWN/AGC
|
| 210 |
+
cwn_enable: true
|
| 211 |
+
alpha_c: 1.0
|
| 212 |
+
alpha_u: 1.0
|
| 213 |
+
agc_enable: true
|
| 214 |
+
agc_tau: 2.8
|
| 215 |
+
|
| 216 |
# NAG
|
| 217 |
use_nag: true
|
| 218 |
nag_scale: 4.0
|
|
|
|
| 349 |
cfg_curve: 0.85
|
| 350 |
perp_damp: 0.80
|
| 351 |
|
| 352 |
+
# CWN/AGC
|
| 353 |
+
cwn_enable: true
|
| 354 |
+
alpha_c: 1.0
|
| 355 |
+
alpha_u: 1.0
|
| 356 |
+
agc_enable: true
|
| 357 |
+
agc_tau: 2.8
|
| 358 |
+
|
| 359 |
# NAG
|
| 360 |
use_nag: true
|
| 361 |
nag_scale: 4.0
|
|
|
|
| 477 |
|
| 478 |
# cfg schedule (internal)
|
| 479 |
#cfg_sched: off | cosine | warmup | u
|
| 480 |
+
cfg_sched: cosine
|
| 481 |
+
cfg_sched_min: 3.9
|
| 482 |
+
cfg_sched_max: 8.0
|
| 483 |
cfg_sched_gamma: 1.5
|
| 484 |
cfg_sched_u_pow: 1.2
|
| 485 |
|
|
|
|
| 491 |
cfg_curve: 0.60
|
| 492 |
perp_damp: 0.85
|
| 493 |
|
| 494 |
+
# CWN/AGC
|
| 495 |
+
cwn_enable: true
|
| 496 |
+
alpha_c: 1.0
|
| 497 |
+
alpha_u: 1.0
|
| 498 |
+
agc_enable: true
|
| 499 |
+
agc_tau: 2.2
|
| 500 |
+
|
| 501 |
# NAG
|
| 502 |
use_nag: true
|
| 503 |
nag_scale: 3.6
|