# NOTE(review): the lines below were web-page artifacts captured with the file
# (Space status "Sleeping", "File size: 3,900 Bytes", commit "9bad583", and the
# line-number gutter 1..128). They are not Python and broke parsing; preserved
# here as a comment.
"""
Test your setup locally before deploying to HF
This helps identify issues without waiting for HF Space builds
"""
import os
import sys
import torch
print("=" * 80)
print(" " * 25 + "Local Setup Test")
print("=" * 80)
# Test 1: Python version (informational only — no minimum enforced here).
print("\n[1/7] Python Version")
print(f"✓ Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}")

# Test 2: PyTorch import, version, and GPU visibility.
print("\n[2/7] PyTorch")
try:
    print(f"✓ PyTorch version: {torch.__version__}")
    print(f"✓ CUDA available: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        print(f"✓ CUDA version: {torch.version.cuda}")
        print(f"✓ GPU: {torch.cuda.get_device_name(0)}")
    else:
        # Not fatal: the app can fall back to CPU, just slower.
        print("⚠️ No GPU detected (will use CPU)")
except Exception as e:
    print(f"✗ Error: {e}")
# Test 3: Critical third-party imports the app needs at runtime.
print("\n[3/7] Critical Dependencies")
deps = {
    'gradio': 'Gradio',
    'numpy': 'NumPy',
    'scipy': 'SciPy',
    'matplotlib': 'Matplotlib',
    'trimesh': 'Trimesh',
    'einops': 'Einops',
    'clip': 'OpenAI CLIP',
}
for module, name in deps.items():
    try:
        # Import by name only; we don't need the module object itself.
        __import__(module)
        print(f"✓ {name}")
    except ImportError:
        print(f"✗ {name} - NOT INSTALLED")
# Test 4: Model checkpoints — the pretrained weights the app loads on startup.
print("\n[4/7] Model Checkpoints")
checkpoints_dir = './checkpoints'
dataset_name = 't2m'

if os.path.exists(checkpoints_dir):
    print(f"✓ Checkpoints directory exists: {checkpoints_dir}")
    # The three model directories the app expects under checkpoints/t2m/.
    models_to_check = [
        f'{checkpoints_dir}/{dataset_name}/t2m_nlayer8_nhead6_ld384_ff1024_cdp0.1_rvq6ns',
        f'{checkpoints_dir}/{dataset_name}/rvq_nq6_dc512_nc512_noshare_qdp0.2',
        f'{checkpoints_dir}/{dataset_name}/length_estimator',
    ]
    for model_path in models_to_check:
        if os.path.exists(model_path):
            # Count files recursively so an empty or partial download is visible.
            n_files = sum(len(filenames) for _, _, filenames in os.walk(model_path))
            print(f"✓ {os.path.basename(model_path)} ({n_files} files)")
        else:
            print(f"✗ {os.path.basename(model_path)} - NOT FOUND")
else:
    print(f"✗ Checkpoints directory NOT FOUND: {checkpoints_dir}")
    print(" Models must be present for the app to work!")
# Test 5: Syntax-check app.py via compile() — catches SyntaxError without
# importing it (importing would run its top-level side effects).
print("\n[5/7] App.py Syntax")
try:
    with open('app.py', 'r', encoding='utf-8') as f:
        compile(f.read(), 'app.py', 'exec')
    print("✓ app.py syntax is valid")
except FileNotFoundError:
    print("✗ app.py not found")
except SyntaxError as e:
    print(f"✗ Syntax error: {e}")
# Test 6: Required files
print("\n[6/7] Required Files")
required = ['app.py', 'requirements.txt', 'README.md']
for file in required:
if os.path.exists(file):
size = os.path.getsize(file)
print(f"β {file} ({size} bytes)")
else:
print(f"β {file} - NOT FOUND")
# Test 7: Make sure the Gradio output directory can be created (write access).
print("\n[7/7] Output Directory")
output_dir = './gradio_outputs'
try:
    os.makedirs(output_dir, exist_ok=True)
    print(f"✓ Output directory ready: {output_dir}")
except Exception as e:
    # Broad catch is deliberate: any failure here (permissions, read-only FS)
    # should be reported, not crash the diagnostic.
    print(f"✗ Error creating output directory: {e}")
# Summary: minimum viable setup is model checkpoints on disk plus app.py.
print("\n" + "=" * 80)
print("SUMMARY")
print("=" * 80)

if os.path.exists(checkpoints_dir) and os.path.exists('app.py'):
    # NOTE(review): original string was split by a garbled emoji in extraction;
    # reconstructed as a single success line.
    print("\n✅ Basic setup looks good!")
    print("\nNext steps:")
    print("1. Test locally: python app.py")
    print("2. Visit http://localhost:7860 in browser")
    print("3. Try a prompt and check for errors")
    print("4. If it works locally, redeploy to HF")
else:
    print("\n⚠️ Setup incomplete!")
    if not os.path.exists(checkpoints_dir):
        print("\n❌ Missing: Model checkpoints")
        print(" • Download models to ./checkpoints/")
        print(" • Or configure model download in app.py")

print("\n" + "=" * 80)