#!/usr/bin/env python3
"""
Test cases for app.py inference pipeline with R² score validation.

This test validates that the app can load models, process geometries,
and achieve an R² score within ±1% of a known-good threshold for a
specific test case (cadillac_64, pressure, incompressible flow over car).
"""

########################################## global variables #########################################################
test_r2_threshold = 0.7014  # r2 = 0.7014 for cadillac_64
test_geometry_path = "/raid/ansysai/udbhav/alphaLPFM/datasets/examples/cadillac_64.vtp"
config_path = "/raid/ansysai/udbhav/alphaLPFM/configs/app_configs/Incompressible flow over car/config.yaml"
dataset = "Incompressible flow over car"
variable = "pressure"
velocity = 44.7387  # mph (from testcase config boundary_conditions_max)

## import libraries
import pytest
import os
import sys
import tempfile
import shutil
import numpy as np
import torch
import pyvista as pv
import uuid

# Add the parent directory to the path to import app modules
sys.path.append('/raid/ansysai/udbhav/alphaLPFM')

# Import app components
from app import (
    run_inference_fast,
    process_mesh_fast,
    handle_upload_fast,
    handle_infer_fast,
    GEOMETRY_CACHE,
    MODEL_STORE,
    REGISTRY,
    SESSION_MANAGER,
    get_user_directories,
    DECIMATION_CONFIG,
    auto_target_reduction,
)
from utils.app_utils import decimate_mesh


class TestAppInference:
    """Test class for app.py inference pipeline."""

    @pytest.fixture(scope="class")
    def test_session_id(self):
        """Generate a unique session ID, shared by all tests in this class."""
        # NOTE: the ID is random per test run (uuid4), not fixed; the
        # class scope only guarantees it is stable within one run.
        return "test_session_" + str(uuid.uuid4())

    @pytest.fixture(scope="class")
    def test_mesh_path(self):
        """Path to the test mesh file; skip dependent tests if it is missing."""
        if not os.path.exists(test_geometry_path):
            pytest.skip(f"Test mesh not found: {test_geometry_path}")
        return test_geometry_path

    @pytest.fixture(scope="class")
    def temp_dir(self):
        """Create a temporary directory for test outputs; removed on teardown."""
        temp_dir = tempfile.mkdtemp(prefix='pytest_app_')
        yield temp_dir
        # Cleanup
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)

    @pytest.fixture(scope="class")
    def testcase_config_path(self):
        """Path to the testcase config file; skip dependent tests if missing."""
        if not os.path.exists(config_path):
            pytest.skip(f"Testcase config not found: {config_path}")
        return config_path

    def test_model_registry(self):
        """Test that the model registry is properly configured."""
        assert REGISTRY is not None
        assert len(REGISTRY) > 0

        # Check that cadillac dataset is available
        cadillac_datasets = [
            k for k in REGISTRY if 'car' in k.lower() or 'cadillac' in k.lower()
        ]
        assert len(cadillac_datasets) > 0, "No cadillac/car datasets found in registry"

        print(f"Available datasets: {list(REGISTRY.keys())}")

    def _load_mesh_into_cache(self, test_mesh_path, test_session_id, file_hash):
        """Read, decimate and normalize the test mesh, then store it in the
        geometry cache for *test_session_id* (mirrors handle_upload_fast).

        Returns the processed PyVista mesh.
        """
        # Step 2: Mesh reading
        print("📖 Reading mesh file...")
        mesh = pv.read(test_mesh_path)
        print(f"📊 Loaded mesh with {mesh.n_points} points and {mesh.n_cells} cells")

        # Step 3: Apply decimation (same as handle_upload_fast)
        if DECIMATION_CONFIG["enabled"]:
            print("✂️ Applying mesh decimation...")
            auto_reduction = auto_target_reduction(mesh.n_cells)
            print(f"🔧 Auto target reduction: {auto_reduction:.2f} (faces: {mesh.n_cells})")
            decimation_config = DECIMATION_CONFIG.copy()
            decimation_config["target_reduction"] = auto_reduction
            mesh = decimate_mesh(mesh, decimation_config)
            print(f"✅ Decimation complete. New mesh: {mesh.n_cells} cells")

        # Step 4: Process geometry and compute normals (for triangular mesh).
        # Cell type 1 is VTK_VERTEX, i.e. a point cloud — skip normals then.
        if mesh.n_points != mesh.n_cells and mesh.get_cell(0).type != 1:
            print("🔺 Processing as triangular mesh...")
            mesh = mesh.compute_normals(
                consistent_normals=True,
                auto_orient_normals=True,
                point_normals=True,
                cell_normals=False,
                inplace=False,
            )

        # Step 5: Set mesh in geometry cache
        GEOMETRY_CACHE.set_mesh(test_session_id, test_mesh_path, mesh, file_hash)
        print(f"✅ Loaded mesh into cache: {mesh.n_points} points, {mesh.n_cells} cells")
        return mesh

    def test_inference_pipeline_r2_validation(self, test_mesh_path, testcase_config_path,
                                              temp_dir, test_session_id):
        """
        Test the complete inference pipeline and validate R² scores using testcase config.

        This test:
        1. Loads the cadillac_64 test mesh
        2. Uses testcase config for inference
        3. Validates that the R² score lies within ±1% of test_r2_threshold
        """
        if not os.path.exists(test_mesh_path):
            pytest.skip(f"Test mesh not found: {test_mesh_path}")

        # Set up test environment; remember the prior values so both
        # variables can be restored (or removed) in the finally block.
        original_data_dir = os.environ.get("APP_DATA_DIR")
        original_config_path = os.environ.get("TESTCASE_CONFIG_PATH")
        os.environ["APP_DATA_DIR"] = temp_dir
        os.environ["TESTCASE_CONFIG_PATH"] = testcase_config_path

        try:
            # Set up test session
            session = SESSION_MANAGER.create_session(test_session_id)
            print(f"🔑 Created test session: {test_session_id}")
            print(f"   Session data dir: {session['data_dir']}")

            # Get user-specific directories (same as handle_upload_fast)
            user_dirs = get_user_directories(test_session_id)

            # Follow the same steps as handle_upload_fast in app.py
            # Step 1: File hash calculation
            print(f"📄 Calculating file hash for: {os.path.basename(test_mesh_path)}")
            file_size = os.path.getsize(test_mesh_path) / (1024 * 1024)  # Size in MB
            print(f"📏 File size: {file_size:.2f} MB")
            file_hash = GEOMETRY_CACHE.get_file_hash(test_mesh_path)

            # Steps 2-5: read, decimate, compute normals, and cache the mesh
            mesh = self._load_mesh_into_cache(test_mesh_path, test_session_id, file_hash)

            print(f"🧪 Testing inference pipeline:")
            print(f"   Dataset: {dataset}")
            print(f"   Variable: {variable}")
            print(f"   Velocity: {velocity} mph")
            print(f"   Mesh points: {mesh.n_points}")
            print(f"   Session ID: {test_session_id}")

            # Run inference using the app's inference function
            try:
                # Convert velocity from mph to m/s for boundary conditions
                from app import mph_to_ms
                velocity_ms = mph_to_ms(velocity)
                boundary_conditions = {"freestream_velocity": velocity_ms}

                result = run_inference_fast(dataset, variable, boundary_conditions,
                                            session_id=test_session_id)

                # Validate results
                assert result is not None
                assert "r_squared" in result
                assert "pred" in result
                assert "tgt" in result

                r2_score = result["r_squared"]
                print(f"📊 Inference Results:")
                print(f"   R² Score: {r2_score:.6f}")
                print(f"   Absolute Error: {result['abs_err']:.6f}")
                print(f"   MSE Error: {result['mse_err']:.6f}")
                print(f"   Relative L2: {result['rel_l2']:.6f}")

                # Validate R² score: must lie within ±1% of the reference value
                lower = 0.99 * test_r2_threshold
                upper = 1.01 * test_r2_threshold
                if lower < r2_score < upper:
                    print(f"✅ R² validation passed: {r2_score:.6f} > {lower} and "
                          f"{r2_score:.6f} < {upper}")
                else:
                    pytest.fail(f"R² score validation failed: {r2_score:.6f} <= {lower} "
                                f"or {r2_score:.6f} >= {upper}")
            except Exception as e:
                # If model loading fails, skip the test
                if "model" in str(e).lower() or "download" in str(e).lower():
                    pytest.skip(f"Model loading failed (likely network issue): {e}")
                else:
                    # Bare raise preserves the original traceback
                    raise
        finally:
            # Restore original environment (use `is not None` so an empty
            # string value is restored rather than removed)
            if original_data_dir is not None:
                os.environ["APP_DATA_DIR"] = original_data_dir
            else:
                os.environ.pop("APP_DATA_DIR", None)
            if original_config_path is not None:
                os.environ["TESTCASE_CONFIG_PATH"] = original_config_path
            else:
                os.environ.pop("TESTCASE_CONFIG_PATH", None)


if __name__ == "__main__":
    # Run the tests
    pytest.main([__file__, "-v", "--tb=short"])