#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "inspect-ai @ git+https://github.com/dvsrepo/inspect_ai.git@fallback-to-modified-for-hf-fs",
#     "datasets",
#     "openai",
#     "transformers",
#     "accelerate",
#     "huggingface_hub",
#     "inspect-evals",
#     "pandas",
#     "pyarrow",
# ]
# ///
"""Runner that downloads an eval script and executes it using the inspect CLI with HF filesystem logging."""

import os
import subprocess
import sys
import tempfile
import urllib.request
from pathlib import Path

from inspect_ai.analysis import evals_df, samples_df


def export_logs_to_parquet(log_dir: str, dataset_repo: str) -> None:
    """Export eval logs to parquet format and upload them to a HuggingFace dataset.

    Args:
        log_dir: HF filesystem path to logs (e.g., "hf://datasets/username/name/logs")
        dataset_repo: Dataset repository ID (e.g., "datasets/username/name")
    """
    from huggingface_hub import HfApi

    # Get HF token from environment
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise ValueError("HF_TOKEN environment variable not set")

    api = HfApi(token=hf_token)

    # Remove 'datasets/' prefix for API calls
    repo_id = (
        dataset_repo.replace("datasets/", "")
        if dataset_repo.startswith("datasets/")
        else dataset_repo
    )

    # Read evals dataframe
    print(" Reading evals dataframe...")
    print(f" Log directory: {log_dir}")
    try:
        evals = evals_df(logs=log_dir)
        print(f" ✓ Read {len(evals)} eval records")
    except Exception as e:
        print(f" ✗ Error reading evals: {e}")
        raise

    # Read samples dataframe
    print(" Reading samples dataframe...")
    try:
        samples = samples_df(logs=log_dir)
        print(f" ✓ Read {len(samples)} sample records")
    except Exception as e:
        print(f" ✗ Error reading samples: {e}")
        raise

    # Write to temporary parquet files
    with tempfile.TemporaryDirectory() as tmpdir:
        evals_path = Path(tmpdir) / "evals.parquet"
        samples_path = Path(tmpdir) / "samples.parquet"

        print(f" Writing evals to parquet ({len(evals)} rows)...")
        evals.to_parquet(evals_path, index=False, engine="pyarrow")

        print(f" Writing samples to parquet ({len(samples)} rows)...")
        samples.to_parquet(samples_path, index=False, engine="pyarrow")

        # Upload parquet files to the repo root (HuggingFace will auto-detect them as separate data files)
        # We use descriptive names so they can be loaded separately
        print(" Uploading evals.parquet...")
        api.upload_file(
            path_or_fileobj=str(evals_path),
            path_in_repo="evals.parquet",
            repo_id=repo_id,
            repo_type="dataset",
            token=hf_token,
        )

        print(" Uploading samples.parquet...")
        api.upload_file(
            path_or_fileobj=str(samples_path),
            path_in_repo="samples.parquet",
            repo_id=repo_id,
            repo_type="dataset",
            token=hf_token,
        )

    print(
        f" ✓ Parquet files available at: https://huggingface.co/datasets/{repo_id}/tree/main"
    )


if __name__ == "__main__":
    if len(sys.argv) < 4:
        print(
            "Usage: eval_runner.py <eval_ref> <model> <dataset_repo> [--inspect-evals] [extra_args...]"
        )
        sys.exit(1)

    eval_ref = sys.argv[1]
    model = sys.argv[2]
    dataset_repo = sys.argv[3]

    # Check if this is an inspect_evals path
    is_inspect_evals = "--inspect-evals" in sys.argv
    extra_args = [arg for arg in sys.argv[4:] if arg != "--inspect-evals"]

    # Construct log directory path for HF filesystem
    if not dataset_repo.startswith("datasets/"):
        dataset_repo = f"datasets/{dataset_repo}"
    log_dir = f"hf://{dataset_repo}/logs"

    if is_inspect_evals:
        # Use inspect_evals path directly
        print(f"Using inspect_evals: {eval_ref}")
        eval_target = eval_ref
        cleanup_file = None
    else:
        # Download custom eval script
        print(f"Downloading eval from {eval_ref}...")
        with urllib.request.urlopen(eval_ref) as response:
            eval_code = response.read().decode("utf-8")

        eval_filename = "downloaded_eval.py"
        with open(eval_filename, "w") as f:
            f.write(eval_code)

        eval_target = eval_filename
        cleanup_file = eval_filename

    try:
        print(f"Running inspect eval with model {model}...")
        print(f"Logs will be written to: {log_dir}")

        # Build command with HF filesystem logging parameters
        cmd = [
            "inspect",
            "eval",
            eval_target,
            "--model",
            model,
            "--log-dir",
            log_dir,
            "--log-shared",  # Enable shared logging for remote filesystems
            "--log-buffer",
            "100",  # Buffer size for stable ZIP files
        ]
        cmd.extend(extra_args)

        print(f"Command: {' '.join(cmd)}")
        subprocess.run(cmd, check=True)

        print("\n✓ Eval completed!")
        print(
            f"Logs are available at: https://huggingface.co/{dataset_repo}/tree/main/logs"
        )

        # Export logs to parquet and upload to dataset
        print("\nExporting logs to parquet...")
        try:
            export_logs_to_parquet(log_dir, dataset_repo)
        except Exception as e:
            print(f"⚠ Warning: Could not export to parquet: {e}")
            print(
                f" Logs are still available at: https://huggingface.co/{dataset_repo}/tree/main/logs"
            )
    finally:
        if cleanup_file and os.path.exists(cleanup_file):
            os.unlink(cleanup_file)
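
# ---------------------------------------------------------------------------
# Example invocations (a sketch only; the eval URL, model name, and dataset
# repo below are hypothetical placeholders, not values from this project):
#
#   uv run eval_runner.py https://example.com/my_eval.py openai/gpt-4o-mini \
#       username/my-eval-logs --limit 10
#
#   uv run eval_runner.py inspect_evals/gsm8k openai/gpt-4o-mini \
#       username/my-eval-logs --inspect-evals
#
# Assuming the export step succeeds, the uploaded parquet files can be read
# back with pandas, which resolves hf:// URLs via huggingface_hub:
#
#   import pandas as pd
#   evals = pd.read_parquet("hf://datasets/username/my-eval-logs/evals.parquet")
#   samples = pd.read_parquet("hf://datasets/username/my-eval-logs/samples.parquet")
# ---------------------------------------------------------------------------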