# This file was autogenerated by uv via the following command:
#    uv export --no-dev --no-hashes
accelerate==1.12.0
    # via fastapi-apertus
annotated-doc==0.0.4
    # via
    #   fastapi
    #   typer
annotated-types==0.7.0
    # via pydantic
anyio==4.10.0
    # via
    #   httpx
    #   starlette
    #   watchfiles
bitsandbytes==0.49.2
    # via fastapi-apertus
certifi==2025.8.3
    # via
    #   httpcore
    #   httpx
click==8.2.1
    # via
    #   typer
    #   uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
    # via
    #   click
    #   loguru
    #   tqdm
    #   uvicorn
compressed-tensors==0.13.0
    # via fastapi-apertus
cuda-bindings==12.9.4 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
cuda-pathfinder==1.3.4 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via cuda-bindings
fastapi==0.129.0
    # via fastapi-apertus
filelock==3.19.1
    # via
    #   huggingface-hub
    #   torch
fsspec==2025.9.0
    # via
    #   huggingface-hub
    #   torch
h11==0.16.0
    # via
    #   httpcore
    #   uvicorn
hf-xet==1.2.0 ; platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
    # via huggingface-hub
httpcore==1.0.9
    # via httpx
httpx==0.28.1
    # via huggingface-hub
huggingface-hub==1.4.1
    # via
    #   accelerate
    #   tokenizers
    #   transformers
idna==3.10
    # via
    #   anyio
    #   httpx
jinja2==3.1.6
    # via torch
loguru==0.7.3
    # via compressed-tensors
markdown-it-py==4.0.0
    # via rich
markupsafe==3.0.2
    # via jinja2
mdurl==0.1.2
    # via markdown-it-py
mpmath==1.3.0
    # via sympy
networkx==3.5
    # via torch
numpy==2.4.2
    # via
    #   accelerate
    #   bitsandbytes
    #   fastapi-apertus
    #   transformers
nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via
    #   nvidia-cudnn-cu12
    #   nvidia-cusolver-cu12
    #   torch
nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via
    #   nvidia-cusolver-cu12
    #   torch
nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-nccl-cu12==2.27.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via
    #   nvidia-cufft-cu12
    #   nvidia-cusolver-cu12
    #   nvidia-cusparse-cu12
    #   torch
nvidia-nvshmem-cu12==3.4.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
packaging==25.0
    # via
    #   accelerate
    #   bitsandbytes
    #   huggingface-hub
    #   transformers
psutil==7.0.0
    # via accelerate
# NOTE(review): pydantic and pydantic-core are referenced by the via-comments of
# annotated-types, typing-extensions, and typing-inspection but have no entry of
# their own — their lines were presumably lost when this file was mangled.
# Regenerate with `uv export --no-dev --no-hashes` to restore them.
pygments==2.19.2
    # via rich
python-dotenv==1.1.1
    # via uvicorn
pyyaml==6.0.2
    # via
    #   accelerate
    #   huggingface-hub
    #   transformers
    #   uvicorn
regex==2025.9.1
    # via transformers
rich==14.3.2
    # via typer
safetensors==0.6.2
    # via
    #   accelerate
    #   transformers
setuptools==80.9.0
    # via torch
shellingham==1.5.4
    # via
    #   huggingface-hub
    #   typer
sniffio==1.3.1
    # via anyio
starlette==0.47.3
    # via fastapi
sympy==1.14.0
    # via torch
tokenizers==0.22.2
    # via transformers
torch==2.10.0
    # via
    #   accelerate
    #   bitsandbytes
    #   compressed-tensors
    #   fastapi-apertus
tqdm==4.67.1
    # via
    #   huggingface-hub
    #   transformers
transformers==5.2.0
    # via
    #   compressed-tensors
    #   fastapi-apertus
triton==3.6.0 ; platform_machine == 'x86_64' and sys_platform == 'linux'
    # via torch
typer==0.24.0
    # via typer-slim
typer-slim==0.24.0
    # via
    #   huggingface-hub
    #   transformers
typing-extensions==4.15.0
    # via
    #   anyio
    #   fastapi
    #   huggingface-hub
    #   pydantic
    #   pydantic-core
    #   starlette
    #   torch
    #   typing-inspection
typing-inspection==0.4.2
    # via
    #   fastapi
    #   pydantic
uvicorn==0.41.0
    # via fastapi-apertus
# NOTE(review): the following "# via uvicorn" comment is orphaned — its package
# line (likely uvloop, given the other uvicorn[standard] extras present:
# python-dotenv, watchfiles, websockets) was lost in the mangling. Regenerate
# with `uv export` to recover the missing entry.
    # via uvicorn
watchfiles==1.1.0
    # via uvicorn
websockets==15.0.1
    # via uvicorn
win32-setctime==1.2.0 ; sys_platform == 'win32'
    # via loguru