| id (string, length 11–50) | scripts (list, length 0–3) | code_urls (list, length 0–3) | execution_urls (list, length 0–3) | estimated_vram (float64, 0–1.74k) |
|---|---|---|---|---|
zai-org/GLM-4.7
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('zai-org_GLM-4.7_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7_0.txt|zai-org_GLM-4.7_0.txt>',\n )\n\n with open('zai-org_GLM-4.7_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7\")\n model = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.7_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7_1.txt|zai-org_GLM-4.7_1.txt>',\n )\n\n with open('zai-org_GLM-4.7_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7\")\nmodel = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7_1.txt"
] | 867.69
|
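Each `execution_urls` entry points at a plain-text log in the `model-metadata/code_execution_files` dataset, so the outcome of a row can be pulled down directly. A minimal sketch, assuming `huggingface_hub` is installed and using the GLM-4.7 log filename from the row above:

```python
# Fetch and print one execution log referenced by a row.
# Repo id and filename come from the execution_urls cell of the
# zai-org/GLM-4.7 row above.
from huggingface_hub import hf_hub_download

log_path = hf_hub_download(
    repo_id="model-metadata/code_execution_files",
    filename="zai-org_GLM-4.7_0.txt",
    repo_type="dataset",
)
with open(log_path, encoding="utf-8") as f:
    print(f.read())  # "Everything was good ..." marker or an appended traceback
```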
Qwen/Qwen-Image-Layered
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Layered\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Qwen_Qwen-Image-Layered_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Layered_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Layered_0.txt|Qwen_Qwen-Image-Layered_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Layered_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Layered\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Layered_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Layered_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Layered_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Layered_0.txt"
] | 0
|
nvidia/NitroGen
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('nvidia_NitroGen_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_NitroGen_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_NitroGen_0.txt|nvidia_NitroGen_0.txt>',\n )\n\n with open('nvidia_NitroGen_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_NitroGen_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_NitroGen_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
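Rows whose model or library type is unknown carry the sentinel strings "DO NOT EXECUTE" in `code_urls` and "WAS NOT EXECUTED" in `execution_urls` instead of real URLs, as in the nvidia/NitroGen row above. A minimal filtering sketch, assuming the table has been loaded as a list of dicts keyed by the header's column names:

```python
# Drop rows that were never executed before dereferencing their URLs.
# The sentinel strings are the ones visible in the table above.
SENTINELS = {"DO NOT EXECUTE", "WAS NOT EXECUTED"}

def executed_rows(rows):
    """Yield only rows whose URL columns contain real links."""
    for row in rows:
        urls = list(row["code_urls"]) + list(row["execution_urls"])
        if not SENTINELS.intersection(urls):
            yield row
```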
microsoft/TRELLIS.2-4B
|
[] |
[] |
[] | 0
|
Qwen/Qwen-Image-Edit-2511
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit-2511_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit-2511_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit-2511_0.txt|Qwen_Qwen-Image-Edit-2511_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit-2511_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit-2511_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit-2511_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit-2511_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit-2511_0.txt"
] | 0
|
Shakker-Labs/AWPortrait-Z
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"Shakker-Labs/AWPortrait-Z\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Shakker-Labs_AWPortrait-Z_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Shakker-Labs_AWPortrait-Z_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Shakker-Labs_AWPortrait-Z_0.txt|Shakker-Labs_AWPortrait-Z_0.txt>',\n )\n\n with open('Shakker-Labs_AWPortrait-Z_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"Shakker-Labs/AWPortrait-Z\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Shakker-Labs_AWPortrait-Z_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Shakker-Labs_AWPortrait-Z_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Shakker-Labs_AWPortrait-Z_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Shakker-Labs_AWPortrait-Z_0.txt"
] | 0
|
tencent/HY-WorldPlay
|
[] |
[] |
[] | 0
|
google/functiongemma-270m-it
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_functiongemma-270m-it_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_functiongemma-270m-it_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_functiongemma-270m-it_0.txt|google_functiongemma-270m-it_0.txt>',\n )\n\n with open('google_functiongemma-270m-it_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_functiongemma-270m-it_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_functiongemma-270m-it_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"google/functiongemma-270m-it\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('google_functiongemma-270m-it_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_functiongemma-270m-it_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_functiongemma-270m-it_1.txt|google_functiongemma-270m-it_1.txt>',\n )\n\n with open('google_functiongemma-270m-it_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"google/functiongemma-270m-it\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_functiongemma-270m-it_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_functiongemma-270m-it_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"google/functiongemma-270m-it\")\n model = AutoModelForCausalLM.from_pretrained(\"google/functiongemma-270m-it\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('google_functiongemma-270m-it_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_functiongemma-270m-it_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_functiongemma-270m-it_2.txt|google_functiongemma-270m-it_2.txt>',\n )\n\n with open('google_functiongemma-270m-it_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"google/functiongemma-270m-it\")\nmodel = AutoModelForCausalLM.from_pretrained(\"google/functiongemma-270m-it\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_functiongemma-270m-it_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_functiongemma-270m-it_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_functiongemma-270m-it_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_functiongemma-270m-it_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_functiongemma-270m-it_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_functiongemma-270m-it_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_functiongemma-270m-it_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_functiongemma-270m-it_2.txt"
] | 0.65
|
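The `estimated_vram` values appear to be in GB; it is plausible, though nothing in the table states it, that they follow the common weights-only heuristic of parameter count times bytes per parameter plus a fixed overhead factor. A hedged sketch of that heuristic, with the byte width and overhead factor below as illustrative assumptions:

```python
# Hedged sketch of a common weights-only VRAM estimate. Whether the
# estimated_vram column is actually derived this way is an assumption;
# bytes_per_param (2 for bf16) and the 1.2x overhead are illustrative.
def estimate_vram_gb(n_params: float, bytes_per_param: int = 2, overhead: float = 1.2) -> float:
    return n_params * bytes_per_param * overhead / 1e9

# A 270M-parameter model in bf16 lands near the 0.65 shown for
# google/functiongemma-270m-it above.
print(round(estimate_vram_gb(270e6), 2))  # 0.65
```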
facebook/sam-audio-large
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('facebook_sam-audio-large_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam-audio-large_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam-audio-large_0.txt|facebook_sam-audio-large_0.txt>',\n )\n\n with open('facebook_sam-audio-large_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam-audio-large_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam-audio-large_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('facebook_sam-audio-large_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam-audio-large_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam-audio-large_1.txt|facebook_sam-audio-large_1.txt>',\n )\n\n with open('facebook_sam-audio-large_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam-audio-large_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam-audio-large_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam-audio-large_0.py",
"DO NOT EXECUTE"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam-audio-large_0.txt",
"WAS NOT EXECUTED"
] | 0
|
XiaomiMiMo/MiMo-V2-Flash
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"XiaomiMiMo/MiMo-V2-Flash\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('XiaomiMiMo_MiMo-V2-Flash_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in XiaomiMiMo_MiMo-V2-Flash_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/XiaomiMiMo_MiMo-V2-Flash_0.txt|XiaomiMiMo_MiMo-V2-Flash_0.txt>',\n )\n\n with open('XiaomiMiMo_MiMo-V2-Flash_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"XiaomiMiMo/MiMo-V2-Flash\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='XiaomiMiMo_MiMo-V2-Flash_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='XiaomiMiMo_MiMo-V2-Flash_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"XiaomiMiMo/MiMo-V2-Flash\", trust_remote_code=True, dtype=\"auto\")\n with open('XiaomiMiMo_MiMo-V2-Flash_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in XiaomiMiMo_MiMo-V2-Flash_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/XiaomiMiMo_MiMo-V2-Flash_1.txt|XiaomiMiMo_MiMo-V2-Flash_1.txt>',\n )\n\n with open('XiaomiMiMo_MiMo-V2-Flash_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"XiaomiMiMo/MiMo-V2-Flash\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='XiaomiMiMo_MiMo-V2-Flash_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='XiaomiMiMo_MiMo-V2-Flash_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/XiaomiMiMo_MiMo-V2-Flash_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/XiaomiMiMo_MiMo-V2-Flash_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/XiaomiMiMo_MiMo-V2-Flash_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/XiaomiMiMo_MiMo-V2-Flash_1.txt"
] | 1,500.25
|
google/t5gemma-2-270m-270m
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_t5gemma-2-270m-270m_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-270m-270m_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-270m-270m_0.txt|google_t5gemma-2-270m-270m_0.txt>',\n )\n\n with open('google_t5gemma-2-270m-270m_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-270m-270m_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-270m-270m_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/t5gemma-2-270m-270m\")\n with open('google_t5gemma-2-270m-270m_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-270m-270m_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-270m-270m_1.txt|google_t5gemma-2-270m-270m_1.txt>',\n )\n\n with open('google_t5gemma-2-270m-270m_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/t5gemma-2-270m-270m\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-270m-270m_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-270m-270m_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSeq2SeqLM\n \n processor = AutoProcessor.from_pretrained(\"google/t5gemma-2-270m-270m\")\n model = AutoModelForSeq2SeqLM.from_pretrained(\"google/t5gemma-2-270m-270m\")\n with open('google_t5gemma-2-270m-270m_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-270m-270m_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-270m-270m_2.txt|google_t5gemma-2-270m-270m_2.txt>',\n )\n\n with open('google_t5gemma-2-270m-270m_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSeq2SeqLM\n\nprocessor = AutoProcessor.from_pretrained(\"google/t5gemma-2-270m-270m\")\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"google/t5gemma-2-270m-270m\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-270m-270m_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-270m-270m_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-270m-270m_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-270m-270m_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-270m-270m_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-270m-270m_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-270m-270m_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-270m-270m_2.txt"
] | 1.9
|
lightx2v/Qwen-Image-Edit-2511-Lightning
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"lightx2v/Qwen-Image-Edit-2511-Lightning\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt|lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt>',\n )\n\n with open('lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"lightx2v/Qwen-Image-Edit-2511-Lightning\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lightx2v_Qwen-Image-Edit-2511-Lightning_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lightx2v_Qwen-Image-Edit-2511-Lightning_0.txt"
] | 0
|
ResembleAI/chatterbox-turbo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('ResembleAI_chatterbox-turbo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ResembleAI_chatterbox-turbo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ResembleAI_chatterbox-turbo_0.txt|ResembleAI_chatterbox-turbo_0.txt>',\n )\n\n with open('ResembleAI_chatterbox-turbo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ResembleAI_chatterbox-turbo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ResembleAI_chatterbox-turbo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union-2.1
|
[] |
[] |
[] | 0
|
google/t5gemma-2-4b-4b
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_t5gemma-2-4b-4b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-4b-4b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-4b-4b_0.txt|google_t5gemma-2-4b-4b_0.txt>',\n )\n\n with open('google_t5gemma-2-4b-4b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-4b-4b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-4b-4b_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/t5gemma-2-4b-4b\")\n with open('google_t5gemma-2-4b-4b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-4b-4b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-4b-4b_1.txt|google_t5gemma-2-4b-4b_1.txt>',\n )\n\n with open('google_t5gemma-2-4b-4b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/t5gemma-2-4b-4b\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-4b-4b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-4b-4b_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSeq2SeqLM\n \n processor = AutoProcessor.from_pretrained(\"google/t5gemma-2-4b-4b\")\n model = AutoModelForSeq2SeqLM.from_pretrained(\"google/t5gemma-2-4b-4b\")\n with open('google_t5gemma-2-4b-4b_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-4b-4b_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-4b-4b_2.txt|google_t5gemma-2-4b-4b_2.txt>',\n )\n\n with open('google_t5gemma-2-4b-4b_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSeq2SeqLM\n\nprocessor = AutoProcessor.from_pretrained(\"google/t5gemma-2-4b-4b\")\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"google/t5gemma-2-4b-4b\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-4b-4b_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-4b-4b_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-4b-4b_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-4b-4b_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-4b-4b_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-4b-4b_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-4b-4b_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-4b-4b_2.txt"
] | 21.43
|
YatharthS/MiraTTS
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('YatharthS_MiraTTS_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in YatharthS_MiraTTS_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/YatharthS_MiraTTS_0.txt|YatharthS_MiraTTS_0.txt>',\n )\n\n with open('YatharthS_MiraTTS_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='YatharthS_MiraTTS_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='YatharthS_MiraTTS_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 1.23
|
apple/Sharp
|
[] |
[] |
[] | 0
|
LiquidAI/LFM2-2.6B-Exp
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"LiquidAI/LFM2-2.6B-Exp\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('LiquidAI_LFM2-2.6B-Exp_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2-2.6B-Exp_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2-2.6B-Exp_0.txt|LiquidAI_LFM2-2.6B-Exp_0.txt>',\n )\n\n with open('LiquidAI_LFM2-2.6B-Exp_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"LiquidAI/LFM2-2.6B-Exp\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2-2.6B-Exp_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2-2.6B-Exp_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"LiquidAI/LFM2-2.6B-Exp\")\n model = AutoModelForCausalLM.from_pretrained(\"LiquidAI/LFM2-2.6B-Exp\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('LiquidAI_LFM2-2.6B-Exp_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2-2.6B-Exp_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2-2.6B-Exp_1.txt|LiquidAI_LFM2-2.6B-Exp_1.txt>',\n )\n\n with open('LiquidAI_LFM2-2.6B-Exp_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"LiquidAI/LFM2-2.6B-Exp\")\nmodel = AutoModelForCausalLM.from_pretrained(\"LiquidAI/LFM2-2.6B-Exp\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2-2.6B-Exp_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2-2.6B-Exp_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2-2.6B-Exp_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2-2.6B-Exp_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2-2.6B-Exp_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2-2.6B-Exp_1.txt"
] | 13.09
|
FunAudioLLM/Fun-CosyVoice3-0.5B-2512
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('FunAudioLLM_Fun-CosyVoice3-0.5B-2512_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in FunAudioLLM_Fun-CosyVoice3-0.5B-2512_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/FunAudioLLM_Fun-CosyVoice3-0.5B-2512_0.txt|FunAudioLLM_Fun-CosyVoice3-0.5B-2512_0.txt>',\n )\n\n with open('FunAudioLLM_Fun-CosyVoice3-0.5B-2512_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='FunAudioLLM_Fun-CosyVoice3-0.5B-2512_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='FunAudioLLM_Fun-CosyVoice3-0.5B-2512_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
browser-use/bu-30b-a3b-preview
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"browser-use/bu-30b-a3b-preview\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('browser-use_bu-30b-a3b-preview_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in browser-use_bu-30b-a3b-preview_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/browser-use_bu-30b-a3b-preview_0.txt|browser-use_bu-30b-a3b-preview_0.txt>',\n )\n\n with open('browser-use_bu-30b-a3b-preview_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"browser-use/bu-30b-a3b-preview\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='browser-use_bu-30b-a3b-preview_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='browser-use_bu-30b-a3b-preview_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"browser-use/bu-30b-a3b-preview\")\n model = AutoModelForVision2Seq.from_pretrained(\"browser-use/bu-30b-a3b-preview\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('browser-use_bu-30b-a3b-preview_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in browser-use_bu-30b-a3b-preview_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/browser-use_bu-30b-a3b-preview_1.txt|browser-use_bu-30b-a3b-preview_1.txt>',\n )\n\n with open('browser-use_bu-30b-a3b-preview_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"browser-use/bu-30b-a3b-preview\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"browser-use/bu-30b-a3b-preview\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='browser-use_bu-30b-a3b-preview_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='browser-use_bu-30b-a3b-preview_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/browser-use_bu-30b-a3b-preview_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/browser-use_bu-30b-a3b-preview_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/browser-use_bu-30b-a3b-preview_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/browser-use_bu-30b-a3b-preview_1.txt"
] | 75.24
|
meituan-longcat/LongCat-Video-Avatar
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"meituan-longcat/LongCat-Video-Avatar\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('meituan-longcat_LongCat-Video-Avatar_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meituan-longcat_LongCat-Video-Avatar_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meituan-longcat_LongCat-Video-Avatar_0.txt|meituan-longcat_LongCat-Video-Avatar_0.txt>',\n )\n\n with open('meituan-longcat_LongCat-Video-Avatar_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"meituan-longcat/LongCat-Video-Avatar\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meituan-longcat_LongCat-Video-Avatar_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meituan-longcat_LongCat-Video-Avatar_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meituan-longcat_LongCat-Video-Avatar_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meituan-longcat_LongCat-Video-Avatar_0.txt"
] | 0
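Every script in this table follows the same try/except/finally harness: run the model snippet, log success or a traceback, notify Slack on failure, and always upload the log file. A distilled sketch of that shape (`run_snippet` is a hypothetical stand-in for the per-model code, and the Slack call is elided):

```
# Distilled harness shape shared by all scripts in this table (sketch only).
import traceback
from huggingface_hub import upload_file

log = "example_model_0.txt"
try:
    run_snippet()  # hypothetical: the model-specific code from the scripts column
    with open(log, "w", encoding="utf-8") as f:
        f.write(f"Everything was good in {log}")
except Exception:
    # the real scripts also post to Slack here before appending the traceback
    with open(log, "a", encoding="utf-8") as f:
        traceback.print_exc(file=f)
finally:
    upload_file(
        path_or_fileobj=log,
        repo_id="model-metadata/code_execution_files",
        path_in_repo=log,
        repo_type="dataset",
    )
```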
|
black-forest-labs/FLUX.2-dev
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.2-dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-dev_0.txt|black-forest-labs_FLUX.2-dev_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-dev_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('black-forest-labs_FLUX.2-dev_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-dev_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-dev_1.txt|black-forest-labs_FLUX.2-dev_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-dev_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-dev_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-dev_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-dev_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-dev_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-dev_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-dev_1.txt"
] | 0
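black-forest-labs/FLUX.2-dev is a gated checkpoint, which is why its first snippet is just `huggingface_hub.login()`. For unattended runs, a token-based variant avoids the interactive prompt (the HF_TOKEN environment variable is an assumption about your setup):

```
# Sketch: non-interactive auth for gated repos; assumes HF_TOKEN is set.
import os
from huggingface_hub import login

login(token=os.environ["HF_TOKEN"], new_session=False)
```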
|
Tongyi-MAI/Z-Image-Turbo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Tongyi-MAI_Z-Image-Turbo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Tongyi-MAI_Z-Image-Turbo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Tongyi-MAI_Z-Image-Turbo_0.txt|Tongyi-MAI_Z-Image-Turbo_0.txt>',\n )\n\n with open('Tongyi-MAI_Z-Image-Turbo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Tongyi-MAI_Z-Image-Turbo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Tongyi-MAI_Z-Image-Turbo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Tongyi-MAI_Z-Image-Turbo_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Tongyi-MAI_Z-Image-Turbo_0.txt"
] | 0
|
nunchaku-tech/nunchaku-z-image-turbo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"nunchaku-tech/nunchaku-z-image-turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('nunchaku-tech_nunchaku-z-image-turbo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nunchaku-tech_nunchaku-z-image-turbo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nunchaku-tech_nunchaku-z-image-turbo_0.txt|nunchaku-tech_nunchaku-z-image-turbo_0.txt>',\n )\n\n with open('nunchaku-tech_nunchaku-z-image-turbo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"nunchaku-tech/nunchaku-z-image-turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nunchaku-tech_nunchaku-z-image-turbo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nunchaku-tech_nunchaku-z-image-turbo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nunchaku-tech_nunchaku-z-image-turbo_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nunchaku-tech_nunchaku-z-image-turbo_0.txt"
] | 0
|
upstage/Solar-Open-100B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('upstage_Solar-Open-100B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in upstage_Solar-Open-100B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/upstage_Solar-Open-100B_0.txt|upstage_Solar-Open-100B_0.txt>',\n )\n\n with open('upstage_Solar-Open-100B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='upstage_Solar-Open-100B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='upstage_Solar-Open-100B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
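Rows whose library could not be detected (like upstage/Solar-Open-100B above) carry only the "type unknown" placeholder snippet and are marked "DO NOT EXECUTE" / "WAS NOT EXECUTED". A sketch for skipping them when iterating the table (the row-dict shape is an assumption about how you load it):

```
# Sketch: skip placeholder rows; the dict shape mirrors this table's columns.
rows = [
    {"id": "upstage/Solar-Open-100B", "code_urls": ["DO NOT EXECUTE"]},
]
runnable = [r for r in rows if r["code_urls"] != ["DO NOT EXECUTE"]]
print(len(runnable))  # -> 0
```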
|
nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt|nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt>',\n )\n\n with open('nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8\", dtype=\"auto\")\n with open('nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt|nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt>',\n )\n\n with open('nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_NVIDIA-Nemotron-3-Nano-30B-A3B-FP8_1.txt"
] | 152.93
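For the Nemotron row above, a weights-only back-of-envelope check of the FP8 figure is easy (my own arithmetic, not the pipeline that fills the estimated_vram column; the 30B parameter count is read off the model name):

```
# Sketch: weights-only memory for a 30B-parameter model stored in FP8.
params = 30e9          # from the model name (assumption)
bytes_per_param = 1    # FP8
print(f"{params * bytes_per_param / 2**30:.1f} GiB")  # ~27.9 GiB
# The row's estimated_vram (152.93) is much larger, so the column clearly
# budgets more than raw weights; its exact formula isn't documented here.
```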
|
microsoft/VibeVoice-Realtime-0.5B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"microsoft/VibeVoice-Realtime-0.5B\")\n with open('microsoft_VibeVoice-Realtime-0.5B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-Realtime-0.5B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-Realtime-0.5B_0.txt|microsoft_VibeVoice-Realtime-0.5B_0.txt>',\n )\n\n with open('microsoft_VibeVoice-Realtime-0.5B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"microsoft/VibeVoice-Realtime-0.5B\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-Realtime-0.5B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-Realtime-0.5B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import VibeVoiceStreamingForConditionalGenerationInference\n model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(\"microsoft/VibeVoice-Realtime-0.5B\", dtype=\"auto\")\n with open('microsoft_VibeVoice-Realtime-0.5B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-Realtime-0.5B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-Realtime-0.5B_1.txt|microsoft_VibeVoice-Realtime-0.5B_1.txt>',\n )\n\n with open('microsoft_VibeVoice-Realtime-0.5B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import VibeVoiceStreamingForConditionalGenerationInference\nmodel = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(\"microsoft/VibeVoice-Realtime-0.5B\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-Realtime-0.5B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-Realtime-0.5B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_1.txt"
] | 2.46
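The second VibeVoice snippet imports `VibeVoiceStreamingForConditionalGenerationInference` directly; if that class isn't present in your installed transformers, a generic fallback worth trying (an assumption, untested against this checkpoint) is `AutoModel` with remote code enabled:

```
# Sketch: generic fallback load if the VibeVoice streaming class is unavailable.
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "microsoft/VibeVoice-Realtime-0.5B",
    dtype="auto",
    trust_remote_code=True,  # lets the repo supply its own modeling code
)
```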
|
Tongyi-Zhiwen/QwenLong-L1.5-30B-A3B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Tongyi-Zhiwen_QwenLong-L1.5-30B-A3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Tongyi-Zhiwen_QwenLong-L1.5-30B-A3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Tongyi-Zhiwen_QwenLong-L1.5-30B-A3B_0.txt|Tongyi-Zhiwen_QwenLong-L1.5-30B-A3B_0.txt>',\n )\n\n with open('Tongyi-Zhiwen_QwenLong-L1.5-30B-A3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Tongyi-Zhiwen_QwenLong-L1.5-30B-A3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Tongyi-Zhiwen_QwenLong-L1.5-30B-A3B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 73.93
|
TurboDiffusion/TurboWan2.2-I2V-A14B-720P
|
[] |
[] |
[] | 0
|
deepseek-ai/DeepSeek-V3.2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-V3.2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2_0.txt|deepseek-ai_DeepSeek-V3.2_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2\", dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-V3.2_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2_1.txt|deepseek-ai_DeepSeek-V3.2_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2_1.txt"
] | 1659.65
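The DeepSeek-V3.2 row carries the largest estimate in this slice; a quick sketch translating it into a device count (the 80 GB-per-GPU figure is an assumption, e.g. H100-class cards):

```
# Sketch: how many 80 GB devices the row's estimate implies.
import math

estimated_vram_gb = 1659.65  # from the row above
per_gpu_gb = 80              # assumption: H100-class device
print(math.ceil(estimated_vram_gb / per_gpu_gb))  # -> 21
```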
|
facebook/sam3
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('facebook_sam3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_0.txt|facebook_sam3_0.txt>',\n )\n\n with open('facebook_sam3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"mask-generation\", model=\"facebook/sam3\")\n with open('facebook_sam3_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_1.txt|facebook_sam3_1.txt>',\n )\n\n with open('facebook_sam3_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"mask-generation\", model=\"facebook/sam3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoImageProcessor, AutoModel\n \n processor = AutoImageProcessor.from_pretrained(\"facebook/sam3\")\n model = AutoModel.from_pretrained(\"facebook/sam3\")\n with open('facebook_sam3_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_2.txt|facebook_sam3_2.txt>',\n )\n\n with open('facebook_sam3_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoImageProcessor, AutoModel\n\nprocessor = AutoImageProcessor.from_pretrained(\"facebook/sam3\")\nmodel = AutoModel.from_pretrained(\"facebook/sam3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_2.txt"
] | 4.16
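The sam3 row stops at constructing the mask-generation pipeline; a sketch of actually calling it on an image follows (the URL is the same documentation image used by other rows, and the output keys are the standard pipeline contract, untested against sam3 specifically):

```
# Sketch: run the row's mask-generation pipeline on a real image URL.
from transformers import pipeline

pipe = pipeline("mask-generation", model="facebook/sam3")
out = pipe(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"
)
print(out.keys())  # typically includes "masks" and "scores"
```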
|
ekwek/Soprano-80M
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"ekwek/Soprano-80M\")\n with open('ekwek_Soprano-80M_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ekwek_Soprano-80M_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ekwek_Soprano-80M_0.txt|ekwek_Soprano-80M_0.txt>',\n )\n\n with open('ekwek_Soprano-80M_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"ekwek/Soprano-80M\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ekwek_Soprano-80M_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ekwek_Soprano-80M_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"ekwek/Soprano-80M\")\n model = AutoModelForCausalLM.from_pretrained(\"ekwek/Soprano-80M\")\n with open('ekwek_Soprano-80M_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ekwek_Soprano-80M_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ekwek_Soprano-80M_1.txt|ekwek_Soprano-80M_1.txt>',\n )\n\n with open('ekwek_Soprano-80M_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"ekwek/Soprano-80M\")\nmodel = AutoModelForCausalLM.from_pretrained(\"ekwek/Soprano-80M\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ekwek_Soprano-80M_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ekwek_Soprano-80M_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ekwek_Soprano-80M_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ekwek_Soprano-80M_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ekwek_Soprano-80M_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ekwek_Soprano-80M_1.txt"
] | 0.19
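The Soprano-80M row only builds the text-to-speech pipeline; saving its output takes a couple more lines. Note that soundfile is not in the scripts' dependency list above, and the `{"audio", "sampling_rate"}` output shape is the usual transformers TTS contract rather than anything verified against this model:

```
# Sketch: synthesize and write a wav (soundfile is an extra dependency).
import numpy as np
import soundfile as sf
from transformers import pipeline

pipe = pipeline("text-to-speech", model="ekwek/Soprano-80M")
out = pipe("Hello from the snippet sprint.")
sf.write("out.wav", np.asarray(out["audio"]).squeeze(), out["sampling_rate"])
```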
|
google/t5gemma-2-1b-1b
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_t5gemma-2-1b-1b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-1b-1b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-1b-1b_0.txt|google_t5gemma-2-1b-1b_0.txt>',\n )\n\n with open('google_t5gemma-2-1b-1b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-1b-1b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-1b-1b_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/t5gemma-2-1b-1b\")\n with open('google_t5gemma-2-1b-1b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-1b-1b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-1b-1b_1.txt|google_t5gemma-2-1b-1b_1.txt>',\n )\n\n with open('google_t5gemma-2-1b-1b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/t5gemma-2-1b-1b\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-1b-1b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-1b-1b_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSeq2SeqLM\n \n processor = AutoProcessor.from_pretrained(\"google/t5gemma-2-1b-1b\")\n model = AutoModelForSeq2SeqLM.from_pretrained(\"google/t5gemma-2-1b-1b\")\n with open('google_t5gemma-2-1b-1b_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_t5gemma-2-1b-1b_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_t5gemma-2-1b-1b_2.txt|google_t5gemma-2-1b-1b_2.txt>',\n )\n\n with open('google_t5gemma-2-1b-1b_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSeq2SeqLM\n\nprocessor = AutoProcessor.from_pretrained(\"google/t5gemma-2-1b-1b\")\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"google/t5gemma-2-1b-1b\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_t5gemma-2-1b-1b_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_t5gemma-2-1b-1b_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-1b-1b_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-1b-1b_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_t5gemma-2-1b-1b_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-1b-1b_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-1b-1b_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_t5gemma-2-1b-1b_2.txt"
] | 5.12
|
Comfy-Org/z_image_turbo
|
[] |
[] |
[] | 0
|
zai-org/GLM-4.7-FP8
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7-FP8\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('zai-org_GLM-4.7-FP8_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7-FP8_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7-FP8_0.txt|zai-org_GLM-4.7-FP8_0.txt>',\n )\n\n with open('zai-org_GLM-4.7-FP8_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7-FP8\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7-FP8_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7-FP8_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7-FP8\")\n model = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7-FP8\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.7-FP8_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7-FP8_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7-FP8_1.txt|zai-org_GLM-4.7-FP8_1.txt>',\n )\n\n with open('zai-org_GLM-4.7-FP8_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7-FP8\")\nmodel = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7-FP8\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7-FP8_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7-FP8_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7-FP8_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7-FP8_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7-FP8_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7-FP8_1.txt"
] | 1735.97
|
Owen777/UltraFlux-v1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Owen777/UltraFlux-v1\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \" \"\n image = pipe(prompt).images[0]\n with open('Owen777_UltraFlux-v1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Owen777_UltraFlux-v1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Owen777_UltraFlux-v1_0.txt|Owen777_UltraFlux-v1_0.txt>',\n )\n\n with open('Owen777_UltraFlux-v1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Owen777/UltraFlux-v1\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \" \"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Owen777_UltraFlux-v1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Owen777_UltraFlux-v1_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Owen777_UltraFlux-v1_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Owen777_UltraFlux-v1_0.txt"
] | 0
|
FunAudioLLM/Fun-Audio-Chat-8B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForSeq2SeqLM\n model = AutoModelForSeq2SeqLM.from_pretrained(\"FunAudioLLM/Fun-Audio-Chat-8B\", dtype=\"auto\")\n with open('FunAudioLLM_Fun-Audio-Chat-8B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in FunAudioLLM_Fun-Audio-Chat-8B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/FunAudioLLM_Fun-Audio-Chat-8B_0.txt|FunAudioLLM_Fun-Audio-Chat-8B_0.txt>',\n )\n\n with open('FunAudioLLM_Fun-Audio-Chat-8B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForSeq2SeqLM\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"FunAudioLLM/Fun-Audio-Chat-8B\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='FunAudioLLM_Fun-Audio-Chat-8B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='FunAudioLLM_Fun-Audio-Chat-8B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/FunAudioLLM_Fun-Audio-Chat-8B_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/FunAudioLLM_Fun-Audio-Chat-8B_0.txt"
] | 22.89
|
mistralai/Devstral-Small-2-24B-Instruct-2512
|
[] |
[] |
[] | 58.14
|
FunAudioLLM/Fun-ASR-Nano-2512
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('FunAudioLLM_Fun-ASR-Nano-2512_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in FunAudioLLM_Fun-ASR-Nano-2512_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/FunAudioLLM_Fun-ASR-Nano-2512_0.txt|FunAudioLLM_Fun-ASR-Nano-2512_0.txt>',\n )\n\n with open('FunAudioLLM_Fun-ASR-Nano-2512_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='FunAudioLLM_Fun-ASR-Nano-2512_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='FunAudioLLM_Fun-ASR-Nano-2512_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
Qwen/Qwen3-VL-8B-Instruct
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_0.txt|Qwen_Qwen3-VL-8B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_1.txt|Qwen_Qwen3-VL-8B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.txt"
] | 21.23
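Both Qwen3-VL snippets above print raw decodes; with the pipeline form, the assistant turn can be pulled out directly (passing max_new_tokens as a pipeline kwarg and the chat-shaped generated_text return are standard image-text-to-text pipeline behavior):

```
# Sketch: unpack just the assistant reply from the row's pipeline snippet.
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="Qwen/Qwen3-VL-8B-Instruct")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
out = pipe(text=messages, max_new_tokens=40)
print(out[0]["generated_text"][-1]["content"])
```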
|
stepfun-ai/GELab-Zero-4B-preview
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"stepfun-ai/GELab-Zero-4B-preview\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('stepfun-ai_GELab-Zero-4B-preview_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_GELab-Zero-4B-preview_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_GELab-Zero-4B-preview_0.txt|stepfun-ai_GELab-Zero-4B-preview_0.txt>',\n )\n\n with open('stepfun-ai_GELab-Zero-4B-preview_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"stepfun-ai/GELab-Zero-4B-preview\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_GELab-Zero-4B-preview_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_GELab-Zero-4B-preview_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\n model = AutoModelForVision2Seq.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('stepfun-ai_GELab-Zero-4B-preview_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_GELab-Zero-4B-preview_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_GELab-Zero-4B-preview_1.txt|stepfun-ai_GELab-Zero-4B-preview_1.txt>',\n )\n\n with open('stepfun-ai_GELab-Zero-4B-preview_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_GELab-Zero-4B-preview_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_GELab-Zero-4B-preview_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_1.txt"
] | 10.75
|
google/gemma-scope-2
|
[] |
[] |
[] | 0
|
allenai/Molmo2-8B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForImageTextToText\n model = AutoModelForImageTextToText.from_pretrained(\"allenai/Molmo2-8B\", trust_remote_code=True, dtype=\"auto\")\n with open('allenai_Molmo2-8B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in allenai_Molmo2-8B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/allenai_Molmo2-8B_0.txt|allenai_Molmo2-8B_0.txt>',\n )\n\n with open('allenai_Molmo2-8B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForImageTextToText\nmodel = AutoModelForImageTextToText.from_pretrained(\"allenai/Molmo2-8B\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='allenai_Molmo2-8B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='allenai_Molmo2-8B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/allenai_Molmo2-8B_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/allenai_Molmo2-8B_0.txt"
] | 41.95
|
Phr00t/Qwen-Image-Edit-Rapid-AIO
|
[] |
[] |
[] | 0
|
Qwen/Qwen-Image-Edit-2509
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit-2509_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit-2509_0.txt|Qwen_Qwen-Image-Edit-2509_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.txt"
] | 0
|
Comfy-Org/Qwen-Image-Edit_ComfyUI
|
[] |
[] |
[] | 0
|
zai-org/RealVideo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('zai-org_RealVideo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_RealVideo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_RealVideo_0.txt|zai-org_RealVideo_0.txt>',\n )\n\n with open('zai-org_RealVideo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_RealVideo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_RealVideo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
zai-org/AutoGLM-Phone-9B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"zai-org/AutoGLM-Phone-9B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('zai-org_AutoGLM-Phone-9B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_AutoGLM-Phone-9B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_AutoGLM-Phone-9B_0.txt|zai-org_AutoGLM-Phone-9B_0.txt>',\n )\n\n with open('zai-org_AutoGLM-Phone-9B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"zai-org/AutoGLM-Phone-9B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_AutoGLM-Phone-9B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_AutoGLM-Phone-9B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForMultimodalLM\n \n processor = AutoProcessor.from_pretrained(\"zai-org/AutoGLM-Phone-9B\")\n model = AutoModelForMultimodalLM.from_pretrained(\"zai-org/AutoGLM-Phone-9B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_AutoGLM-Phone-9B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_AutoGLM-Phone-9B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_AutoGLM-Phone-9B_1.txt|zai-org_AutoGLM-Phone-9B_1.txt>',\n )\n\n with open('zai-org_AutoGLM-Phone-9B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForMultimodalLM\n\nprocessor = AutoProcessor.from_pretrained(\"zai-org/AutoGLM-Phone-9B\")\nmodel = AutoModelForMultimodalLM.from_pretrained(\"zai-org/AutoGLM-Phone-9B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_AutoGLM-Phone-9B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_AutoGLM-Phone-9B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_AutoGLM-Phone-9B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_AutoGLM-Phone-9B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_AutoGLM-Phone-9B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_AutoGLM-Phone-9B_1.txt"
] | 0
|
zai-org/GLM-4.6V-Flash
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"zai-org/GLM-4.6V-Flash\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('zai-org_GLM-4.6V-Flash_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6V-Flash_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6V-Flash_0.txt|zai-org_GLM-4.6V-Flash_0.txt>',\n )\n\n with open('zai-org_GLM-4.6V-Flash_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"zai-org/GLM-4.6V-Flash\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6V-Flash_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6V-Flash_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForMultimodalLM\n \n processor = AutoProcessor.from_pretrained(\"zai-org/GLM-4.6V-Flash\")\n model = AutoModelForMultimodalLM.from_pretrained(\"zai-org/GLM-4.6V-Flash\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.6V-Flash_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6V-Flash_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6V-Flash_1.txt|zai-org_GLM-4.6V-Flash_1.txt>',\n )\n\n with open('zai-org_GLM-4.6V-Flash_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForMultimodalLM\n\nprocessor = AutoProcessor.from_pretrained(\"zai-org/GLM-4.6V-Flash\")\nmodel = AutoModelForMultimodalLM.from_pretrained(\"zai-org/GLM-4.6V-Flash\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6V-Flash_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6V-Flash_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6V-Flash_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6V-Flash_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6V-Flash_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6V-Flash_1.txt"
] | 24.92
|
facebook/sam-audio-small
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('facebook_sam-audio-small_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam-audio-small_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam-audio-small_0.txt|facebook_sam-audio-small_0.txt>',\n )\n\n with open('facebook_sam-audio-small_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam-audio-small_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam-audio-small_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('facebook_sam-audio-small_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam-audio-small_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam-audio-small_1.txt|facebook_sam-audio-small_1.txt>',\n )\n\n with open('facebook_sam-audio-small_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam-audio-small_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam-audio-small_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam-audio-small_0.py",
"DO NOT EXECUTE"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam-audio-small_0.txt",
"WAS NOT EXECUTED"
] | 0
|
Aratako/T5Gemma-TTS-2b-2b
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('Aratako_T5Gemma-TTS-2b-2b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Aratako_T5Gemma-TTS-2b-2b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Aratako_T5Gemma-TTS-2b-2b_0.txt|Aratako_T5Gemma-TTS-2b-2b_0.txt>',\n )\n\n with open('Aratako_T5Gemma-TTS-2b-2b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Aratako_T5Gemma-TTS-2b-2b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Aratako_T5Gemma-TTS-2b-2b_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"Aratako/T5Gemma-TTS-2b-2b\", trust_remote_code=True)\n with open('Aratako_T5Gemma-TTS-2b-2b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Aratako_T5Gemma-TTS-2b-2b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Aratako_T5Gemma-TTS-2b-2b_1.txt|Aratako_T5Gemma-TTS-2b-2b_1.txt>',\n )\n\n with open('Aratako_T5Gemma-TTS-2b-2b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"Aratako/T5Gemma-TTS-2b-2b\", trust_remote_code=True)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Aratako_T5Gemma-TTS-2b-2b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Aratako_T5Gemma-TTS-2b-2b_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForSeq2SeqLM\n model = AutoModelForSeq2SeqLM.from_pretrained(\"Aratako/T5Gemma-TTS-2b-2b\", trust_remote_code=True, dtype=\"auto\")\n with open('Aratako_T5Gemma-TTS-2b-2b_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Aratako_T5Gemma-TTS-2b-2b_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Aratako_T5Gemma-TTS-2b-2b_2.txt|Aratako_T5Gemma-TTS-2b-2b_2.txt>',\n )\n\n with open('Aratako_T5Gemma-TTS-2b-2b_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForSeq2SeqLM\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"Aratako/T5Gemma-TTS-2b-2b\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Aratako_T5Gemma-TTS-2b-2b_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Aratako_T5Gemma-TTS-2b-2b_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Aratako_T5Gemma-TTS-2b-2b_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Aratako_T5Gemma-TTS-2b-2b_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Aratako_T5Gemma-TTS-2b-2b_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Aratako_T5Gemma-TTS-2b-2b_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Aratako_T5Gemma-TTS-2b-2b_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Aratako_T5Gemma-TTS-2b-2b_2.txt"
] | 12.87
|
LatitudeGames/Hearthfire-24B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('LatitudeGames_Hearthfire-24B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LatitudeGames_Hearthfire-24B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LatitudeGames_Hearthfire-24B_0.txt|LatitudeGames_Hearthfire-24B_0.txt>',\n )\n\n with open('LatitudeGames_Hearthfire-24B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LatitudeGames_Hearthfire-24B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LatitudeGames_Hearthfire-24B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 57.08
|
Nurburgring/BEYOND_REALITY_Z_IMAGE
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Nurburgring_BEYOND_REALITY_Z_IMAGE_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Nurburgring_BEYOND_REALITY_Z_IMAGE_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Nurburgring_BEYOND_REALITY_Z_IMAGE_0.txt|Nurburgring_BEYOND_REALITY_Z_IMAGE_0.txt>',\n )\n\n with open('Nurburgring_BEYOND_REALITY_Z_IMAGE_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Nurburgring_BEYOND_REALITY_Z_IMAGE_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Nurburgring_BEYOND_REALITY_Z_IMAGE_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
Comfy-Org/Qwen-Image-Layered_ComfyUI
|
[] |
[] |
[] | 0
|
openai/gpt-oss-20b
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('openai_gpt-oss-20b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_0.txt|openai_gpt-oss-20b_0.txt>',\n )\n\n with open('openai_gpt-oss-20b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\n model = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('openai_gpt-oss-20b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_1.txt|openai_gpt-oss-20b_1.txt>',\n )\n\n with open('openai_gpt-oss-20b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_1.txt"
] | 52.09
|
Maincode/Maincoder-1B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Maincode/Maincoder-1B\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Maincode_Maincoder-1B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Maincode_Maincoder-1B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Maincode_Maincoder-1B_0.txt|Maincode_Maincoder-1B_0.txt>',\n )\n\n with open('Maincode_Maincoder-1B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Maincode/Maincoder-1B\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Maincode_Maincoder-1B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Maincode_Maincoder-1B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"Maincode/Maincoder-1B\", trust_remote_code=True, dtype=\"auto\")\n with open('Maincode_Maincoder-1B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Maincode_Maincoder-1B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Maincode_Maincoder-1B_1.txt|Maincode_Maincoder-1B_1.txt>',\n )\n\n with open('Maincode_Maincoder-1B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"Maincode/Maincoder-1B\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Maincode_Maincoder-1B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Maincode_Maincoder-1B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Maincode_Maincoder-1B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Maincode_Maincoder-1B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Maincode_Maincoder-1B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Maincode_Maincoder-1B_1.txt"
] | 2.48
|
Kijai/WanVideo_comfy
|
[] |
[] |
[] | 0
|
Kevin-thu/StoryMem
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Kevin-thu_StoryMem_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Kevin-thu_StoryMem_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Kevin-thu_StoryMem_0.txt|Kevin-thu_StoryMem_0.txt>',\n )\n\n with open('Kevin-thu_StoryMem_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Kevin-thu_StoryMem_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Kevin-thu_StoryMem_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
Qwen/Qwen-Image-Edit
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit_0.txt|Qwen_Qwen-Image-Edit_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit_0.txt"
] | 0
|
zai-org/GLM-ASR-Nano-2512
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"automatic-speech-recognition\", model=\"zai-org/GLM-ASR-Nano-2512\")\n with open('zai-org_GLM-ASR-Nano-2512_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-ASR-Nano-2512_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-ASR-Nano-2512_0.txt|zai-org_GLM-ASR-Nano-2512_0.txt>',\n )\n\n with open('zai-org_GLM-ASR-Nano-2512_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"automatic-speech-recognition\", model=\"zai-org/GLM-ASR-Nano-2512\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-ASR-Nano-2512_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-ASR-Nano-2512_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSeq2SeqLM\n \n processor = AutoProcessor.from_pretrained(\"zai-org/GLM-ASR-Nano-2512\")\n model = AutoModelForSeq2SeqLM.from_pretrained(\"zai-org/GLM-ASR-Nano-2512\")\n with open('zai-org_GLM-ASR-Nano-2512_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-ASR-Nano-2512_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-ASR-Nano-2512_1.txt|zai-org_GLM-ASR-Nano-2512_1.txt>',\n )\n\n with open('zai-org_GLM-ASR-Nano-2512_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSeq2SeqLM\n\nprocessor = AutoProcessor.from_pretrained(\"zai-org/GLM-ASR-Nano-2512\")\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"zai-org/GLM-ASR-Nano-2512\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-ASR-Nano-2512_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-ASR-Nano-2512_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-ASR-Nano-2512_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-ASR-Nano-2512_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-ASR-Nano-2512_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-ASR-Nano-2512_1.txt"
] | 5.47
|
Nanbeige/Nanbeige4-3B-Thinking-2511
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Nanbeige/Nanbeige4-3B-Thinking-2511\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt|Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt>',\n )\n\n with open('Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Nanbeige/Nanbeige4-3B-Thinking-2511\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Nanbeige/Nanbeige4-3B-Thinking-2511\")\n model = AutoModelForCausalLM.from_pretrained(\"Nanbeige/Nanbeige4-3B-Thinking-2511\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt|Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt>',\n )\n\n with open('Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Nanbeige/Nanbeige4-3B-Thinking-2511\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Nanbeige/Nanbeige4-3B-Thinking-2511\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Nanbeige_Nanbeige4-3B-Thinking-2511_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Nanbeige_Nanbeige4-3B-Thinking-2511_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Nanbeige_Nanbeige4-3B-Thinking-2511_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Nanbeige_Nanbeige4-3B-Thinking-2511_1.txt"
] | 9.53
|
ResembleAI/chatterbox
|
[] |
[] |
[] | 0
|
xms991/Qwen-Image-Edit-2511-fp8-e4m3fn
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('xms991_Qwen-Image-Edit-2511-fp8-e4m3fn_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in xms991_Qwen-Image-Edit-2511-fp8-e4m3fn_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/xms991_Qwen-Image-Edit-2511-fp8-e4m3fn_0.txt|xms991_Qwen-Image-Edit-2511-fp8-e4m3fn_0.txt>',\n )\n\n with open('xms991_Qwen-Image-Edit-2511-fp8-e4m3fn_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='xms991_Qwen-Image-Edit-2511-fp8-e4m3fn_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='xms991_Qwen-Image-Edit-2511-fp8-e4m3fn_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
zai-org/GLM-TTS
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('zai-org_GLM-TTS_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-TTS_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-TTS_0.txt|zai-org_GLM-TTS_0.txt>',\n )\n\n with open('zai-org_GLM-TTS_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-TTS_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-TTS_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
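Each execution log listed above can be fetched directly from the `model-metadata/code_execution_files` dataset repo. A minimal sketch using `huggingface_hub` — the repo id and the example filename are taken verbatim from the table above (substitute any other `*.txt` entry the same way):

```python
# Fetch one execution log from the dataset repo referenced in the table.
# Assumes only what the table shows: the repo id
# "model-metadata/code_execution_files" and a filename such as
# "openai_gpt-oss-20b_0.txt".
from huggingface_hub import hf_hub_download

log_path = hf_hub_download(
    repo_id="model-metadata/code_execution_files",
    filename="openai_gpt-oss-20b_0.txt",
    repo_type="dataset",
)

# Print the log: either the success marker written by the script's
# try-branch, or the failing snippet followed by its traceback.
with open(log_path, encoding="utf-8") as f:
    print(f.read())
```

The corresponding generated script can be fetched the same way from `model-metadata/code_python_files`; rows whose URL columns read "DO NOT EXECUTE" / "WAS NOT EXECUTED" have no log to download.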