column          type           min  max
--------------  -------------  ---  -----
id              stringlengths  11   45
scripts         listlengths    0    3
code_urls       listlengths    0    3
execution_urls  listlengths    0    3
estimated_vram  float64        0    4.97k
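Each record below renders as five consecutive lines, one per column in the order above (id, scripts, code_urls, execution_urls, estimated_vram). As a minimal sketch of consuming such a table with the `datasets` library — the repo id here is a hypothetical placeholder, since this preview names only the repos that the URL columns point at, not the dataset hosting the table itself:

```python
from datasets import load_dataset

# Hypothetical repo id; substitute the actual dataset holding this table.
ds = load_dataset("model-metadata/code_snippets", split="train")

for row in ds:
    # `scripts`, `code_urls`, and `execution_urls` are parallel lists (0-3 items each).
    print(row["id"], row["estimated_vram"])
    for script, code_url, exec_url in zip(row["scripts"], row["code_urls"], row["execution_urls"]):
        print("  ", code_url, "->", exec_url)
```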
moonshotai/Kimi-K2-Thinking
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('moonshotai_Kimi-K2-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Thinking_0.txt|moonshotai_Kimi-K2-Thinking_0.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Thinking_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True, dtype=\"auto\")\n with open('moonshotai_Kimi-K2-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Thinking_1.txt|moonshotai_Kimi-K2-Thinking_1.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Thinking_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Thinking_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Thinking_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Thinking_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Thinking_1.txt" ]
0
vafipas663/Qwen-Edit-2509-Upscale-LoRA
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"vafipas663/Qwen-Edit-2509-Upscale-LoRA\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt|vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt>',\n )\n\n with open('vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"vafipas663/Qwen-Edit-2509-Upscale-LoRA\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/vafipas663_Qwen-Edit-2509-Upscale-LoRA_0.txt" ]
0
dx8152/Qwen-Edit-2509-Multiple-angles
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"dx8152/Qwen-Edit-2509-Multiple-angles\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_Qwen-Edit-2509-Multiple-angles_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Qwen-Edit-2509-Multiple-angles_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Qwen-Edit-2509-Multiple-angles_0.txt|dx8152_Qwen-Edit-2509-Multiple-angles_0.txt>',\n )\n\n with open('dx8152_Qwen-Edit-2509-Multiple-angles_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"dx8152/Qwen-Edit-2509-Multiple-angles\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Qwen-Edit-2509-Multiple-angles_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Qwen-Edit-2509-Multiple-angles_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_Qwen-Edit-2509-Multiple-angles_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_Qwen-Edit-2509-Multiple-angles_0.txt" ]
0
MiniMaxAI/MiniMax-M2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"MiniMaxAI/MiniMax-M2\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('MiniMaxAI_MiniMax-M2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in MiniMaxAI_MiniMax-M2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MiniMaxAI_MiniMax-M2_0.txt|MiniMaxAI_MiniMax-M2_0.txt>',\n )\n\n with open('MiniMaxAI_MiniMax-M2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"MiniMaxAI/MiniMax-M2\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='MiniMaxAI_MiniMax-M2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='MiniMaxAI_MiniMax-M2_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"MiniMaxAI/MiniMax-M2\", trust_remote_code=True, dtype=\"auto\")\n with open('MiniMaxAI_MiniMax-M2_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in MiniMaxAI_MiniMax-M2_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MiniMaxAI_MiniMax-M2_1.txt|MiniMaxAI_MiniMax-M2_1.txt>',\n )\n\n with open('MiniMaxAI_MiniMax-M2_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"MiniMaxAI/MiniMax-M2\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='MiniMaxAI_MiniMax-M2_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='MiniMaxAI_MiniMax-M2_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/MiniMaxAI_MiniMax-M2_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/MiniMaxAI_MiniMax-M2_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/MiniMaxAI_MiniMax-M2_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/MiniMaxAI_MiniMax-M2_1.txt" ]
1,107.58
datalab-to/chandra
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-to-text\", model=\"datalab-to/chandra\")\n with open('datalab-to_chandra_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in datalab-to_chandra_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/datalab-to_chandra_0.txt|datalab-to_chandra_0.txt>',\n )\n\n with open('datalab-to_chandra_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-to-text\", model=\"datalab-to/chandra\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='datalab-to_chandra_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='datalab-to_chandra_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"datalab-to/chandra\")\n model = AutoModelForVision2Seq.from_pretrained(\"datalab-to/chandra\")\n with open('datalab-to_chandra_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in datalab-to_chandra_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/datalab-to_chandra_1.txt|datalab-to_chandra_1.txt>',\n )\n\n with open('datalab-to_chandra_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"datalab-to/chandra\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"datalab-to/chandra\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='datalab-to_chandra_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='datalab-to_chandra_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/datalab-to_chandra_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/datalab-to_chandra_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/datalab-to_chandra_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/datalab-to_chandra_1.txt" ]
21.23
maya-research/maya1
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"maya-research/maya1\")\n with open('maya-research_maya1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in maya-research_maya1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/maya-research_maya1_0.txt|maya-research_maya1_0.txt>',\n )\n\n with open('maya-research_maya1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"maya-research/maya1\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='maya-research_maya1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='maya-research_maya1_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"maya-research/maya1\")\n model = AutoModelForCausalLM.from_pretrained(\"maya-research/maya1\")\n with open('maya-research_maya1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in maya-research_maya1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/maya-research_maya1_1.txt|maya-research_maya1_1.txt>',\n )\n\n with open('maya-research_maya1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"maya-research/maya1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"maya-research/maya1\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='maya-research_maya1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='maya-research_maya1_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/maya-research_maya1_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/maya-research_maya1_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/maya-research_maya1_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/maya-research_maya1_1.txt" ]
7.99
baidu/ERNIE-4.5-VL-28B-A3B-Thinking
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"baidu/ERNIE-4.5-VL-28B-A3B-Thinking\", trust_remote_code=True)\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt|baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt>',\n )\n\n with open('baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"baidu/ERNIE-4.5-VL-28B-A3B-Thinking\", trust_remote_code=True)\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"baidu/ERNIE-4.5-VL-28B-A3B-Thinking\", trust_remote_code=True, dtype=\"auto\")\n with open('baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt|baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt>',\n )\n\n with open('baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load 
model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"baidu/ERNIE-4.5-VL-28B-A3B-Thinking\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/baidu_ERNIE-4.5-VL-28B-A3B-Thinking_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/baidu_ERNIE-4.5-VL-28B-A3B-Thinking_1.txt" ]
71.83
dx8152/Qwen-Image-Edit-2509-Light_restoration
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"dx8152/Qwen-Image-Edit-2509-Light_restoration\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt|dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt>',\n )\n\n with open('dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"dx8152/Qwen-Image-Edit-2509-Light_restoration\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_Qwen-Image-Edit-2509-Light_restoration_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_Qwen-Image-Edit-2509-Light_restoration_0.txt" ]
0
salakash/SamKash-Tolstoy
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from peft import PeftModel\n from transformers import AutoModelForCausalLM\n \n base_model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B\")\n model = PeftModel.from_pretrained(base_model, \"salakash/SamKash-Tolstoy\")\n with open('salakash_SamKash-Tolstoy_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in salakash_SamKash-Tolstoy_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/salakash_SamKash-Tolstoy_0.txt|salakash_SamKash-Tolstoy_0.txt>',\n )\n\n with open('salakash_SamKash-Tolstoy_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom peft import PeftModel\nfrom transformers import AutoModelForCausalLM\n\nbase_model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B\")\nmodel = PeftModel.from_pretrained(base_model, \"salakash/SamKash-Tolstoy\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='salakash_SamKash-Tolstoy_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='salakash_SamKash-Tolstoy_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/salakash_SamKash-Tolstoy_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/salakash_SamKash-Tolstoy_0.txt" ]
0
ByteDance/BindWeave
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('ByteDance_BindWeave_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ByteDance_BindWeave_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ByteDance_BindWeave_0.txt|ByteDance_BindWeave_0.txt>',\n )\n\n with open('ByteDance_BindWeave_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ByteDance_BindWeave_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ByteDance_BindWeave_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
deepseek-ai/DeepSeek-OCR
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True)\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-OCR_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-OCR_0.txt|deepseek-ai_DeepSeek-OCR_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True)\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True, dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-OCR_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-OCR_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-OCR_1.txt|deepseek-ai_DeepSeek-OCR_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-OCR_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True, 
dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-OCR_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-OCR_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-OCR_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-OCR_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-OCR_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-OCR_1.txt" ]
8.08
zai-org/GLM-4.6
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.6\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('zai-org_GLM-4.6_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6_0.txt|zai-org_GLM-4.6_0.txt>',\n )\n\n with open('zai-org_GLM-4.6_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.6\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.6\")\n model = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.6\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.6_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6_1.txt|zai-org_GLM-4.6_1.txt>',\n )\n\n with open('zai-org_GLM-4.6_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.6\")\nmodel = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.6\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = 
tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6_1.txt" ]
863.94
tlennon-ie/qwen-edit-skin
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"tlennon-ie/qwen-edit-skin\")\n \n prompt = \"make the subjects skin details more prominent and natural\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('tlennon-ie_qwen-edit-skin_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in tlennon-ie_qwen-edit-skin_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/tlennon-ie_qwen-edit-skin_0.txt|tlennon-ie_qwen-edit-skin_0.txt>',\n )\n\n with open('tlennon-ie_qwen-edit-skin_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"tlennon-ie/qwen-edit-skin\")\n\nprompt = \"make the subjects skin details more prominent and natural\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='tlennon-ie_qwen-edit-skin_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='tlennon-ie_qwen-edit-skin_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/tlennon-ie_qwen-edit-skin_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/tlennon-ie_qwen-edit-skin_0.txt" ]
0
PleIAs/Baguettotron
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"PleIAs/Baguettotron\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('PleIAs_Baguettotron_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PleIAs_Baguettotron_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PleIAs_Baguettotron_0.txt|PleIAs_Baguettotron_0.txt>',\n )\n\n with open('PleIAs_Baguettotron_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"PleIAs/Baguettotron\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PleIAs_Baguettotron_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PleIAs_Baguettotron_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"PleIAs/Baguettotron\")\n model = AutoModelForCausalLM.from_pretrained(\"PleIAs/Baguettotron\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('PleIAs_Baguettotron_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PleIAs_Baguettotron_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PleIAs_Baguettotron_1.txt|PleIAs_Baguettotron_1.txt>',\n )\n\n with open('PleIAs_Baguettotron_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"PleIAs/Baguettotron\")\nmodel = AutoModelForCausalLM.from_pretrained(\"PleIAs/Baguettotron\")\nmessages = [\n {\"role\": 
\"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PleIAs_Baguettotron_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PleIAs_Baguettotron_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PleIAs_Baguettotron_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PleIAs_Baguettotron_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PleIAs_Baguettotron_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PleIAs_Baguettotron_1.txt" ]
0.78
moonshotai/Kimi-Linear-48B-A3B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-Linear-48B-A3B-Instruct\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt|moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt>',\n )\n\n with open('moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-Linear-48B-A3B-Instruct\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-Linear-48B-A3B-Instruct\", trust_remote_code=True, dtype=\"auto\")\n with open('moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt|moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt>',\n )\n\n with open('moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-Linear-48B-A3B-Instruct\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n 
path_or_fileobj='moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-Linear-48B-A3B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-Linear-48B-A3B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-Linear-48B-A3B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-Linear-48B-A3B-Instruct_1.txt" ]
118.95
Phr00t/Qwen-Image-Edit-Rapid-AIO
[]
[]
[]
0
aquif-ai/aquif-3.5-Max-42B-A3B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"aquif-ai/aquif-3.5-Max-42B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('aquif-ai_aquif-3.5-Max-42B-A3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in aquif-ai_aquif-3.5-Max-42B-A3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-3.5-Max-42B-A3B_0.txt|aquif-ai_aquif-3.5-Max-42B-A3B_0.txt>',\n )\n\n with open('aquif-ai_aquif-3.5-Max-42B-A3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"aquif-ai/aquif-3.5-Max-42B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='aquif-ai_aquif-3.5-Max-42B-A3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='aquif-ai_aquif-3.5-Max-42B-A3B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"aquif-ai/aquif-3.5-Max-42B-A3B\")\n model = AutoModelForCausalLM.from_pretrained(\"aquif-ai/aquif-3.5-Max-42B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('aquif-ai_aquif-3.5-Max-42B-A3B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in aquif-ai_aquif-3.5-Max-42B-A3B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-3.5-Max-42B-A3B_1.txt|aquif-ai_aquif-3.5-Max-42B-A3B_1.txt>',\n )\n\n with open('aquif-ai_aquif-3.5-Max-42B-A3B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, 
AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"aquif-ai/aquif-3.5-Max-42B-A3B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"aquif-ai/aquif-3.5-Max-42B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='aquif-ai_aquif-3.5-Max-42B-A3B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='aquif-ai_aquif-3.5-Max-42B-A3B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/aquif-ai_aquif-3.5-Max-42B-A3B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/aquif-ai_aquif-3.5-Max-42B-A3B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/aquif-ai_aquif-3.5-Max-42B-A3B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/aquif-ai_aquif-3.5-Max-42B-A3B_1.txt" ]
102.6
yonigozlan/EdgeTAM-hf
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"mask-generation\", model=\"yonigozlan/EdgeTAM-hf\")\n with open('yonigozlan_EdgeTAM-hf_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in yonigozlan_EdgeTAM-hf_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/yonigozlan_EdgeTAM-hf_0.txt|yonigozlan_EdgeTAM-hf_0.txt>',\n )\n\n with open('yonigozlan_EdgeTAM-hf_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"mask-generation\", model=\"yonigozlan/EdgeTAM-hf\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='yonigozlan_EdgeTAM-hf_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='yonigozlan_EdgeTAM-hf_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModel\n \n tokenizer = AutoTokenizer.from_pretrained(\"yonigozlan/EdgeTAM-hf\")\n model = AutoModel.from_pretrained(\"yonigozlan/EdgeTAM-hf\")\n with open('yonigozlan_EdgeTAM-hf_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in yonigozlan_EdgeTAM-hf_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/yonigozlan_EdgeTAM-hf_1.txt|yonigozlan_EdgeTAM-hf_1.txt>',\n )\n\n with open('yonigozlan_EdgeTAM-hf_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModel\n\ntokenizer = AutoTokenizer.from_pretrained(\"yonigozlan/EdgeTAM-hf\")\nmodel = AutoModel.from_pretrained(\"yonigozlan/EdgeTAM-hf\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='yonigozlan_EdgeTAM-hf_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='yonigozlan_EdgeTAM-hf_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/yonigozlan_EdgeTAM-hf_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/yonigozlan_EdgeTAM-hf_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/yonigozlan_EdgeTAM-hf_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/yonigozlan_EdgeTAM-hf_1.txt" ]
0.07
nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt|nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt>',\n )\n\n with open('nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt" ]
0
WeiboAI/VibeThinker-1.5B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"WeiboAI/VibeThinker-1.5B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('WeiboAI_VibeThinker-1.5B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in WeiboAI_VibeThinker-1.5B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/WeiboAI_VibeThinker-1.5B_0.txt|WeiboAI_VibeThinker-1.5B_0.txt>',\n )\n\n with open('WeiboAI_VibeThinker-1.5B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"WeiboAI/VibeThinker-1.5B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='WeiboAI_VibeThinker-1.5B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='WeiboAI_VibeThinker-1.5B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"WeiboAI/VibeThinker-1.5B\")\n model = AutoModelForCausalLM.from_pretrained(\"WeiboAI/VibeThinker-1.5B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('WeiboAI_VibeThinker-1.5B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in WeiboAI_VibeThinker-1.5B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/WeiboAI_VibeThinker-1.5B_1.txt|WeiboAI_VibeThinker-1.5B_1.txt>',\n )\n\n with open('WeiboAI_VibeThinker-1.5B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"WeiboAI/VibeThinker-1.5B\")\nmodel = 
AutoModelForCausalLM.from_pretrained(\"WeiboAI/VibeThinker-1.5B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='WeiboAI_VibeThinker-1.5B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='WeiboAI_VibeThinker-1.5B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/WeiboAI_VibeThinker-1.5B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/WeiboAI_VibeThinker-1.5B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/WeiboAI_VibeThinker-1.5B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/WeiboAI_VibeThinker-1.5B_1.txt" ]
4.3
Qwen/Qwen-Image-Edit-2509
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit-2509_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit-2509_0.txt|Qwen_Qwen-Image-Edit-2509_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.txt" ]
0
stepfun-ai/Step-Audio-EditX
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"stepfun-ai/Step-Audio-EditX\", trust_remote_code=True)\n with open('stepfun-ai_Step-Audio-EditX_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_Step-Audio-EditX_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_Step-Audio-EditX_0.txt|stepfun-ai_Step-Audio-EditX_0.txt>',\n )\n\n with open('stepfun-ai_Step-Audio-EditX_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"stepfun-ai/Step-Audio-EditX\", trust_remote_code=True)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_Step-Audio-EditX_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_Step-Audio-EditX_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"stepfun-ai/Step-Audio-EditX\", trust_remote_code=True, dtype=\"auto\")\n with open('stepfun-ai_Step-Audio-EditX_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_Step-Audio-EditX_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_Step-Audio-EditX_1.txt|stepfun-ai_Step-Audio-EditX_1.txt>',\n )\n\n with open('stepfun-ai_Step-Audio-EditX_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"stepfun-ai/Step-Audio-EditX\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_Step-Audio-EditX_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_Step-Audio-EditX_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_Step-Audio-EditX_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_Step-Audio-EditX_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_Step-Audio-EditX_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_Step-Audio-EditX_1.txt" ]
8.55
moonshotai/Kimi-K2-Instruct-0905
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('moonshotai_Kimi-K2-Instruct-0905_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct-0905_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Instruct-0905_0.txt|moonshotai_Kimi-K2-Instruct-0905_0.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Instruct-0905_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct-0905_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Instruct-0905_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True, dtype=\"auto\")\n with open('moonshotai_Kimi-K2-Instruct-0905_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct-0905_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Instruct-0905_1.txt|moonshotai_Kimi-K2-Instruct-0905_1.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Instruct-0905_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct-0905_1.txt',\n repo_id='model-metadata/code_execution_files',\n 
path_in_repo='moonshotai_Kimi-K2-Instruct-0905_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_1.txt" ]
4971.07
jzhang533/PaddleOCR-VL-For-Manga
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Please refer to the document for information on how to use the model. \n # https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/module_usage/module_overview.html\n with open('jzhang533_PaddleOCR-VL-For-Manga_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in jzhang533_PaddleOCR-VL-For-Manga_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/jzhang533_PaddleOCR-VL-For-Manga_0.txt|jzhang533_PaddleOCR-VL-For-Manga_0.txt>',\n )\n\n with open('jzhang533_PaddleOCR-VL-For-Manga_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Please refer to the document for information on how to use the model. \n# https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/module_usage/module_overview.html\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='jzhang533_PaddleOCR-VL-For-Manga_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='jzhang533_PaddleOCR-VL-For-Manga_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/jzhang533_PaddleOCR-VL-For-Manga_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/jzhang533_PaddleOCR-VL-For-Manga_0.txt" ]
2.32
autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt|autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt>',\n )\n\n with open('autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/autoweeb_Qwen-Image-Edit-2509-Photo-to-Anime_0.txt" ]
0
nvidia/ChronoEdit-14B-Diffusers
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"nvidia/ChronoEdit-14B-Diffusers\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('nvidia_ChronoEdit-14B-Diffusers_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_ChronoEdit-14B-Diffusers_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_ChronoEdit-14B-Diffusers_0.txt|nvidia_ChronoEdit-14B-Diffusers_0.txt>',\n )\n\n with open('nvidia_ChronoEdit-14B-Diffusers_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"nvidia/ChronoEdit-14B-Diffusers\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_ChronoEdit-14B-Diffusers_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_ChronoEdit-14B-Diffusers_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_ChronoEdit-14B-Diffusers_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_ChronoEdit-14B-Diffusers_0.txt" ]
0
PaddlePaddle/PaddleOCR-VL
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # See https://www.paddleocr.ai/latest/version3.x/pipeline_usage/PaddleOCR-VL.html to installation\n \n from paddleocr import PaddleOCRVL\n pipeline = PaddleOCRVL()\n output = pipeline.predict(\"path/to/document_image.png\")\n for res in output:\n \tres.print()\n \tres.save_to_json(save_path=\"output\")\n \tres.save_to_markdown(save_path=\"output\")\n with open('PaddlePaddle_PaddleOCR-VL_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PaddlePaddle_PaddleOCR-VL_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PaddlePaddle_PaddleOCR-VL_0.txt|PaddlePaddle_PaddleOCR-VL_0.txt>',\n )\n\n with open('PaddlePaddle_PaddleOCR-VL_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# See https://www.paddleocr.ai/latest/version3.x/pipeline_usage/PaddleOCR-VL.html to installation\n\nfrom paddleocr import PaddleOCRVL\npipeline = PaddleOCRVL()\noutput = pipeline.predict(\"path/to/document_image.png\")\nfor res in output:\n\tres.print()\n\tres.save_to_json(save_path=\"output\")\n\tres.save_to_markdown(save_path=\"output\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PaddlePaddle_PaddleOCR-VL_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PaddlePaddle_PaddleOCR-VL_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PaddlePaddle_PaddleOCR-VL_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PaddlePaddle_PaddleOCR-VL_0.txt" ]
2.32
dx8152/Qwen-Image-Edit-2509-Fusion
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"dx8152/Qwen-Image-Edit-2509-Fusion\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_Qwen-Image-Edit-2509-Fusion_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Qwen-Image-Edit-2509-Fusion_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Qwen-Image-Edit-2509-Fusion_0.txt|dx8152_Qwen-Image-Edit-2509-Fusion_0.txt>',\n )\n\n with open('dx8152_Qwen-Image-Edit-2509-Fusion_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"dx8152/Qwen-Image-Edit-2509-Fusion\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Qwen-Image-Edit-2509-Fusion_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Qwen-Image-Edit-2509-Fusion_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_Qwen-Image-Edit-2509-Fusion_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_Qwen-Image-Edit-2509-Fusion_0.txt" ]
0
briaai/FIBO
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('briaai_FIBO_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in briaai_FIBO_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/briaai_FIBO_0.txt|briaai_FIBO_0.txt>',\n )\n\n with open('briaai_FIBO_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='briaai_FIBO_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='briaai_FIBO_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"briaai/FIBO\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"A man holding a goose while screaming\"\n image = pipe(prompt).images[0]\n with open('briaai_FIBO_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in briaai_FIBO_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/briaai_FIBO_1.txt|briaai_FIBO_1.txt>',\n )\n\n with open('briaai_FIBO_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"briaai/FIBO\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"A man holding a goose while screaming\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='briaai_FIBO_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='briaai_FIBO_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/briaai_FIBO_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/briaai_FIBO_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/briaai_FIBO_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/briaai_FIBO_1.txt" ]
0
Phr00t/WAN2.2-14B-Rapid-AllInOne
[]
[]
[]
0
BAAI/Emu3.5
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"BAAI/Emu3.5\", dtype=\"auto\")\n with open('BAAI_Emu3.5_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in BAAI_Emu3.5_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/BAAI_Emu3.5_0.txt|BAAI_Emu3.5_0.txt>',\n )\n\n with open('BAAI_Emu3.5_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"BAAI/Emu3.5\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='BAAI_Emu3.5_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='BAAI_Emu3.5_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/BAAI_Emu3.5_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/BAAI_Emu3.5_0.txt" ]
82.58
Soul-AILab/SoulX-Podcast-1.7B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Soul-AILab_SoulX-Podcast-1.7B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Soul-AILab_SoulX-Podcast-1.7B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Soul-AILab_SoulX-Podcast-1.7B_0.txt|Soul-AILab_SoulX-Podcast-1.7B_0.txt>',\n )\n\n with open('Soul-AILab_SoulX-Podcast-1.7B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Soul-AILab_SoulX-Podcast-1.7B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Soul-AILab_SoulX-Podcast-1.7B_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
4.99
JunhaoZhuang/FlashVSR-v1.1
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('JunhaoZhuang_FlashVSR-v1.1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in JunhaoZhuang_FlashVSR-v1.1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/JunhaoZhuang_FlashVSR-v1.1_0.txt|JunhaoZhuang_FlashVSR-v1.1_0.txt>',\n )\n\n with open('JunhaoZhuang_FlashVSR-v1.1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='JunhaoZhuang_FlashVSR-v1.1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='JunhaoZhuang_FlashVSR-v1.1_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
openai/gpt-oss-20b
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('openai_gpt-oss-20b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_0.txt|openai_gpt-oss-20b_0.txt>',\n )\n\n with open('openai_gpt-oss-20b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\n model = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('openai_gpt-oss-20b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_1.txt|openai_gpt-oss-20b_1.txt>',\n )\n\n with open('openai_gpt-oss-20b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": 
\"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_1.txt" ]
52.09
meituan-longcat/LongCat-Video
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"meituan-longcat/LongCat-Video\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('meituan-longcat_LongCat-Video_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meituan-longcat_LongCat-Video_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meituan-longcat_LongCat-Video_0.txt|meituan-longcat_LongCat-Video_0.txt>',\n )\n\n with open('meituan-longcat_LongCat-Video_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"meituan-longcat/LongCat-Video\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meituan-longcat_LongCat-Video_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meituan-longcat_LongCat-Video_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meituan-longcat_LongCat-Video_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meituan-longcat_LongCat-Video_0.txt" ]
0
Alibaba-EI/SmartResume
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Alibaba-EI_SmartResume_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Alibaba-EI_SmartResume_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Alibaba-EI_SmartResume_0.txt|Alibaba-EI_SmartResume_0.txt>',\n )\n\n with open('Alibaba-EI_SmartResume_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Alibaba-EI_SmartResume_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Alibaba-EI_SmartResume_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
moonshotai/Kimi-K2-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Instruct\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('moonshotai_Kimi-K2-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Instruct_0.txt|moonshotai_Kimi-K2-Instruct_0.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Instruct\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Instruct\", trust_remote_code=True, dtype=\"auto\")\n with open('moonshotai_Kimi-K2-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Instruct_1.txt|moonshotai_Kimi-K2-Instruct_1.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Instruct\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct_1.txt" ]
4971.07
lovis93/next-scene-qwen-image-lora-2509
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"lovis93/next-scene-qwen-image-lora-2509\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('lovis93_next-scene-qwen-image-lora-2509_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lovis93_next-scene-qwen-image-lora-2509_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lovis93_next-scene-qwen-image-lora-2509_0.txt|lovis93_next-scene-qwen-image-lora-2509_0.txt>',\n )\n\n with open('lovis93_next-scene-qwen-image-lora-2509_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"lovis93/next-scene-qwen-image-lora-2509\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lovis93_next-scene-qwen-image-lora-2509_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lovis93_next-scene-qwen-image-lora-2509_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lovis93_next-scene-qwen-image-lora-2509_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lovis93_next-scene-qwen-image-lora-2509_0.txt" ]
0
FoundationVision/InfinityStar
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('FoundationVision_InfinityStar_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in FoundationVision_InfinityStar_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/FoundationVision_InfinityStar_0.txt|FoundationVision_InfinityStar_0.txt>',\n )\n\n with open('FoundationVision_InfinityStar_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='FoundationVision_InfinityStar_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='FoundationVision_InfinityStar_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
cerebras/Kimi-Linear-REAP-35B-A3B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"cerebras/Kimi-Linear-REAP-35B-A3B-Instruct\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt|cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt>',\n )\n\n with open('cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"cerebras/Kimi-Linear-REAP-35B-A3B-Instruct\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"cerebras/Kimi-Linear-REAP-35B-A3B-Instruct\", trust_remote_code=True, dtype=\"auto\")\n with open('cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt|cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt>',\n )\n\n with open('cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"cerebras/Kimi-Linear-REAP-35B-A3B-Instruct\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub 
import upload_file\n upload_file(\n path_or_fileobj='cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_Kimi-Linear-REAP-35B-A3B-Instruct_1.txt" ]
85.07
Wan-AI/Wan2.2-Animate-14B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Wan-AI/Wan2.2-Animate-14B\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Wan-AI_Wan2.2-Animate-14B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Wan-AI_Wan2.2-Animate-14B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Wan-AI_Wan2.2-Animate-14B_0.txt|Wan-AI_Wan2.2-Animate-14B_0.txt>',\n )\n\n with open('Wan-AI_Wan2.2-Animate-14B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Wan-AI/Wan2.2-Animate-14B\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Wan-AI_Wan2.2-Animate-14B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Wan-AI_Wan2.2-Animate-14B_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Wan-AI_Wan2.2-Animate-14B_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Wan-AI_Wan2.2-Animate-14B_0.txt" ]
0
openai/gpt-oss-120b
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-120b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('openai_gpt-oss-120b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-120b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-120b_0.txt|openai_gpt-oss-120b_0.txt>',\n )\n\n with open('openai_gpt-oss-120b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-120b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-120b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-120b_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-120b\")\n model = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-120b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('openai_gpt-oss-120b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-120b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-120b_1.txt|openai_gpt-oss-120b_1.txt>',\n )\n\n with open('openai_gpt-oss-120b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-120b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-120b\")\nmessages = [\n {\"role\": 
\"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-120b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-120b_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-120b_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-120b_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-120b_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-120b_1.txt" ]
291.57
Qwen/Qwen-Image-Edit
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit_0.txt|Qwen_Qwen-Image-Edit_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit_0.txt" ]
0
Qwen/Qwen3-VL-8B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_0.txt|Qwen_Qwen3-VL-8B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n 
from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_1.txt|Qwen_Qwen3-VL-8B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.txt" ]
21.23
dx8152/Qwen-Image-Edit-2509-Relight
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"dx8152/Qwen-Image-Edit-2509-Relight\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_Qwen-Image-Edit-2509-Relight_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Qwen-Image-Edit-2509-Relight_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Qwen-Image-Edit-2509-Relight_0.txt|dx8152_Qwen-Image-Edit-2509-Relight_0.txt>',\n )\n\n with open('dx8152_Qwen-Image-Edit-2509-Relight_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"dx8152/Qwen-Image-Edit-2509-Relight\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Qwen-Image-Edit-2509-Relight_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Qwen-Image-Edit-2509-Relight_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_Qwen-Image-Edit-2509-Relight_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_Qwen-Image-Edit-2509-Relight_0.txt" ]
0
peteromallet/Qwen-Image-Edit-InScene
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"peteromallet/Qwen-Image-Edit-InScene\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('peteromallet_Qwen-Image-Edit-InScene_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in peteromallet_Qwen-Image-Edit-InScene_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/peteromallet_Qwen-Image-Edit-InScene_0.txt|peteromallet_Qwen-Image-Edit-InScene_0.txt>',\n )\n\n with open('peteromallet_Qwen-Image-Edit-InScene_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"peteromallet/Qwen-Image-Edit-InScene\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='peteromallet_Qwen-Image-Edit-InScene_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='peteromallet_Qwen-Image-Edit-InScene_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/peteromallet_Qwen-Image-Edit-InScene_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/peteromallet_Qwen-Image-Edit-InScene_0.txt" ]
0
amazon/chronos-2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import pandas as pd\n from chronos import BaseChronosPipeline\n \n pipeline = BaseChronosPipeline.from_pretrained(\"amazon/chronos-2\", device_map=\"cuda\")\n \n # Load historical data\n context_df = pd.read_csv(\"https://autogluon.s3.us-west-2.amazonaws.com/datasets/timeseries/misc/AirPassengers.csv\")\n \n # Generate predictions\n pred_df = pipeline.predict_df(\n context_df,\n prediction_length=36, # Number of steps to forecast\n quantile_levels=[0.1, 0.5, 0.9], # Quantiles for probabilistic forecast\n id_column=\"item_id\", # Column identifying different time series\n timestamp_column=\"Month\", # Column with datetime information\n target=\"#Passengers\", # Column(s) with time series values to predict\n )\n with open('amazon_chronos-2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in amazon_chronos-2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/amazon_chronos-2_0.txt|amazon_chronos-2_0.txt>',\n )\n\n with open('amazon_chronos-2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport pandas as pd\nfrom chronos import BaseChronosPipeline\n\npipeline = BaseChronosPipeline.from_pretrained(\"amazon/chronos-2\", device_map=\"cuda\")\n\n# Load historical data\ncontext_df = pd.read_csv(\"https://autogluon.s3.us-west-2.amazonaws.com/datasets/timeseries/misc/AirPassengers.csv\")\n\n# Generate predictions\npred_df = pipeline.predict_df(\n context_df,\n prediction_length=36, # Number of steps to forecast\n quantile_levels=[0.1, 0.5, 0.9], # Quantiles for probabilistic forecast\n id_column=\"item_id\", # Column identifying different time series\n timestamp_column=\"Month\", # Column with datetime information\n target=\"#Passengers\", # Column(s) with time series values to predict\n)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='amazon_chronos-2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='amazon_chronos-2_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/amazon_chronos-2_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/amazon_chronos-2_0.txt" ]
0.58
Qwen/Qwen-Image
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Qwen_Qwen-Image_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image_0.txt|Qwen_Qwen-Image_0.txt>',\n )\n\n with open('Qwen_Qwen-Image_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image_0.txt" ]
0
Prior-Labs/tabpfn_2_5
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('Prior-Labs_tabpfn_2_5_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Prior-Labs_tabpfn_2_5_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Prior-Labs_tabpfn_2_5_0.txt|Prior-Labs_tabpfn_2_5_0.txt>',\n )\n\n with open('Prior-Labs_tabpfn_2_5_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Prior-Labs_tabpfn_2_5_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Prior-Labs_tabpfn_2_5_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Prior-Labs_tabpfn_2_5_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Prior-Labs_tabpfn_2_5_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Prior-Labs_tabpfn_2_5_1.txt|Prior-Labs_tabpfn_2_5_1.txt>',\n )\n\n with open('Prior-Labs_tabpfn_2_5_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Prior-Labs_tabpfn_2_5_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Prior-Labs_tabpfn_2_5_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Prior-Labs_tabpfn_2_5_0.py", "DO NOT EXECUTE" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Prior-Labs_tabpfn_2_5_0.txt", "WAS NOT EXECUTED" ]
0
openai/whisper-large-v3
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-large-v3\")\n with open('openai_whisper-large-v3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_whisper-large-v3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_whisper-large-v3_0.txt|openai_whisper-large-v3_0.txt>',\n )\n\n with open('openai_whisper-large-v3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-large-v3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_whisper-large-v3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_whisper-large-v3_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq\n \n processor = AutoProcessor.from_pretrained(\"openai/whisper-large-v3\")\n model = AutoModelForSpeechSeq2Seq.from_pretrained(\"openai/whisper-large-v3\")\n with open('openai_whisper-large-v3_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_whisper-large-v3_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_whisper-large-v3_1.txt|openai_whisper-large-v3_1.txt>',\n )\n\n with open('openai_whisper-large-v3_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSpeechSeq2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"openai/whisper-large-v3\")\nmodel = AutoModelForSpeechSeq2Seq.from_pretrained(\"openai/whisper-large-v3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_whisper-large-v3_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_whisper-large-v3_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_whisper-large-v3_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_whisper-large-v3_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_whisper-large-v3_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_whisper-large-v3_1.txt" ]
7.47
YaoJiefu/multiple-characters
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('YaoJiefu_multiple-characters_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in YaoJiefu_multiple-characters_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/YaoJiefu_multiple-characters_0.txt|YaoJiefu_multiple-characters_0.txt>',\n )\n\n with open('YaoJiefu_multiple-characters_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='YaoJiefu_multiple-characters_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='YaoJiefu_multiple-characters_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
sentence-transformers/all-MiniLM-L6-v2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"sentence-transformers/all-MiniLM-L6-v2\")\n \n sentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [4, 4]\n with open('sentence-transformers_all-MiniLM-L6-v2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in sentence-transformers_all-MiniLM-L6-v2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/sentence-transformers_all-MiniLM-L6-v2_0.txt|sentence-transformers_all-MiniLM-L6-v2_0.txt>',\n )\n\n with open('sentence-transformers_all-MiniLM-L6-v2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"sentence-transformers/all-MiniLM-L6-v2\")\n\nsentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [4, 4]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='sentence-transformers_all-MiniLM-L6-v2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='sentence-transformers_all-MiniLM-L6-v2_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/sentence-transformers_all-MiniLM-L6-v2_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/sentence-transformers_all-MiniLM-L6-v2_0.txt" ]
0.11
lightx2v/Qwen-Image-Lightning
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"lightx2v/Qwen-Image-Lightning\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('lightx2v_Qwen-Image-Lightning_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightx2v_Qwen-Image-Lightning_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightx2v_Qwen-Image-Lightning_0.txt|lightx2v_Qwen-Image-Lightning_0.txt>',\n )\n\n with open('lightx2v_Qwen-Image-Lightning_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"lightx2v/Qwen-Image-Lightning\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightx2v_Qwen-Image-Lightning_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightx2v_Qwen-Image-Lightning_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lightx2v_Qwen-Image-Lightning_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lightx2v_Qwen-Image-Lightning_0.txt" ]
0
manifestai/Brumby-14B-Base
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('manifestai_Brumby-14B-Base_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in manifestai_Brumby-14B-Base_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/manifestai_Brumby-14B-Base_0.txt|manifestai_Brumby-14B-Base_0.txt>',\n )\n\n with open('manifestai_Brumby-14B-Base_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='manifestai_Brumby-14B-Base_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='manifestai_Brumby-14B-Base_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
71.53
lightx2v/Wan2.2-Lightning
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('lightx2v_Wan2.2-Lightning_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightx2v_Wan2.2-Lightning_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightx2v_Wan2.2-Lightning_0.txt|lightx2v_Wan2.2-Lightning_0.txt>',\n )\n\n with open('lightx2v_Wan2.2-Lightning_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightx2v_Wan2.2-Lightning_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightx2v_Wan2.2-Lightning_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
ByteDance/Ouro-2.6B-Thinking
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"ByteDance/Ouro-2.6B-Thinking\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('ByteDance_Ouro-2.6B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ByteDance_Ouro-2.6B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ByteDance_Ouro-2.6B-Thinking_0.txt|ByteDance_Ouro-2.6B-Thinking_0.txt>',\n )\n\n with open('ByteDance_Ouro-2.6B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"ByteDance/Ouro-2.6B-Thinking\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ByteDance_Ouro-2.6B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ByteDance_Ouro-2.6B-Thinking_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"ByteDance/Ouro-2.6B-Thinking\", trust_remote_code=True, dtype=\"auto\")\n with open('ByteDance_Ouro-2.6B-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ByteDance_Ouro-2.6B-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ByteDance_Ouro-2.6B-Thinking_1.txt|ByteDance_Ouro-2.6B-Thinking_1.txt>',\n )\n\n with open('ByteDance_Ouro-2.6B-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"ByteDance/Ouro-2.6B-Thinking\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ByteDance_Ouro-2.6B-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ByteDance_Ouro-2.6B-Thinking_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ByteDance_Ouro-2.6B-Thinking_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ByteDance_Ouro-2.6B-Thinking_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ByteDance_Ouro-2.6B-Thinking_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ByteDance_Ouro-2.6B-Thinking_1.txt" ]
0
aquif-ai/aquif-3.5-Plus-30B-A3B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"aquif-ai/aquif-3.5-Plus-30B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt|aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt>',\n )\n\n with open('aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"aquif-ai/aquif-3.5-Plus-30B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"aquif-ai/aquif-3.5-Plus-30B-A3B\")\n model = AutoModelForCausalLM.from_pretrained(\"aquif-ai/aquif-3.5-Plus-30B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt|aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt>',\n )\n\n with open('aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, 
AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"aquif-ai/aquif-3.5-Plus-30B-A3B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"aquif-ai/aquif-3.5-Plus-30B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/aquif-ai_aquif-3.5-Plus-30B-A3B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/aquif-ai_aquif-3.5-Plus-30B-A3B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/aquif-ai_aquif-3.5-Plus-30B-A3B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt" ]
73.93
IndexTeam/IndexTTS-2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('IndexTeam_IndexTTS-2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in IndexTeam_IndexTTS-2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/IndexTeam_IndexTTS-2_0.txt|IndexTeam_IndexTTS-2_0.txt>',\n )\n\n with open('IndexTeam_IndexTTS-2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='IndexTeam_IndexTTS-2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='IndexTeam_IndexTTS-2_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
black-forest-labs/FLUX.1-Kontext-dev
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.1-Kontext-dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-Kontext-dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-Kontext-dev_0.txt|black-forest-labs_FLUX.1-Kontext-dev_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-Kontext-dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-Kontext-dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-Kontext-dev_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-Kontext-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('black-forest-labs_FLUX.1-Kontext-dev_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-Kontext-dev_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-Kontext-dev_1.txt|black-forest-labs_FLUX.1-Kontext-dev_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-Kontext-dev_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-Kontext-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-Kontext-dev_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-Kontext-dev_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-Kontext-dev_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-Kontext-dev_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-Kontext-dev_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-Kontext-dev_1.txt" ]
0
Kijai/WanVideo_comfy
[]
[]
[]
0
stabilityai/stable-diffusion-xl-base-1.0
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-xl-base-1.0\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('stabilityai_stable-diffusion-xl-base-1.0_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stabilityai_stable-diffusion-xl-base-1.0_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stabilityai_stable-diffusion-xl-base-1.0_0.txt|stabilityai_stable-diffusion-xl-base-1.0_0.txt>',\n )\n\n with open('stabilityai_stable-diffusion-xl-base-1.0_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-xl-base-1.0\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stabilityai_stable-diffusion-xl-base-1.0_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stabilityai_stable-diffusion-xl-base-1.0_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stabilityai_stable-diffusion-xl-base-1.0_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stabilityai_stable-diffusion-xl-base-1.0_0.txt" ]
0
meta-llama/Llama-3.1-8B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('meta-llama_Llama-3.1-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_0.txt|meta-llama_Llama-3.1-8B-Instruct_0.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"meta-llama/Llama-3.1-8B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('meta-llama_Llama-3.1-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_1.txt|meta-llama_Llama-3.1-8B-Instruct_1.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"meta-llama/Llama-3.1-8B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n model = AutoModelForCausalLM.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('meta-llama_Llama-3.1-8B-Instruct_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_2.txt|meta-llama_Llama-3.1-8B-Instruct_2.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\nmodel = AutoModelForCausalLM.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_2.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_1.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_2.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_1.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_2.txt" ]
19.44
google/gemma-3-1b-it
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_gemma-3-1b-it_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-1b-it_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-1b-it_0.txt|google_gemma-3-1b-it_0.txt>',\n )\n\n with open('google_gemma-3-1b-it_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-1b-it_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-1b-it_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"google/gemma-3-1b-it\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('google_gemma-3-1b-it_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-1b-it_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-1b-it_1.txt|google_gemma-3-1b-it_1.txt>',\n )\n\n with open('google_gemma-3-1b-it_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"google/gemma-3-1b-it\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-1b-it_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-1b-it_1.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"google/gemma-3-1b-it\")\n model = AutoModelForCausalLM.from_pretrained(\"google/gemma-3-1b-it\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('google_gemma-3-1b-it_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-1b-it_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-1b-it_2.txt|google_gemma-3-1b-it_2.txt>',\n )\n\n with open('google_gemma-3-1b-it_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"google/gemma-3-1b-it\")\nmodel = AutoModelForCausalLM.from_pretrained(\"google/gemma-3-1b-it\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-1b-it_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-1b-it_2.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-1b-it_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-1b-it_1.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-1b-it_2.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-1b-it_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-1b-it_1.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-1b-it_2.txt" ]
2.42
Wan-AI/Wan2.2-I2V-A14B
[]
[]
[]
0
Qwen/Qwen3-0.6B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-0.6B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-0.6B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-0.6B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-0.6B_0.txt|Qwen_Qwen3-0.6B_0.txt>',\n )\n\n with open('Qwen_Qwen3-0.6B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-0.6B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-0.6B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-0.6B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-0.6B\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-0.6B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-0.6B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-0.6B_1.txt|Qwen_Qwen3-0.6B_1.txt>',\n )\n\n with open('Qwen_Qwen3-0.6B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-0.6B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-0.6B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-0.6B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-0.6B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-0.6B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-0.6B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-0.6B_1.txt" ]
1.82
Qwen/Qwen3-4B-Instruct-2507
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-4B-Instruct-2507\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-4B-Instruct-2507_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-4B-Instruct-2507_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-4B-Instruct-2507_0.txt|Qwen_Qwen3-4B-Instruct-2507_0.txt>',\n )\n\n with open('Qwen_Qwen3-4B-Instruct-2507_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-4B-Instruct-2507\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-4B-Instruct-2507_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-4B-Instruct-2507_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-4B-Instruct-2507_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-4B-Instruct-2507_1.txt|Qwen_Qwen3-4B-Instruct-2507_1.txt>',\n )\n\n with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-4B-Instruct-2507_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-4B-Instruct-2507_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_1.txt" ]
9.74
Qwen/Qwen3-Embedding-0.6B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"Qwen/Qwen3-Embedding-0.6B\")\n \n sentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [3, 3]\n with open('Qwen_Qwen3-Embedding-0.6B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-Embedding-0.6B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-Embedding-0.6B_0.txt|Qwen_Qwen3-Embedding-0.6B_0.txt>',\n )\n\n with open('Qwen_Qwen3-Embedding-0.6B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"Qwen/Qwen3-Embedding-0.6B\")\n\nsentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-Embedding-0.6B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-Embedding-0.6B_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-Embedding-0.6B_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-Embedding-0.6B_0.txt" ]
1.44
Alissonerdx/BFS-Best-Face-Swap
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"Alissonerdx/BFS-Best-Face-Swap\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Alissonerdx_BFS-Best-Face-Swap_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Alissonerdx_BFS-Best-Face-Swap_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Alissonerdx_BFS-Best-Face-Swap_0.txt|Alissonerdx_BFS-Best-Face-Swap_0.txt>',\n )\n\n with open('Alissonerdx_BFS-Best-Face-Swap_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"Alissonerdx/BFS-Best-Face-Swap\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Alissonerdx_BFS-Best-Face-Swap_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Alissonerdx_BFS-Best-Face-Swap_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Alissonerdx_BFS-Best-Face-Swap_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Alissonerdx_BFS-Best-Face-Swap_0.txt" ]
0
yanolja/YanoljaNEXT-Rosetta-27B-2511
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"translation\", model=\"yanolja/YanoljaNEXT-Rosetta-27B-2511\")\n with open('yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt|yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt>',\n )\n\n with open('yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"yanolja/YanoljaNEXT-Rosetta-27B-2511\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"yanolja/YanoljaNEXT-Rosetta-27B-2511\")\n model = AutoModelForCausalLM.from_pretrained(\"yanolja/YanoljaNEXT-Rosetta-27B-2511\")\n with open('yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt|yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt>',\n )\n\n with open('yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"yanolja/YanoljaNEXT-Rosetta-27B-2511\")\nmodel = AutoModelForCausalLM.from_pretrained(\"yanolja/YanoljaNEXT-Rosetta-27B-2511\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/yanolja_YanoljaNEXT-Rosetta-27B-2511_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/yanolja_YanoljaNEXT-Rosetta-27B-2511_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/yanolja_YanoljaNEXT-Rosetta-27B-2511_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/yanolja_YanoljaNEXT-Rosetta-27B-2511_1.txt" ]
65.4
Qwen/Qwen3-Coder-30B-A3B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-Coder-30B-A3B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt|Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-Coder-30B-A3B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-Coder-30B-A3B-Instruct\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-Coder-30B-A3B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt|Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-Coder-30B-A3B-Instruct\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-Coder-30B-A3B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-Coder-30B-A3B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-Coder-30B-A3B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-Coder-30B-A3B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-Coder-30B-A3B-Instruct_1.txt" ]
73.93
PleIAs/Monad
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"PleIAs/Monad\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('PleIAs_Monad_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PleIAs_Monad_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PleIAs_Monad_0.txt|PleIAs_Monad_0.txt>',\n )\n\n with open('PleIAs_Monad_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"PleIAs/Monad\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PleIAs_Monad_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PleIAs_Monad_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"PleIAs/Monad\")\n model = AutoModelForCausalLM.from_pretrained(\"PleIAs/Monad\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('PleIAs_Monad_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PleIAs_Monad_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PleIAs_Monad_1.txt|PleIAs_Monad_1.txt>',\n )\n\n with open('PleIAs_Monad_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"PleIAs/Monad\")\nmodel = AutoModelForCausalLM.from_pretrained(\"PleIAs/Monad\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PleIAs_Monad_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PleIAs_Monad_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PleIAs_Monad_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PleIAs_Monad_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PleIAs_Monad_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PleIAs_Monad_1.txt" ]
0.14
Qwen/Qwen3-8B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-8B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-8B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-8B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-8B_0.txt|Qwen_Qwen3-8B_0.txt>',\n )\n\n with open('Qwen_Qwen3-8B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-8B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-8B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-8B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-8B\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-8B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-8B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-8B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-8B_1.txt|Qwen_Qwen3-8B_1.txt>',\n )\n\n with open('Qwen_Qwen3-8B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-8B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-8B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-8B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-8B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-8B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-8B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-8B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-8B_1.txt" ]
19.83
Kijai/WanVideo_comfy_fp8_scaled
[]
[]
[]
0
ibm-granite/granite-4.0-h-1b
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"ibm-granite/granite-4.0-h-1b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('ibm-granite_granite-4.0-h-1b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ibm-granite_granite-4.0-h-1b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ibm-granite_granite-4.0-h-1b_0.txt|ibm-granite_granite-4.0-h-1b_0.txt>',\n )\n\n with open('ibm-granite_granite-4.0-h-1b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"ibm-granite/granite-4.0-h-1b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ibm-granite_granite-4.0-h-1b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ibm-granite_granite-4.0-h-1b_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"ibm-granite/granite-4.0-h-1b\")\n model = AutoModelForCausalLM.from_pretrained(\"ibm-granite/granite-4.0-h-1b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('ibm-granite_granite-4.0-h-1b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ibm-granite_granite-4.0-h-1b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ibm-granite_granite-4.0-h-1b_1.txt|ibm-granite_granite-4.0-h-1b_1.txt>',\n )\n\n with open('ibm-granite_granite-4.0-h-1b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"ibm-granite/granite-4.0-h-1b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"ibm-granite/granite-4.0-h-1b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ibm-granite_granite-4.0-h-1b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ibm-granite_granite-4.0-h-1b_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ibm-granite_granite-4.0-h-1b_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ibm-granite_granite-4.0-h-1b_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ibm-granite_granite-4.0-h-1b_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ibm-granite_granite-4.0-h-1b_1.txt" ]
3.54
dx8152/Qwen-Image-Edit-2509-White_to_Scene
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"dx8152/Qwen-Image-Edit-2509-White_to_Scene\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt|dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt>',\n )\n\n with open('dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"dx8152/Qwen-Image-Edit-2509-White_to_Scene\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_Qwen-Image-Edit-2509-White_to_Scene_0.txt" ]
0
Qwen/Qwen3-Omni-30B-A3B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForTextToWaveform\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-Omni-30B-A3B-Instruct\")\n model = AutoModelForTextToWaveform.from_pretrained(\"Qwen/Qwen3-Omni-30B-A3B-Instruct\")\n with open('Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt|Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForTextToWaveform\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-Omni-30B-A3B-Instruct\")\nmodel = AutoModelForTextToWaveform.from_pretrained(\"Qwen/Qwen3-Omni-30B-A3B-Instruct\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-Omni-30B-A3B-Instruct_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-Omni-30B-A3B-Instruct_0.txt" ]
85.38
coqui/XTTS-v2
[]
[]
[]
0
google/gemma-3n-E2B-it-litert-lm
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_gemma-3n-E2B-it-litert-lm_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3n-E2B-it-litert-lm_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3n-E2B-it-litert-lm_0.txt|google_gemma-3n-E2B-it-litert-lm_0.txt>',\n )\n\n with open('google_gemma-3n-E2B-it-litert-lm_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3n-E2B-it-litert-lm_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3n-E2B-it-litert-lm_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3n-E2B-it-litert-lm_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3n-E2B-it-litert-lm_0.txt" ]
0
nvidia/omnivinci
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"feature-extraction\", model=\"nvidia/omnivinci\", trust_remote_code=True)\n with open('nvidia_omnivinci_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_omnivinci_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_omnivinci_0.txt|nvidia_omnivinci_0.txt>',\n )\n\n with open('nvidia_omnivinci_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"feature-extraction\", model=\"nvidia/omnivinci\", trust_remote_code=True)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_omnivinci_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_omnivinci_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"nvidia/omnivinci\", trust_remote_code=True, dtype=\"auto\")\n with open('nvidia_omnivinci_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_omnivinci_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_omnivinci_1.txt|nvidia_omnivinci_1.txt>',\n )\n\n with open('nvidia_omnivinci_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"nvidia/omnivinci\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_omnivinci_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_omnivinci_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_omnivinci_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_omnivinci_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_omnivinci_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_omnivinci_1.txt" ]
0
Qwen/Qwen3-VL-30B-A3B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt|Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
75.24
Qwen/Qwen3-30B-A3B-Instruct-2507
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-30B-A3B-Instruct-2507\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt|Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt>',\n )\n\n with open('Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-30B-A3B-Instruct-2507\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-30B-A3B-Instruct-2507\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-30B-A3B-Instruct-2507\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt|Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt>',\n )\n\n with open('Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import 
AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-30B-A3B-Instruct-2507\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-30B-A3B-Instruct-2507\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-30B-A3B-Instruct-2507_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-30B-A3B-Instruct-2507_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-30B-A3B-Instruct-2507_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-30B-A3B-Instruct-2507_1.txt" ]
73.93
deepseek-ai/DeepSeek-V3.2-Exp
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Exp\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-V3.2-Exp_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Exp_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Exp_0.txt|deepseek-ai_DeepSeek-V3.2-Exp_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Exp_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Exp\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Exp_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Exp_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Exp\", dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-V3.2-Exp_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Exp_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Exp_1.txt|deepseek-ai_DeepSeek-V3.2-Exp_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Exp_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Exp\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Exp_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Exp_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_1.txt" ]
1659.65
meituan-longcat/LongCat-Flash-Omni
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"meituan-longcat/LongCat-Flash-Omni\", trust_remote_code=True, dtype=\"auto\")\n with open('meituan-longcat_LongCat-Flash-Omni_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meituan-longcat_LongCat-Flash-Omni_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meituan-longcat_LongCat-Flash-Omni_0.txt|meituan-longcat_LongCat-Flash-Omni_0.txt>',\n )\n\n with open('meituan-longcat_LongCat-Flash-Omni_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"meituan-longcat/LongCat-Flash-Omni\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meituan-longcat_LongCat-Flash-Omni_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meituan-longcat_LongCat-Flash-Omni_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meituan-longcat_LongCat-Flash-Omni_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meituan-longcat_LongCat-Flash-Omni_0.txt" ]
1357.62
spacepxl/Wan2.1-VAE-upscale2x
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"spacepxl/Wan2.1-VAE-upscale2x\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('spacepxl_Wan2.1-VAE-upscale2x_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in spacepxl_Wan2.1-VAE-upscale2x_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/spacepxl_Wan2.1-VAE-upscale2x_0.txt|spacepxl_Wan2.1-VAE-upscale2x_0.txt>',\n )\n\n with open('spacepxl_Wan2.1-VAE-upscale2x_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"spacepxl/Wan2.1-VAE-upscale2x\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='spacepxl_Wan2.1-VAE-upscale2x_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='spacepxl_Wan2.1-VAE-upscale2x_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/spacepxl_Wan2.1-VAE-upscale2x_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/spacepxl_Wan2.1-VAE-upscale2x_0.txt" ]
0
black-forest-labs/FLUX.1-dev
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.1-dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-dev_0.txt|black-forest-labs_FLUX.1-dev_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-dev_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('black-forest-labs_FLUX.1-dev_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-dev_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-dev_1.txt|black-forest-labs_FLUX.1-dev_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-dev_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-dev_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-dev_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-dev_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-dev_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-dev_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-dev_1.txt" ]
0
deepseek-ai/DeepSeek-R1
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-R1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-R1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-R1_0.txt|deepseek-ai_DeepSeek-R1_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-R1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-R1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-R1_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('deepseek-ai_DeepSeek-R1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-R1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-R1_1.txt|deepseek-ai_DeepSeek-R1_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-R1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer 
= AutoTokenizer.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-R1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-R1_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-R1_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-R1_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-R1_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-R1_1.txt" ]
1657.55
BAAI/bge-m3
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"BAAI/bge-m3\")\n \n sentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [4, 4]\n with open('BAAI_bge-m3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in BAAI_bge-m3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/BAAI_bge-m3_0.txt|BAAI_bge-m3_0.txt>',\n )\n\n with open('BAAI_bge-m3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"BAAI/bge-m3\")\n\nsentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [4, 4]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='BAAI_bge-m3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='BAAI_bge-m3_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/BAAI_bge-m3_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/BAAI_bge-m3_0.txt" ]
0