I ran this on a Colab T4:
pip install "transformers>=5.0.0rc0"
from transformers import AutoProcessor, Glm4vMoeForConditionalGeneration
import torch
MODEL_PATH = "zai-org/GLM-4.6V-Flash"
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
},
{
"type": "text",
"text": "describe this image"
}
],
}
]
processor = AutoProcessor.from_pretrained(MODEL_PATH)
model = Glm4vMoeForConditionalGeneration.from_pretrained(
pretrained_model_name_or_path=MODEL_PATH,
torch_dtype="auto",
device_map="auto",
)
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
).to(model.device)
inputs.pop("token_type_ids", None)
generated_ids = model.generate(**inputs, max_new_tokens=8192)
output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
print(output_text)
It ran on the Colab T4 but never completed: loading the model needs more than the 12 GB of system RAM and 16 GB of VRAM that the instance provides.
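One workaround worth trying before giving up on the T4 (a sketch only, assuming bitsandbytes supports this checkpoint, which I have not verified, and the weights may still be too large even at 4-bit): quantize the weights to 4-bit at load time so they have a chance of fitting in 16 GB of VRAM.

# Sketch of a 4-bit quantized load (assumes bitsandbytes is installed and
# compatible with this checkpoint).
from transformers import AutoProcessor, Glm4vMoeForConditionalGeneration, BitsAndBytesConfig
import torch

MODEL_PATH = "zai-org/GLM-4.6V-Flash"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # store weights as 4-bit NF4
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,  # T4 has no bfloat16, so compute in fp16
)

processor = AutoProcessor.from_pretrained(MODEL_PATH)
model = Glm4vMoeForConditionalGeneration.from_pretrained(
    MODEL_PATH,
    quantization_config=bnb_config,
    device_map="auto",
)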
Still on the Colab T4, I tried each of these installs:
pip install "transformers>=5.0.0rc0"
pip install "transformers>=5.0.0rc0" XXXXXXX
pip install transformers==5.0.0rc0
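Whichever of those commands is used, it is worth confirming in the same runtime (after a restart on Colab/Kaggle) that the expected version was actually installed and that the model class resolves; note the spelling is Glm4vMoeForConditionalGeneration, only the leading G is capitalized.

# Quick sanity check after the install:
import transformers
print(transformers.__version__)  # should print 5.0.0rc0 or newer

# An ImportError here usually means an older transformers build is still loaded.
from transformers import Glm4vMoeForConditionalGeneration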
Same problem on Kaggle with 2×T4. What is going wrong? Why does it fail?
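To see whether the hardware is simply too small, it helps to check how big the checkpoint actually is before loading it. A rough sizing sketch, assuming the weights are published as safetensors shards on the Hub:

# Sum the safetensors shard sizes on the Hub without downloading anything.
from huggingface_hub import HfApi

info = HfApi().model_info("zai-org/GLM-4.6V-Flash", files_metadata=True)
total_bytes = sum(s.size or 0 for s in info.siblings if s.rfilename.endswith(".safetensors"))
print(f"checkpoint weights: {total_bytes / 1e9:.1f} GB")

# If that number exceeds the free VRAM (16 GB on one T4, roughly 30 GB across 2x T4),
# device_map="auto" starts offloading to CPU RAM and disk, and on a 12 GB-RAM Colab
# instance that usually hangs or gets killed instead of finishing generation.

For reference, this is the full script that fails: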
from transformers import AutoProcessor, Glm4vMoeForConditionalGeneration
import torch
from transformers.image_utils import load_image
from IPython.display import display, Image as IPImage
MODEL_PATH = "zai-org/GLM-4.6V-Flash"
processor = AutoProcessor.from_pretrained(MODEL_PATH)
model = Glm4vMoeForConditionalGeneration.from_pretrained(
pretrained_model_name_or_path=MODEL_PATH,
torch_dtype="auto",
device_map="auto",
)
image_path = "/home/Ubuntu/images/lane.png"
image = load_image(image_path)
prompt = """
Which lane is open?
"""
messages = [{"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": prompt}],}]
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
).to(model.device)
inputs.pop("token_type_ids", None)
generated_ids = model.generate(**inputs, max_new_tokens=8192)
output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
print(output_text)