Fix pipeline_tag πŸ€—

#2
by merve HF Staff - opened
Files changed (1)
  1. README.md +19 -13
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 license: apache-2.0
-pipeline_tag: image-text-to-text
+pipeline_tag: text-generation
 library_name: transformers
 ---
 
@@ -43,27 +43,33 @@ The following provides demo code illustrating how to generate text using JanusCo
 > Please use transformers >= 4.55.0 to ensure the model works normally.
 
 ```python
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 model_name = "internlm/JanusCoder-14B"
+
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
+model = AutoModelForCausalLM.from_pretrained(
+    model_name, device_map="auto", dtype="auto",
+).eval()
 
 messages = [
-    {
-        "role": "user",
-        "content": [
-            {"type": "text", "text": "Create a line plot that illustrates function y=x."},
-        ],
-    }
+    {"role": "user", "content": "Create a line plot that illustrates function y=x."}
 ]
 
-inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt").to(model.device, dtype=torch.bfloat16)
+inputs = tokenizer.apply_chat_template(
+    messages,
+    add_generation_prompt=True,
+    tokenize=True,
+    return_dict=True,
+    return_tensors="pt"
+).to(model.device)
+
+with torch.inference_mode():
+    generate_ids = model.generate(**inputs, max_new_tokens=200)
+decoded_output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True)
 
-generate_ids = model.generate(**inputs, max_new_tokens=32768)
-decoded_output = processor.decode(generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
-print(decoded_output)
+print(decoded_output[0])
 ```
 
 ## Citation
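
Note on the updated snippet: `tokenizer.batch_decode(generate_ids, ...)` decodes the prompt together with the completion. If only the newly generated text is wanted, the slicing idea from the pre-patch snippet still applies; a minimal sketch, reusing `inputs`, `generate_ids`, and `tokenizer` from the code above:

```python
# Sketch: keep only the tokens emitted after the prompt,
# then decode them on their own.
new_tokens = generate_ids[0, inputs["input_ids"].shape[1]:]
print(tokenizer.decode(new_tokens, skip_special_tokens=True))
```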
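Since the patch is about `pipeline_tag`, one practical consequence: with `text-generation` the checkpoint is treated as a plain causal LM, so it can also be driven through the high-level `pipeline()` helper. A minimal sketch, assuming transformers >= 4.55.0 and that the default generation settings are acceptable:

```python
from transformers import pipeline

# Sketch: the corrected tag matches the standard text-generation pipeline.
generator = pipeline(
    "text-generation",
    model="internlm/JanusCoder-14B",
    device_map="auto",
    torch_dtype="auto",
)

messages = [
    {"role": "user", "content": "Create a line plot that illustrates function y=x."}
]
out = generator(messages, max_new_tokens=200)
# With chat-style input, recent transformers returns the full message list;
# the last entry is the assistant reply.
print(out[0]["generated_text"][-1]["content"])
```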