burtenshaw HF Staff committed on
Commit
b9dd845
·
verified ·
1 Parent(s): 24bbf3b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -3
README.md CHANGED
@@ -116,7 +116,7 @@ import torch
116
  from transformers import AutoModelForCausalLM, AutoTokenizer
117
 
118
 
119
- model_id="nanochat-students/chat-d20"
120
  max_new_tokens=64
121
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
122
 
@@ -137,11 +137,13 @@ inputs = tokenizer.apply_chat_template(
137
 
138
  with torch.no_grad():
139
  outputs = model.generate(
140
- inputs,
141
  max_new_tokens=max_new_tokens,
142
  )
143
 
144
- print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 
 
145
  ```
146
 
147
 
 
116
  from transformers import AutoModelForCausalLM, AutoTokenizer
117
 
118
 
119
+ model_id="nanochat-students/d20-chat-transformers"
120
  max_new_tokens=64
121
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
122
 
 
137
 
138
  with torch.no_grad():
139
  outputs = model.generate(
140
+ **inputs,
141
  max_new_tokens=max_new_tokens,
142
  )
143
 
144
+ # Decode only the generated tokens (excluding the input prompt)
145
+ generated_tokens = outputs[0, inputs.input_ids.shape[1]:]
146
+ print(tokenizer.decode(generated_tokens, skip_special_tokens=True))
147
  ```
148
 
149