Update README.md
README.md (CHANGED)
@@ -138,11 +138,11 @@ Here is example code using HuggingFace Transformers to inference the model (note
 
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-from transformers import LlamaTokenizer,
+from transformers import LlamaTokenizer, MixtralForCausalLM
 import bitsandbytes, flash_attn
 
 tokenizer = LlamaTokenizer.from_pretrained('NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', trust_remote_code=True)
-model =
+model = MixtralForCausalLM.from_pretrained(
     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
     torch_dtype=torch.float16,
     device_map="auto",
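For context, here is a minimal, self-contained sketch of how the corrected snippet might be run end to end. The model ID and the load arguments (`torch_dtype`, `device_map`) come from the diff above; the ChatML-style prompt and the generation settings below are illustrative assumptions, not part of this commit.

```python
# Minimal sketch of the updated example. Load arguments follow the diff above;
# the prompt format and generation settings are assumptions for illustration.
import torch
from transformers import LlamaTokenizer, MixtralForCausalLM

tokenizer = LlamaTokenizer.from_pretrained(
    'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', trust_remote_code=True
)
model = MixtralForCausalLM.from_pretrained(
    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    torch_dtype=torch.float16,
    device_map="auto",
)

# Assumed ChatML-style prompt (Nous-Hermes models are typically prompted this way).
prompt = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nWrite a haiku about open models.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.8)

# Print only the newly generated tokens, skipping the echoed prompt.
print(tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```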