Commit 4587a76 ("updated demo"), committed by qinghuazhou
1 Parent: 01fc33a
Files changed: util/utils.py (+2 −2)
util/utils.py CHANGED

@@ -35,7 +35,7 @@ def load_tok(model_name="gpt2-xl"):
 
     elif model_name == 'llama-3-8b':
 
-        model = "
+        model = "meta-llama/Meta-Llama-3-8B"
         tok = AutoTokenizer.from_pretrained(model)
         tok.pad_token = tok.eos_token
 
@@ -75,7 +75,7 @@ def load_model_tok(model_name="gpt2-xl"):
 
     elif model_name == 'llama-3-8b':
 
-        model = "
+        model = "meta-llama/Meta-Llama-3-8B"
         tok = AutoTokenizer.from_pretrained(model)
         model = AutoModelForCausalLM.from_pretrained(
            model,
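
For context, below is a minimal sketch of what the llama-3-8b branch of load_model_tok plausibly reads like after this commit. Only the lines visible in the diff (the "meta-llama/Meta-Llama-3-8B" repo id, the AutoTokenizer call, and the opening of the AutoModelForCausalLM.from_pretrained call) come from the file; the default branch, the remaining from_pretrained arguments, the pad-token line, and the return value are assumptions for illustration, not taken from the repository.

# Sketch only: reconstructs the llama-3-8b branch of load_model_tok as it
# plausibly reads after this commit; unseen lines are marked as assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model_tok(model_name="gpt2-xl"):
    if model_name == "gpt2-xl":
        model = "gpt2-xl"                       # assumption: default branch
    elif model_name == "llama-3-8b":
        model = "meta-llama/Meta-Llama-3-8B"    # set by this commit
    tok = AutoTokenizer.from_pretrained(model)  # shown in the diff
    tok.pad_token = tok.eos_token               # shown for load_tok; assumed here
    model = AutoModelForCausalLM.from_pretrained(
        model,                                  # shown in the diff
        torch_dtype="auto",                     # assumption: not visible in the diff
        device_map="auto",                      # assumption: not visible in the diff
    )
    return model, tok                           # assumption: return signature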