edit generation settings
index.html (+4, -3)
@@ -190,9 +190,10 @@
 // Generate
 // Note: The model is a causal LM, we use the chat template to format input
 const output = await generator(messages, {
-  max_new_tokens:
-  temperature: 0.
-
+  max_new_tokens: 2048,
+  temperature: 0.5,
+  top_p: 1.0,
+  min_p: 0.1,
   do_sample: true,
 });
 
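For context, the object edited above is the generation-options argument passed to a Transformers.js text-generation pipeline. Below is a minimal sketch of how such a call could look end to end; the model id, the messages, and the way the result is logged are illustrative assumptions, since this diff only shows the options block from index.html.

import { pipeline } from '@huggingface/transformers';

// Placeholder model id for illustration; the real one is defined elsewhere in index.html.
const generator = await pipeline('text-generation', 'onnx-community/Qwen2.5-0.5B-Instruct');

// Chat-style input; the pipeline applies the model's chat template for causal LMs.
const messages = [
  { role: 'user', content: 'Write a haiku about the sea.' },
];

// Generation settings matching the committed values.
const output = await generator(messages, {
  max_new_tokens: 2048, // upper bound on newly generated tokens
  temperature: 0.5,     // softens the sampling distribution; lower is more deterministic
  top_p: 1.0,           // nucleus sampling effectively disabled (full distribution kept)
  min_p: 0.1,           // drop tokens below 10% of the most likely token's probability
  do_sample: true,      // sample instead of greedy decoding
});

// With chat-style input, generated_text holds the conversation; the last entry is the reply.
console.log(output[0].generated_text.at(-1).content);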