Update README.md
Browse files
README.md
CHANGED
|
@@ -259,10 +259,10 @@ All training artifacts are publicly available:
|
|
| 259 |
|
| 260 |
|
| 261 |
## Usage Examples
|
| 262 |
-
|
| 263 |
-
### Quantization
|
| 264 |
<details>
|
| 265 |
-
<summary>Click to expand <strong>
|
|
|
|
|
|
|
| 266 |
import torch
|
| 267 |
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
| 268 |
|
|
@@ -293,6 +293,8 @@ with torch.no_grad():
|
|
| 293 |
|
| 294 |
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 295 |
print(f"Generated text: {generated_text}")
|
|
|
|
|
|
|
| 296 |
</details>
|
| 297 |
|
| 298 |
### Encoder: Masked Language Modeling
|
|
|
|
| 259 |
|
| 260 |
|
| 261 |
## Usage Examples
|
|
|
|
|
|
|
| 262 |
<details>
|
| 263 |
+
<summary>Click to expand <strong>quantization</strong> usage examples</summary>
|
| 264 |
+
|
| 265 |
+
```python
|
| 266 |
import torch
|
| 267 |
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
| 268 |
|
|
|
|
| 293 |
|
| 294 |
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 295 |
print(f"Generated text: {generated_text}")
|
| 296 |
+
```
|
| 297 |
+
|
| 298 |
</details>
|
| 299 |
|
| 300 |
### Encoder: Masked Language Modeling
|