Upload folder using huggingface_hub
- Mistral-7B-Instruct-v0.1-Q2_K.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q3_K_L.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q3_K_M.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q3_K_S.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q4_0.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q4_K_M.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q4_K_S.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q5_0.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q5_K_M.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q5_K_S.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q6_K.gguf +2 -2
- Mistral-7B-Instruct-v0.1-Q8_0.gguf +2 -2
- README.md +9 -15
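
The commit title indicates the files were pushed with `huggingface_hub`'s folder upload. A minimal sketch of that kind of upload is shown below; the repo id, local folder path, and token handling are placeholders, not values taken from this commit.

```python
# Sketch only: push a local folder of .gguf files to a Hugging Face model repo.
# The repo id and folder path are placeholders, not the ones behind this commit.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via `huggingface-cli login` or the HF_TOKEN env var
api.upload_folder(
    repo_id="your-org/Mistral-7B-Instruct-v0.1-GGUF",  # hypothetical target repo
    folder_path="./Mistral-7B-Instruct-v0.1-GGUF",     # local folder with the quantized files
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```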
Mistral-7B-Instruct-v0.1-Q2_K.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:287801ec5b0fdddff68bacfa5dc55a5c90125cdc4f96a0dd904235e400b9af18
+size 2719242912

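Each `.gguf` entry in this commit is a Git LFS pointer file: `oid sha256:` records the SHA-256 of the stored file and `size` its length in bytes. A small sketch for checking a downloaded quant against the pointer above follows; the local filename is an assumption, while the expected hash and size are the ones from the Q2_K diff.

```python
# Sketch: verify a downloaded quant against the sha256/size from its LFS pointer.
# Local filename is assumed; the expected values come from the Q2_K diff above.
import hashlib
from pathlib import Path

path = Path("Mistral-7B-Instruct-v0.1-Q2_K.gguf")
expected_sha256 = "287801ec5b0fdddff68bacfa5dc55a5c90125cdc4f96a0dd904235e400b9af18"
expected_size = 2719242912

digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

assert path.stat().st_size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_sha256, "sha256 mismatch"
print("local file matches the LFS pointer")
```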
Mistral-7B-Instruct-v0.1-Q3_K_L.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1b2c9f7a3c6a92369f738022fa26650e7cd4be896463679f151ae1870bd6fe28
+size 3822025376

Mistral-7B-Instruct-v0.1-Q3_K_M.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ad0c0fd805d9d0299f6f64234aa758d92544e1ee6499ffb3b0eb8c26841c008d
+size 3518986912

Mistral-7B-Instruct-v0.1-Q3_K_S.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3a80a456dbe8267f610977fcd0acfb1f0f1580f3e7df8a0aaafaf8b1a9605157
+size 3164568224

Mistral-7B-Instruct-v0.1-Q4_0.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:cafa2854c8024bfe35737dc72067b59183db6363134f0806282dd271b020c589
+size 4108917408

Mistral-7B-Instruct-v0.1-Q4_K_M.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:12ab12f8f17913214cd5424dacae6735ec4bdfd28da23656d17062be9002a4d3
+size 4368439968

Mistral-7B-Instruct-v0.1-Q4_K_S.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:62272c420445e7e653909347313d9a40da2cb3e27b6ce989cd49a8488b66355b
+size 4140374688

Mistral-7B-Instruct-v0.1-Q5_0.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:79ae82beb830a5bd91524862238dead176081404d17da1e5b1bb65840e372fc8
+size 4997716640

Mistral-7B-Instruct-v0.1-Q5_K_M.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3a947b0d47e7b85486f68b2370aab7d00b280f464e02f0b140e42490015a9eb7
+size 5131410080

Mistral-7B-Instruct-v0.1-Q5_K_S.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4189a0f4bbaf02be60e4edeca2aea56eb30423d0a5717037b770c10400cc792c
+size 4997716640

Mistral-7B-Instruct-v0.1-Q6_K.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d042db2524c2409cda1f3f02eba4cc8a546776f9f7cd266df40199653b7c8a26
+size 5942065824

Mistral-7B-Instruct-v0.1-Q8_0.gguf
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9f319bab8e7858b959c898ef9838d4c44979d00972896faca16766c97c1ac1c6
+size 7695858336

README.md
CHANGED
@@ -1,18 +1,14 @@
 ---
 license: apache-2.0
+pipeline_tag: text-generation
 tags:
 - finetuned
 - TensorBlock
 - GGUF
-
-
-
-
-- messages:
-  - role: user
-    content: What is your favorite condiment?
-extra_gated_description: If you want to learn more about how we process your personal
-  data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
+inference:
+  parameters:
+    temperature: 0.7
+base_model: sanchit-gandhi/Mistral-7B-Instruct-v0.1
 ---
 
 <div style="width: auto; margin-left: auto; margin-right: auto">
@@ -26,11 +22,11 @@ extra_gated_description: If you want to learn more about how we process your per
 </div>
 </div>
 
-##
+## sanchit-gandhi/Mistral-7B-Instruct-v0.1 - GGUF
 
-This repo contains GGUF format model files for [
+This repo contains GGUF format model files for [sanchit-gandhi/Mistral-7B-Instruct-v0.1](https://huggingface.co/sanchit-gandhi/Mistral-7B-Instruct-v0.1).
 
-The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit
+The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4242](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
 
 <div style="text-align: left; margin: 20px 0;">
 <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
@@ -41,9 +37,7 @@ The files were quantized using machines provided by [TensorBlock](https://tensor
 ## Prompt template
 
 ```
-<s>
-
-{prompt} [/INST]
+<s>[INST] {prompt} [/INST]
 ```
 
 ## Model file specification
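
The updated README settles on the single-turn Mistral instruct template `<s>[INST] {prompt} [/INST]`. Below is a minimal sketch of applying that template before handing text to a llama.cpp-based runtime; `build_prompt` is an illustrative helper, not part of any library, and the example question is borrowed from the widget removed from the old frontmatter.

```python
# Sketch: wrap a user message in the prompt template from the updated README.
# build_prompt is an illustrative helper, not part of any library.
def build_prompt(user_message: str) -> str:
    # Template from the README diff: <s>[INST] {prompt} [/INST]
    return f"<s>[INST] {user_message} [/INST]"

# Example question taken from the widget removed from the old frontmatter.
print(build_prompt("What is your favorite condiment?"))
# -> <s>[INST] What is your favorite condiment? [/INST]
```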