Add files using upload-large-folder tool
- .gitattributes +1 -0
- LICENSE.DeepSeek +21 -0
- config.json +70 -0
- configuration_deepseek.py +210 -0
- model-00001-of-000163.safetensors +3 -0
- model-00002-of-000163.safetensors +3 -0
- model-00005-of-000163.safetensors +3 -0
- model-00006-of-000163.safetensors +3 -0
- model-00019-of-000163.safetensors +3 -0
- model-00021-of-000163.safetensors +3 -0
- model-00026-of-000163.safetensors +3 -0
- model-00027-of-000163.safetensors +3 -0
- model-00028-of-000163.safetensors +3 -0
- model-00031-of-000163.safetensors +3 -0
- model-00033-of-000163.safetensors +3 -0
- model-00034-of-000163.safetensors +3 -0
- model-00035-of-000163.safetensors +3 -0
- model-00041-of-000163.safetensors +3 -0
- model-00044-of-000163.safetensors +3 -0
- model-00051-of-000163.safetensors +3 -0
- model-00052-of-000163.safetensors +3 -0
- model-00053-of-000163.safetensors +3 -0
- model-00055-of-000163.safetensors +3 -0
- model-00057-of-000163.safetensors +3 -0
- model-00060-of-000163.safetensors +3 -0
- model-00068-of-000163.safetensors +3 -0
- model-00072-of-000163.safetensors +3 -0
- model-00075-of-000163.safetensors +3 -0
- model-00078-of-000163.safetensors +3 -0
- model-00091-of-000163.safetensors +3 -0
- model-00094-of-000163.safetensors +3 -0
- model-00095-of-000163.safetensors +3 -0
- model-00096-of-000163.safetensors +3 -0
- model-00100-of-000163.safetensors +3 -0
- model-00111-of-000163.safetensors +3 -0
- model-00112-of-000163.safetensors +3 -0
- model-00121-of-000163.safetensors +3 -0
- model-00137-of-000163.safetensors +3 -0
- model-00144-of-000163.safetensors +3 -0
- model-00149-of-000163.safetensors +3 -0
- model-00154-of-000163.safetensors +3 -0
- model-00155-of-000163.safetensors +3 -0
- model-00157-of-000163.safetensors +3 -0
- model-00160-of-000163.safetensors +3 -0
- model-00161-of-000163.safetensors +3 -0
- model-00162-of-000163.safetensors +3 -0
- model-00163-of-000163.safetensors +3 -0
- model.safetensors.index.json +0 -0
- tokenizer.json +0 -0
- tokenizer_config.json +35 -0
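For context, a commit like this is normally produced with the upload-large-folder tool from huggingface_hub, which chunks, hashes, and resumes the transfer so that dozens of multi-GB shards can land in one commit. A minimal sketch (the repo id and folder path below are placeholders, not values taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="your-org/your-model",  # placeholder, not this repo's actual id
    folder_path="./checkpoint",     # local folder containing files like those listed above
    repo_type="model",
)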
.gitattributes
CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+paper/assembly_of_experts.pdf filter=lfs diff=lfs merge=lfs -text
LICENSE.DeepSeek
ADDED

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
config.json
ADDED

@@ -0,0 +1,70 @@
+{
+  "architectures": [
+    "DeepseekV3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_deepseek.DeepseekV3Config",
+    "AutoModel": "modeling_deepseek.DeepseekV3Model",
+    "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
+  },
+  "aux_loss_alpha": 0.001,
+  "bos_token_id": 0,
+  "eos_token_id": 1,
+  "ep_size": 1,
+  "first_k_dense_replace": 3,
+  "hidden_act": "silu",
+  "hidden_size": 7168,
+  "initializer_range": 0.02,
+  "intermediate_size": 18432,
+  "kv_lora_rank": 512,
+  "max_position_embeddings": 163840,
+  "model_type": "deepseek_v3",
+  "moe_intermediate_size": 2048,
+  "moe_layer_freq": 1,
+  "n_group": 8,
+  "n_routed_experts": 256,
+  "n_shared_experts": 1,
+  "norm_topk_prob": true,
+  "num_attention_heads": 128,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 61,
+  "num_key_value_heads": 128,
+  "num_nextn_predict_layers": 1,
+  "pretraining_tp": 1,
+  "q_lora_rank": 1536,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "quantization_config": {
+    "activation_scheme": "dynamic",
+    "fmt": "e4m3",
+    "quant_method": "fp8",
+    "weight_block_size": [
+      128,
+      128
+    ]
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "beta_fast": 32,
+    "beta_slow": 1,
+    "factor": 40,
+    "mscale": 1.0,
+    "mscale_all_dim": 1.0,
+    "original_max_position_embeddings": 4096,
+    "type": "yarn"
+  },
+  "rope_theta": 10000,
+  "routed_scaling_factor": 2.5,
+  "scoring_func": "sigmoid",
+  "seq_aux": true,
+  "tie_word_embeddings": false,
+  "topk_group": 4,
+  "topk_method": "noaux_tc",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.3",
+  "use_cache": true,
+  "v_head_dim": 128,
+  "vocab_size": 129280
+}
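The MoE routing fields above ("n_routed_experts", "n_group", "topk_group", "num_experts_per_tok", "scoring_func", "norm_topk_prob", "routed_scaling_factor") describe DeepSeek-V3's group-limited gating: 256 experts are split into 8 groups of 32, only the top 4 groups stay eligible per token, and 8 experts are selected from the surviving 128 candidates. A simplified PyTorch sketch of that selection, not the repo's actual modeling code (it omits the learned score-correction bias the real "noaux_tc" method adds before ranking groups):

import torch

n_routed_experts, n_group, topk_group, top_k = 256, 8, 4, 8
routed_scaling_factor = 2.5

def route(gate_logits: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    # gate_logits: [num_tokens, n_routed_experts] from the router projection
    scores = gate_logits.sigmoid()                                # "scoring_func": "sigmoid"
    group_scores = scores.view(-1, n_group, n_routed_experts // n_group)
    # rank each group by the sum of its two best expert scores
    group_rank = group_scores.topk(2, dim=-1).values.sum(dim=-1)  # [num_tokens, 8]
    top_groups = group_rank.topk(topk_group, dim=-1).indices      # keep 4 of 8 groups
    mask = torch.zeros_like(group_rank).scatter_(1, top_groups, 1.0)
    candidates = (group_scores * mask.unsqueeze(-1)).flatten(1)   # zero out dropped groups
    weights, experts = candidates.topk(top_k, dim=-1)             # 8 experts per token
    weights = weights / weights.sum(dim=-1, keepdim=True)         # "norm_topk_prob": true
    return experts, weights * routed_scaling_factor               # scaled gate weights

experts, weights = route(torch.randn(2, n_routed_experts))
print(experts.shape, weights.shape)  # torch.Size([2, 8]) torch.Size([2, 8])

Note also how the dense/MoE split is encoded: with "first_k_dense_replace": 3 and "moe_layer_freq": 1, the first 3 of the 61 layers use the dense MLP ("intermediate_size": 18432) and every later layer uses this routed MoE with "moe_intermediate_size": 2048 per expert.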
configuration_deepseek.py
ADDED

@@ -0,0 +1,210 @@
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+class DeepseekV3Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a configuration similar to that of DeepSeek-V3.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 129280):
+            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`DeepseekV3Model`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        moe_intermediate_size (`int`, *optional*, defaults to 1407):
+            Dimension of the MoE representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_nextn_predict_layers (`int`, *optional*, defaults to 1):
+            Number of next-n predict layers in the DeepSeekV3 Model.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        n_shared_experts (`int`, *optional*, defaults to None):
+            Number of shared experts; None means dense model.
+        n_routed_experts (`int`, *optional*, defaults to None):
+            Number of routed experts; None means dense model.
+        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+            Scaling factor for routed experts.
+        topk_method (`str`, *optional*, defaults to `greedy`):
+            Top-k method used in the routed gate.
+        n_group (`int`, *optional*, defaults to None):
+            Number of groups for routed experts.
+        topk_group (`int`, *optional*, defaults to None):
+            Number of selected groups for each token (ensuring that the selected experts are only within `topk_group` groups).
+        num_experts_per_tok (`int`, *optional*, defaults to None):
+            Number of selected experts; None means dense model.
+        moe_layer_freq (`int`, *optional*, defaults to 1):
+            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+        first_k_dense_replace (`int`, *optional*, defaults to 0):
+            Number of dense layers in shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
+                                                             \--k dense layers--/
+        norm_topk_prob (`bool`, *optional*, defaults to False):
+            Whether to normalize the weights of the routed experts.
+        scoring_func (`str`, *optional*, defaults to 'softmax'):
+            Method of computing expert weights.
+        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+            Auxiliary loss weight coefficient.
+        seq_aux (`bool`, *optional*, defaults to True):
+            Whether to compute the auxiliary loss for each individual sample.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import DeepseekV3Model, DeepseekV3Config
+
+    >>> # Initializing a Deepseek-V3 style configuration
+    >>> configuration = DeepseekV3Config()
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "deepseek_v3"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=129280,
+        hidden_size=7168,
+        intermediate_size=18432,
+        moe_intermediate_size=2048,
+        num_hidden_layers=61,
+        num_nextn_predict_layers=1,
+        num_attention_heads=128,
+        num_key_value_heads=128,
+        n_shared_experts=1,
+        n_routed_experts=256,
+        ep_size=1,
+        routed_scaling_factor=2.5,
+        kv_lora_rank=512,
+        q_lora_rank=1536,
+        qk_rope_head_dim=64,
+        v_head_dim=128,
+        qk_nope_head_dim=128,
+        topk_method='noaux_tc',
+        n_group=8,
+        topk_group=4,
+        num_experts_per_tok=8,
+        moe_layer_freq=1,
+        first_k_dense_replace=3,
+        norm_topk_prob=True,
+        scoring_func='sigmoid',
+        aux_loss_alpha=0.001,
+        seq_aux=True,
+        hidden_act="silu",
+        max_position_embeddings=4096,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=0,
+        eos_token_id=1,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.moe_intermediate_size = moe_intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_nextn_predict_layers = num_nextn_predict_layers
+        self.num_attention_heads = num_attention_heads
+        self.n_shared_experts = n_shared_experts
+        self.n_routed_experts = n_routed_experts
+        self.ep_size = ep_size
+        self.routed_scaling_factor = routed_scaling_factor
+        self.kv_lora_rank = kv_lora_rank
+        self.q_lora_rank = q_lora_rank
+        self.qk_rope_head_dim = qk_rope_head_dim
+        self.v_head_dim = v_head_dim
+        self.qk_nope_head_dim = qk_nope_head_dim
+        self.topk_method = topk_method
+        self.n_group = n_group
+        self.topk_group = topk_group
+        self.num_experts_per_tok = num_experts_per_tok
+        self.moe_layer_freq = moe_layer_freq
+        self.first_k_dense_replace = first_k_dense_replace
+        self.norm_topk_prob = norm_topk_prob
+        self.scoring_func = scoring_func
+        self.aux_loss_alpha = aux_loss_alpha
+        self.seq_aux = seq_aux
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
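Because config.json maps "AutoConfig" to this class via its "auto_map" entry, the file above is what transformers executes when the repo is loaded with remote code enabled. A quick sketch, assuming a local checkout of this repo in the current directory:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".", trust_remote_code=True)  # resolves auto_map
print(type(cfg).__name__)        # DeepseekV3Config
print(cfg.num_hidden_layers)     # 61
print(cfg.rope_scaling["type"])  # yarn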
model-00001-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:673331cdfd7495679af5350feb865812f17a8cc569c077871a9dcd30342d5319
+size 5234138288

model-00002-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ece9d76e6478958a6f8037ef31215a7bd68181212206e1a70c19f46b12cdcf35
+size 4302381728

model-00005-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f14cea6b84f478c9a74f882e3a81cd9631434d52eb5a28846a9022955598f37
+size 4302381912

model-00006-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:158cb3e3c31f8c26de0034365f80b8d0c0af7a925e33ba456163d02974ccc560
+size 4372071352

model-00019-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1caa71f022ab304d867c4d557e474250072b9cf236352ab81faca5ecec74303a
+size 4302381880

model-00021-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b6cd69f262bc921808bc38acfeaa064b1ea09a0a6a61b317695ec3677993c36
+size 4302348176

model-00026-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dbeaf9629bdd34c512afc280bde3f6ff121be3f681348bec823c4f12fac129d
+size 4302348392

model-00027-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba66d5d16a08565c8c5935caa080fd740c98eb839d34b0323b610cfa002bf76b
+size 4302382448

model-00028-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fdbae4fab1874cbbde83d5a99ea2c3178d68442d5747d56ab6b122b51df92dd
+size 4302382720

model-00031-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c37003878178e291abc2c4fc6688ea3cdd9baee2b34c73225c3c8fdbb3892479
+size 4302348600

model-00033-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b77ef50f467201af7c7fce8b60f1cfecff27f649811c0b005de617f9502d958
+size 4302382720

model-00034-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c9395b7e7f8e4c448f85d04c8e615bc3eb9e8701347835102aa43563ec4b470
+size 1747416576

model-00035-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27a57644595016dbef35b5e819403ca26c0f0b3af25088475b3214d70d5a8dca
+size 4302315568

model-00041-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72cbb3929c3e01370b046066852c2f97649f696011e60e7d9c8ae5c4a09f7ed7
+size 4302382472

model-00044-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fbfbb238a18e36c2fc30965ec3cf2590f4bc99950f082bceca72cc6f95a1a56
+size 4302382656

model-00051-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:892d1f4ea037b6130050d9585164aff5af820154207979bcffd4c53e3bf95c6e
+size 4302348200

model-00052-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d72b7c1c3bb2e575e9c250e965df44882f51e899d4183fa8dcdf8d526586cd6
+size 4302382640

model-00053-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27e66b7bcd4e1757bd85ddcec5aaaaa36495c29176e4d95abdb204af3623cbf4
+size 4302348600

model-00055-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec327dd2dfdccf1205dbbcda38fc8fd9b1fc41a087dab48d1447442c906a9c2a
+size 4302382720

model-00057-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a26ed97da76684b32cf3123fc4bd14c9e8face823de4eb5c1b77ccbcc43abb1
+size 4302315568

model-00060-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e86f6f094276ab36b1918fa6a8f9c9a2650a81c7a857b184a219d70f25e83ba6
+size 4302382280

model-00068-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d58858c7f55e1e40c80694c1ac98c1a3252dcaa0568c195f2267d037d9c5d565
+size 4302382264

model-00072-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77c65099a2cc94f28a02315e24de3f1fe60a1e6873b3e61245945d173301aedc
+size 4302382720

model-00075-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ff05c4a6661e23371c266d7a55e6cb52fe65a9798f2c73347ec692362b9b25c
+size 4302348600

model-00078-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e61bdf10cb94e4d3536bcd5cd05c65d5a859272821a275d14a0fe44664dd447d
+size 1747416576

model-00091-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a3610698561e5ba5c18c3f38f419682da78332fb1097a2698fedc794145744a
+size 4302382720

model-00094-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c2e7d8def440f983640de0acc2b8aae72d9e4e62bc4b66fd331ca38fb9dc616
+size 4302382720

model-00095-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67ce646206a74f4a414cbb85b1dbab767ea8227e06a7975bc07c95fc049116ec
+size 4302348200

model-00096-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d9b7873a4c8a0f563d76c324fd772b7f13d826c2371dc795dcb38d32e87c412
+size 4302382640

model-00100-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23ddc8ab5f36f2268aa5f33617412503eb91f456b1d21b3db38d9034347e45ed
+size 1747416576

model-00111-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fe1449c825a69141993c6bfeb3e67513d07000d90b185084cd60bcaca3a7422
+size 4302348584

model-00112-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93d7d28e61e0dbf6e3ec327c8f822dd3999fc1860cd7240ec819868704b65c6f
+size 4302382264

model-00121-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:635fa662d482aaba53b03c46e9898429b5d1cd6e2c44a9698a17c0541c698d9a
+size 4302382720

model-00137-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff9a64ca0213d2dc0892aab32e9f0520ffbf85e78aac52e8d975bb76b8be9906
+size 4302382448

model-00144-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69b8cac80b73a96cfc2fc94934bd335517cc55102a011fc6254a0df4ccb8c3cc
+size 4302348568

model-00149-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21b955b8459839930d291fa2c8ef654724daa18c49067e6a534e011b1e8342ae
+size 4302382720

model-00154-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc9fb1ed56c775125b4973ce06b1cea635f91f98ad843b481d8039b535cd1387
+size 4302382720

model-00155-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5984eb2379d136e4f7a58303fc2e3fe48298ec40927d317331e3da1066b78fb2
+size 4302348392

model-00157-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2be3a8ebf43171f63918183ee452a9ea509d26563f6cb15348a0842b5a370c64
+size 4302382720

model-00160-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:340303aef798ea6fc624fc24a9eae973b29ca434787caa5794695bb021ace43d
+size 5230635800

model-00161-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df441be5e33dfbb06e5e0c5092f22655179de604ed1972c5b0420df2a1b06092
+size 4302382080

model-00162-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f44a3097f38bd66ff48e1cfd3877b4ca2d40fd63fbd3a01e01e81e6359a78ddb
+size 4302382704

model-00163-of-000163.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba097aded41190d07c8d83e80631568dc9e2c9a4017571682a43f706268862e6
+size 6584783200
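Each .safetensors entry above is a Git LFS pointer (three "key value" lines: version, oid, size), not the weight blob itself; the blob is only fetched on git lfs pull. A stdlib-only sketch (the parse_lfs_pointer helper is hypothetical, not part of this repo) that reads pointers from a pointer-only checkout and totals the advertised shard sizes:

from pathlib import Path

def parse_lfs_pointer(path: Path) -> dict[str, str]:
    # Only valid before `git lfs pull`, while the file is still a text pointer.
    return dict(line.split(" ", 1) for line in path.read_text().splitlines())

shards = sorted(Path(".").glob("model-*-of-000163.safetensors"))
total = sum(int(parse_lfs_pointer(p)["size"]) for p in shards)
print(f"{len(shards)} shards present, ~{total / 1e9:.0f} GB advertised")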
model.safetensors.index.json
ADDED

The diff for this file is too large to render. See raw diff.

tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
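Although its diff is too large to render, model.safetensors.index.json follows the standard transformers sharding format: a "metadata" block with the total byte count plus a "weight_map" from tensor name to shard file. A sketch of querying it (the tensor name below is illustrative, not verified against this index):

import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])                   # total bytes across all shards
print(index["weight_map"]["model.embed_tokens.weight"])  # e.g. "model-00001-of-000163.safetensors"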
tokenizer_config.json
ADDED

@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|begin▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": true,
+  "model_max_length": 16384,
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sp_model_kwargs": {},
+  "unk_token": null,
+  "tokenizer_class": "LlamaTokenizerFast",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true) %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if ns.is_first_sp %}{% set ns.system_prompt = ns.system_prompt + message['content'] %}{% set ns.is_first_sp = false %}{%- else %}{% set ns.system_prompt = ns.system_prompt + '\\n\\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{{ bos_token }}{{ ns.system_prompt }}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' in message %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls'] %}{%- if not ns.is_first %}{%- if message['content'] is none %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- else %}{{'<|Assistant|>' + message['content'] + '<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- endfor %}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' not in message %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}"
+}
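The chat_template above concatenates all system messages into a single prompt after the BOS token, wraps turns in <|User|>/<|Assistant|> markers, strips any reasoning prefix up to </think> from stored assistant turns, and ends the generation prompt with <|Assistant|><think>\n so the model resumes inside a reasoning block. A quick sketch of rendering it, assuming a local checkout of this repo:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # LlamaTokenizerFast per tokenizer_config.json
msgs = [{"role": "user", "content": "Hello"}]
text = tok.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
print(text)  # expected: <|begin▁of▁sentence|><|User|>Hello<|Assistant|><think>\n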