zR committed

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; the pointers for model-00040, model-00044, and model-00046 are therefore not shown in the list below. (A sketch of the upload_large_folder call named in the commit message follows the file list.)
- .gitattributes +1 -0
- chat_template.jinja +103 -0
- config.json +417 -0
- generation_config.json +10 -0
- model-00001-of-00047.safetensors +3 -0
- model-00002-of-00047.safetensors +3 -0
- model-00003-of-00047.safetensors +3 -0
- model-00004-of-00047.safetensors +3 -0
- model-00005-of-00047.safetensors +3 -0
- model-00006-of-00047.safetensors +3 -0
- model-00007-of-00047.safetensors +3 -0
- model-00008-of-00047.safetensors +3 -0
- model-00009-of-00047.safetensors +3 -0
- model-00010-of-00047.safetensors +3 -0
- model-00011-of-00047.safetensors +3 -0
- model-00012-of-00047.safetensors +3 -0
- model-00013-of-00047.safetensors +3 -0
- model-00014-of-00047.safetensors +3 -0
- model-00015-of-00047.safetensors +3 -0
- model-00016-of-00047.safetensors +3 -0
- model-00017-of-00047.safetensors +3 -0
- model-00018-of-00047.safetensors +3 -0
- model-00019-of-00047.safetensors +3 -0
- model-00020-of-00047.safetensors +3 -0
- model-00021-of-00047.safetensors +3 -0
- model-00022-of-00047.safetensors +3 -0
- model-00023-of-00047.safetensors +3 -0
- model-00024-of-00047.safetensors +3 -0
- model-00025-of-00047.safetensors +3 -0
- model-00026-of-00047.safetensors +3 -0
- model-00027-of-00047.safetensors +3 -0
- model-00028-of-00047.safetensors +3 -0
- model-00029-of-00047.safetensors +3 -0
- model-00030-of-00047.safetensors +3 -0
- model-00031-of-00047.safetensors +3 -0
- model-00032-of-00047.safetensors +3 -0
- model-00033-of-00047.safetensors +3 -0
- model-00034-of-00047.safetensors +3 -0
- model-00035-of-00047.safetensors +3 -0
- model-00036-of-00047.safetensors +3 -0
- model-00037-of-00047.safetensors +3 -0
- model-00038-of-00047.safetensors +3 -0
- model-00039-of-00047.safetensors +3 -0
- model-00041-of-00047.safetensors +3 -0
- model-00042-of-00047.safetensors +3 -0
- model-00043-of-00047.safetensors +3 -0
- model-00045-of-00047.safetensors +3 -0
- model-00047-of-00047.safetensors +3 -0
- model.safetensors.index.json +0 -0
- tokenizer.json +3 -0
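
The commit message refers to the upload_large_folder helper from huggingface_hub, which uploads a large checkpoint folder with a resumable worker pool, splitting the work across multiple commits. A minimal sketch of that call, assuming a placeholder repo id and local path (neither is named on this page):

# Hedged sketch of how a commit like this one is typically produced.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="your-org/your-model-fp8",       # hypothetical repo id
    repo_type="model",
    folder_path="./glm4-moe-fp8-checkpoint",  # hypothetical local folder
)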
    	
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
    	
chat_template.jinja ADDED
@@ -0,0 +1,103 @@
+[gMASK]<sop>
+{%- if tools -%}
+<|system|>
+# Tools
+
+You may call one or more functions to assist with the user query.
+
+You are provided with function signatures within <tools></tools> XML tags:
+<tools>
+{% for tool in tools %}
+{{ tool | tojson(ensure_ascii=False) }}
+{% endfor %}
+</tools>
+
+For each function call, output the function name and arguments within the following XML format:
+<tool_call>{function-name}
+<arg_key>{arg-key-1}</arg_key>
+<arg_value>{arg-value-1}</arg_value>
+<arg_key>{arg-key-2}</arg_key>
+<arg_value>{arg-value-2}</arg_value>
+...
+</tool_call>{%- endif -%}
+{%- macro visible_text(content) -%}
+    {%- if content is string -%}
+        {{- content }}
+    {%- elif content is iterable and content is not mapping -%}
+        {%- for item in content -%}
+            {%- if item is mapping and item.type == 'text' -%}
+                {{- item.text }}
+            {%- elif item is string -%}
+                {{- item }}
+            {%- endif -%}
+        {%- endfor -%}
+    {%- else -%}
+        {{- content }}
+    {%- endif -%}
+{%- endmacro -%}
+{%- set ns = namespace(last_user_index=-1) %}
+{%- for m in messages %}
+    {%- if m.role == 'user' %}
+        {% set ns.last_user_index = loop.index0 -%}
+    {%- endif %}
+{%- endfor %}
+{% for m in messages %}
+{%- if m.role == 'user' -%}<|user|>
+{{ visible_text(m.content) }}
+{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}
+{%- elif m.role == 'assistant' -%}
+<|assistant|>
+{%- set reasoning_content = '' %}
+{%- set content = visible_text(m.content) %}
+{%- if m.reasoning_content is string %}
+    {%- set reasoning_content = m.reasoning_content %}
+{%- else %}
+    {%- if '</think>' in content %}
+        {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+        {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+    {%- endif %}
+{%- endif %}
+{%- if loop.index0 > ns.last_user_index and reasoning_content -%}
+{{ '\n<think>' + reasoning_content.strip() +  '</think>'}}
+{%- else -%}
+{{ '\n<think></think>' }}
+{%- endif -%}
+{%- if content.strip() -%}
+{{ '\n' + content.strip() }}
+{%- endif -%}
+{% if m.tool_calls %}
+{% for tc in m.tool_calls %}
+{%- if tc.function %}
+    {%- set tc = tc.function %}
+{%- endif %}
+{{ '\n<tool_call>' + tc.name }}
+{% set _args = tc.arguments %}
+{% for k, v in _args.items() %}
+<arg_key>{{ k }}</arg_key>
+<arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
+{% endfor %}
+</tool_call>{% endfor %}
+{% endif %}
+{%- elif m.role == 'tool' -%}
+{%- if m.content is string -%}
+{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+    {{- '<|observation|>' }}
+{%- endif %}
+{{- '\n<tool_response>\n' }}
+{{- m.content }}
+{{- '\n</tool_response>' }}
+{%- else -%}
+<|observation|>{% for tr in m.content %}
+
+<tool_response>
+{{ tr.output if tr.output is defined else tr }}
+</tool_response>{% endfor -%}
+{% endif -%}
+{%- elif m.role == 'system' -%}
+<|system|>
+{{ visible_text(m.content) }}
+{%- endif -%}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+    <|assistant|>{{- '\n<think></think>' if (enable_thinking is defined and not enable_thinking) else '' -}}
+{%- endif -%}
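
The template above handles tool definitions, an optional /nothink flag, and stripping of <think> reasoning from earlier assistant turns. A minimal sketch of rendering it through transformers' apply_chat_template; the repo id is a placeholder and the tool definition is illustrative only (extra keyword arguments such as enable_thinking are forwarded into the template context):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model-fp8")  # hypothetical repo id
messages = [{"role": "user", "content": "What is the weather in Paris?"}]
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative tool, not from this repo
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}]
prompt = tok.apply_chat_template(
    messages,
    tools=tools,
    add_generation_prompt=True,  # appends <|assistant|>
    enable_thinking=False,       # appends /nothink and an empty <think></think>
    tokenize=False,
)
print(prompt)  # starts with [gMASK]<sop><|system|># Tools ...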
    	
config.json ADDED
@@ -0,0 +1,417 @@
+{
+  "architectures": [
+    "Glm4MoeForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "pad_token_id": 151329,
+  "eos_token_id": [
+    151329,
+    151336,
+    151338
+  ],
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "partial_rotary_factor": 0.5,
+  "initializer_range": 0.02,
+  "intermediate_size": 10944,
+  "max_position_embeddings": 131072,
+  "model_type": "glm4_moe",
+  "moe_intermediate_size": 1408,
+  "norm_topk_prob": true,
+  "num_attention_heads": 96,
+  "n_group": 1,
+  "topk_group": 1,
+  "n_routed_experts": 128,
+  "n_shared_experts": 1,
+  "routed_scaling_factor": 1.0,
+  "num_experts_per_tok": 8,
+  "first_k_dense_replace": 1,
+  "num_hidden_layers": 46,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 1000000,
+  "num_nextn_predict_layers": 1,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.54.0",
+  "use_cache": true,
+  "use_qk_norm": false,
+  "vocab_size": 151552,
+  "quantization_config": {
+    "config_groups": {
+      "group_0": {
+        "input_activations": {
+          "actorder": null,
+          "block_structure": null,
+          "dynamic": true,
+          "group_size": null,
+          "num_bits": 8,
+          "observer": null,
+          "observer_kwargs": {},
+          "strategy": "token",
+          "symmetric": true,
+          "type": "float"
+        },
+        "output_activations": null,
+        "targets": [
+          "Linear"
+        ],
+        "weights": {
+          "actorder": null,
+          "block_structure": null,
+          "dynamic": false,
+          "group_size": null,
+          "num_bits": 8,
+          "observer": "minmax",
+          "observer_kwargs": {},
+          "strategy": "channel",
+          "symmetric": true,
+          "type": "float"
+        }
+      }
+    },
+    "format": "float-quantized",
+    "ignore": [
+      "model.layers.9.mlp.gate",
+      "model.layers.22.self_attn.q_proj.bias",
+      "model.layers.21.post_attention_layernorm",
+      "model.layers.46.post_attention_layernorm",
+      "model.layers.33.mlp.gate.e_score_correction_bias",
+      "model.layers.19.input_layernorm",
+      "model.layers.23.input_layernorm",
+      "model.layers.26.input_layernorm",
+      "model.layers.16.input_layernorm",
+      "model.layers.2.input_layernorm",
+      "model.layers.43.mlp.gate.e_score_correction_bias",
+      "model.layers.17.self_attn.v_proj.bias",
+      "model.layers.4.self_attn.k_proj.bias",
+      "model.layers.24.mlp.gate",
+      "model.layers.42.self_attn.q_proj.bias",
+      "model.layers.19.self_attn.v_proj.bias",
+      "model.layers.36.self_attn.k_proj.bias",
+      "model.layers.43.self_attn.k_proj.bias",
+      "model.layers.26.self_attn.v_proj.bias",
+      "model.layers.33.mlp.gate",
+      "model.layers.31.input_layernorm",
+      "model.layers.8.input_layernorm",
+      "model.layers.9.mlp.gate.e_score_correction_bias",
+      "model.layers.19.mlp.gate.e_score_correction_bias",
+      "model.layers.14.self_attn.v_proj.bias",
+      "model.layers.19.mlp.gate",
+      "model.layers.12.mlp.gate",
+      "model.layers.37.mlp.gate.e_score_correction_bias",
+      "model.layers.39.mlp.gate",
+      "model.layers.20.self_attn.v_proj.bias",
+      "model.layers.1.self_attn.q_proj.bias",
+      "model.layers.7.self_attn.k_proj.bias",
+      "model.layers.19.self_attn.k_proj.bias",
+      "model.layers.30.mlp.gate.e_score_correction_bias",
+      "model.layers.37.input_layernorm",
+      "model.layers.40.self_attn.q_proj.bias",
+      "model.layers.30.input_layernorm",
+      "model.layers.10.post_attention_layernorm",
+      "model.layers.10.mlp.gate",
+      "model.layers.21.mlp.gate",
+      "model.layers.39.post_attention_layernorm",
+      "model.layers.20.self_attn.q_proj.bias",
+      "model.layers.20.input_layernorm",
+      "model.layers.16.self_attn.v_proj.bias",
+      "model.layers.5.self_attn.k_proj.bias",
+      "model.layers.18.input_layernorm",
+      "model.layers.35.input_layernorm",
+      "model.layers.43.mlp.gate",
+      "model.layers.5.self_attn.v_proj.bias",
+      "model.layers.28.mlp.gate",
+      "model.layers.4.self_attn.v_proj.bias",
+      "model.layers.46.mlp.gate.e_score_correction_bias",
+      "model.layers.25.mlp.gate.e_score_correction_bias",
+      "model.layers.46.enorm",
+      "model.layers.30.self_attn.k_proj.bias",
+      "model.layers.27.self_attn.v_proj.bias",
+      "model.layers.23.mlp.gate",
+      "model.layers.38.post_attention_layernorm",
+      "model.layers.0.post_attention_layernorm",
+      "model.layers.46.self_attn.k_proj.bias",
+      "model.layers.26.post_attention_layernorm",
+      "model.layers.26.self_attn.q_proj.bias",
+      "model.layers.46.embed_tokens",
+      "model.layers.24.input_layernorm",
+      "model.layers.41.post_attention_layernorm",
+      "model.layers.20.mlp.gate",
+      "model.layers.3.mlp.gate",
+      "model.layers.22.input_layernorm",
+      "model.layers.15.post_attention_layernorm",
+      "model.layers.39.input_layernorm",
+      "model.layers.42.mlp.gate.e_score_correction_bias",
+      "model.layers.34.mlp.gate",
+      "model.layers.13.mlp.gate",
+      "model.layers.38.input_layernorm",
+      "model.layers.15.self_attn.q_proj.bias",
+      "model.layers.7.post_attention_layernorm",
+      "model.layers.28.self_attn.v_proj.bias",
+      "model.layers.36.post_attention_layernorm",
+      "model.layers.34.self_attn.k_proj.bias",
+      "model.layers.23.post_attention_layernorm",
+      "model.layers.43.input_layernorm",
+      "model.layers.39.self_attn.k_proj.bias",
+      "model.layers.18.post_attention_layernorm",
+      "model.layers.29.mlp.gate.e_score_correction_bias",
+      "model.layers.34.mlp.gate.e_score_correction_bias",
+      "model.layers.28.self_attn.q_proj.bias",
+      "model.layers.46.eh_proj",
+      "model.layers.14.mlp.gate.e_score_correction_bias",
+      "model.layers.7.mlp.gate.e_score_correction_bias",
+      "model.layers.35.self_attn.k_proj.bias",
+      "model.layers.13.post_attention_layernorm",
+      "model.layers.22.self_attn.k_proj.bias",
+      "model.layers.4.self_attn.q_proj.bias",
+      "model.layers.41.self_attn.k_proj.bias",
+      "model.layers.12.post_attention_layernorm",
+      "model.layers.37.self_attn.q_proj.bias",
+      "model.layers.46.input_layernorm",
+      "model.layers.24.self_attn.k_proj.bias",
+      "model.layers.5.mlp.gate",
+      "model.layers.9.self_attn.k_proj.bias",
+      "model.layers.10.self_attn.v_proj.bias",
+      "model.layers.42.self_attn.v_proj.bias",
+      "model.embed_tokens",
+      "model.layers.2.self_attn.q_proj.bias",
+      "model.layers.28.mlp.gate.e_score_correction_bias",
+      "model.layers.24.self_attn.v_proj.bias",
+      "model.layers.15.input_layernorm",
+      "model.layers.9.input_layernorm",
+      "model.layers.33.input_layernorm",
+      "model.layers.45.self_attn.v_proj.bias",
+      "model.layers.31.self_attn.q_proj.bias",
+      "model.layers.34.input_layernorm",
+      "model.layers.14.input_layernorm",
+      "model.layers.17.post_attention_layernorm",
+      "model.layers.0.self_attn.k_proj.bias",
+      "model.layers.37.self_attn.v_proj.bias",
+      "model.norm",
+      "model.layers.9.self_attn.q_proj.bias",
+      "model.layers.4.input_layernorm",
+      "model.layers.45.self_attn.q_proj.bias",
+      "model.layers.7.self_attn.q_proj.bias",
+      "model.layers.32.self_attn.v_proj.bias",
+      "model.layers.22.self_attn.v_proj.bias",
+      "model.layers.45.post_attention_layernorm",
+      "model.layers.40.mlp.gate",
+      "model.layers.29.self_attn.v_proj.bias",
+      "model.layers.3.mlp.gate.e_score_correction_bias",
+      "model.layers.31.post_attention_layernorm",
+      "model.layers.41.self_attn.v_proj.bias",
+      "model.layers.5.input_layernorm",
+      "model.layers.13.self_attn.v_proj.bias",
+      "model.layers.26.self_attn.k_proj.bias",
+      "model.layers.28.post_attention_layernorm",
+      "model.layers.17.mlp.gate",
+      "model.layers.42.mlp.gate",
+      "model.layers.34.self_attn.v_proj.bias",
+      "model.layers.1.mlp.gate.e_score_correction_bias",
+      "model.layers.21.input_layernorm",
+      "model.layers.21.self_attn.k_proj.bias",
+      "model.layers.29.self_attn.k_proj.bias",
+      "model.layers.20.post_attention_layernorm",
+      "model.layers.14.post_attention_layernorm",
+      "model.layers.34.post_attention_layernorm",
+      "model.layers.27.self_attn.k_proj.bias",
+      "model.layers.24.mlp.gate.e_score_correction_bias",
+      "model.layers.31.mlp.gate.e_score_correction_bias",
+      "model.layers.2.self_attn.k_proj.bias",
+      "model.layers.25.self_attn.v_proj.bias",
+      "model.layers.1.post_attention_layernorm",
+      "model.layers.10.self_attn.q_proj.bias",
+      "model.layers.16.mlp.gate.e_score_correction_bias",
+      "model.layers.16.self_attn.q_proj.bias",
+      "model.layers.38.mlp.gate.e_score_correction_bias",
+      "model.layers.46.self_attn.q_proj.bias",
+      "model.layers.23.self_attn.k_proj.bias",
+      "model.layers.42.post_attention_layernorm",
+      "model.layers.33.self_attn.k_proj.bias",
+      "model.layers.30.mlp.gate",
+      "model.layers.34.self_attn.q_proj.bias",
+      "model.layers.4.post_attention_layernorm",
+      "model.layers.13.self_attn.k_proj.bias",
+      "model.layers.2.post_attention_layernorm",
+      "model.layers.40.post_attention_layernorm",
+      "model.layers.38.self_attn.k_proj.bias",
+      "model.layers.1.self_attn.k_proj.bias",
+      "model.layers.10.mlp.gate.e_score_correction_bias",
+      "model.layers.43.self_attn.v_proj.bias",
+      "model.layers.11.input_layernorm",
+      "model.layers.42.input_layernorm",
+      "model.layers.19.self_attn.q_proj.bias",
+      "model.layers.24.post_attention_layernorm",
+      "model.layers.12.input_layernorm",
+      "model.layers.42.self_attn.k_proj.bias",
+      "model.layers.12.self_attn.k_proj.bias",
+      "model.layers.0.self_attn.v_proj.bias",
+      "model.layers.1.mlp.gate",
+      "model.layers.39.self_attn.v_proj.bias",
+      "model.layers.14.mlp.gate",
+      "model.layers.44.post_attention_layernorm",
+      "model.layers.37.mlp.gate",
+      "model.layers.31.mlp.gate",
+      "model.layers.8.post_attention_layernorm",
+      "model.layers.2.mlp.gate.e_score_correction_bias",
+      "model.layers.36.input_layernorm",
+      "model.layers.30.post_attention_layernorm",
+      "model.layers.46.shared_head.norm",
+      "model.layers.4.mlp.gate",
+      "model.layers.6.mlp.gate",
+      "model.layers.29.mlp.gate",
+      "model.layers.7.mlp.gate",
+      "model.layers.0.self_attn.q_proj.bias",
+      "model.layers.44.mlp.gate",
+      "model.layers.32.self_attn.k_proj.bias",
+      "model.layers.4.mlp.gate.e_score_correction_bias",
+      "model.layers.18.self_attn.v_proj.bias",
+      "model.layers.30.self_attn.q_proj.bias",
+      "model.layers.21.mlp.gate.e_score_correction_bias",
+      "model.layers.32.post_attention_layernorm",
+      "model.layers.19.post_attention_layernorm",
+      "model.layers.22.mlp.gate",
+      "model.layers.13.mlp.gate.e_score_correction_bias",
+      "model.layers.8.mlp.gate",
+      "model.layers.36.self_attn.v_proj.bias",
+      "model.layers.5.post_attention_layernorm",
+      "model.layers.32.input_layernorm",
+      "model.layers.33.post_attention_layernorm",
+      "model.layers.21.self_attn.v_proj.bias",
+      "model.layers.2.mlp.gate",
+      "model.layers.13.input_layernorm",
+      "model.layers.15.self_attn.v_proj.bias",
+      "model.layers.16.self_attn.k_proj.bias",
+      "model.layers.2.self_attn.v_proj.bias",
+      "model.layers.43.post_attention_layernorm",
+      "model.layers.7.input_layernorm",
+      "model.layers.29.post_attention_layernorm",
+      "model.layers.20.self_attn.k_proj.bias",
+      "model.layers.38.mlp.gate",
+      "model.layers.18.mlp.gate.e_score_correction_bias",
+      "model.layers.25.input_layernorm",
+      "model.layers.1.input_layernorm",
+      "model.layers.46.hnorm",
+      "model.layers.31.self_attn.v_proj.bias",
+      "model.layers.14.self_attn.q_proj.bias",
+      "model.layers.18.self_attn.q_proj.bias",
+      "model.layers.8.self_attn.q_proj.bias",
+      "model.layers.35.self_attn.v_proj.bias",
+      "model.layers.45.mlp.gate.e_score_correction_bias",
+      "model.layers.9.post_attention_layernorm",
+      "model.layers.30.self_attn.v_proj.bias",
+      "model.layers.15.mlp.gate",
+      "model.layers.10.input_layernorm",
+      "model.layers.6.self_attn.q_proj.bias",
+      "model.layers.11.mlp.gate.e_score_correction_bias",
+      "model.layers.41.input_layernorm",
+      "model.layers.22.mlp.gate.e_score_correction_bias",
+      "model.layers.15.mlp.gate.e_score_correction_bias",
+      "model.layers.21.self_attn.q_proj.bias",
+      "model.layers.17.mlp.gate.e_score_correction_bias",
+      "model.layers.16.mlp.gate",
+      "model.layers.25.self_attn.q_proj.bias",
+      "model.layers.6.input_layernorm",
+      "model.layers.17.input_layernorm",
+      "model.layers.26.mlp.gate.e_score_correction_bias",
+      "model.layers.35.mlp.gate.e_score_correction_bias",
+      "model.layers.0.input_layernorm",
+      "model.layers.3.post_attention_layernorm",
+      "model.layers.6.self_attn.v_proj.bias",
+      "model.layers.27.mlp.gate.e_score_correction_bias",
+      "model.layers.18.mlp.gate",
+      "model.layers.28.input_layernorm",
+      "model.layers.9.self_attn.v_proj.bias",
+      "model.layers.31.self_attn.k_proj.bias",
+      "model.layers.40.self_attn.v_proj.bias",
+      "model.layers.12.self_attn.q_proj.bias",
+      "model.layers.41.mlp.gate",
+      "model.layers.5.self_attn.q_proj.bias",
+      "model.layers.11.self_attn.v_proj.bias",
+      "model.layers.36.mlp.gate",
+      "model.layers.27.self_attn.q_proj.bias",
+      "model.layers.40.self_attn.k_proj.bias",
+      "model.layers.11.post_attention_layernorm",
+      "model.layers.27.input_layernorm",
+      "model.layers.12.self_attn.v_proj.bias",
+      "model.layers.46.mlp.gate",
+      "model.layers.17.self_attn.k_proj.bias",
+      "model.layers.3.input_layernorm",
+      "model.layers.44.input_layernorm",
+      "model.layers.10.self_attn.k_proj.bias",
+      "model.layers.41.mlp.gate.e_score_correction_bias",
+      "model.layers.7.self_attn.v_proj.bias",
+      "model.layers.18.self_attn.k_proj.bias",
+      "model.layers.1.self_attn.v_proj.bias",
+      "model.layers.26.mlp.gate",
+      "model.layers.45.input_layernorm",
+      "model.layers.23.self_attn.v_proj.bias",
+      "model.layers.39.mlp.gate.e_score_correction_bias",
+      "model.layers.12.mlp.gate.e_score_correction_bias",
+      "model.layers.37.post_attention_layernorm",
+      "model.layers.46.self_attn.v_proj.bias",
+      "model.layers.36.mlp.gate.e_score_correction_bias",
+      "model.layers.5.mlp.gate.e_score_correction_bias",
+      "model.layers.35.mlp.gate",
+      "model.layers.44.self_attn.k_proj.bias",
+      "model.layers.3.self_attn.k_proj.bias",
+      "model.layers.11.mlp.gate",
+      "model.layers.11.self_attn.q_proj.bias",
+      "model.layers.17.self_attn.q_proj.bias",
+      "model.layers.32.self_attn.q_proj.bias",
+      "model.layers.11.self_attn.k_proj.bias",
+      "model.layers.40.mlp.gate.e_score_correction_bias",
+      "model.layers.41.self_attn.q_proj.bias",
+      "model.layers.15.self_attn.k_proj.bias",
+      "model.layers.44.self_attn.v_proj.bias",
+      "model.layers.25.self_attn.k_proj.bias",
+      "model.layers.25.post_attention_layernorm",
+      "model.layers.29.input_layernorm",
+      "model.layers.44.self_attn.q_proj.bias",
+      "model.layers.16.post_attention_layernorm",
+      "model.layers.6.mlp.gate.e_score_correction_bias",
+      "model.layers.38.self_attn.v_proj.bias",
+      "model.layers.40.input_layernorm",
+      "model.layers.6.post_attention_layernorm",
+      "model.layers.22.post_attention_layernorm",
+      "model.layers.8.self_attn.k_proj.bias",
+      "model.layers.37.self_attn.k_proj.bias",
+      "model.layers.23.mlp.gate.e_score_correction_bias",
+      "model.layers.27.mlp.gate",
+      "model.layers.8.mlp.gate.e_score_correction_bias",
+      "model.layers.28.self_attn.k_proj.bias",
+      "model.layers.24.self_attn.q_proj.bias",
+      "model.layers.39.self_attn.q_proj.bias",
+      "model.layers.36.self_attn.q_proj.bias",
+      "model.layers.45.self_attn.k_proj.bias",
+      "model.layers.32.mlp.gate.e_score_correction_bias",
+      "model.layers.35.self_attn.q_proj.bias",
+      "model.layers.33.self_attn.q_proj.bias",
+      "model.layers.14.self_attn.k_proj.bias",
+      "lm_head",
+      "model.layers.3.self_attn.v_proj.bias",
+      "model.layers.44.mlp.gate.e_score_correction_bias",
+      "model.layers.45.mlp.gate",
+      "model.layers.32.mlp.gate",
+      "model.layers.33.self_attn.v_proj.bias",
+      "model.layers.29.self_attn.q_proj.bias",
+      "model.layers.3.self_attn.q_proj.bias",
+      "model.layers.35.post_attention_layernorm",
+      "model.layers.6.self_attn.k_proj.bias",
+      "model.layers.43.self_attn.q_proj.bias",
+      "model.layers.20.mlp.gate.e_score_correction_bias",
+      "model.layers.8.self_attn.v_proj.bias",
+      "model.layers.13.self_attn.q_proj.bias",
+      "model.layers.27.post_attention_layernorm",
+      "model.layers.38.self_attn.q_proj.bias",
+      "model.layers.25.mlp.gate",
+      "model.layers.23.self_attn.q_proj.bias"
+    ],
+    "quant_method": "compressed-tensors",
+    "quantization_status": "compressed"
+  }
+}
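
The config describes a Glm4MoeForCausalLM with 46 hidden layers, 128 routed experts (8 active per token, plus 1 shared expert, first layer dense) and 131072-token context, quantized with compressed-tensors to FP8: static per-channel weight scales ("strategy": "channel", "dynamic": false) and dynamic per-token activation quantization ("strategy": "token", "dynamic": true) on Linear modules. The "ignore" list keeps embeddings, lm_head, norms, attention biases, MoE router gates, and the layer-46 modules (enorm, hnorm, eh_proj, etc., apparently the extra multi-token-prediction layer implied by num_nextn_predict_layers: 1) in bfloat16. A minimal loading sketch; such checkpoints load through transformers when the compressed-tensors package is installed, and the repo id is a placeholder:

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "your-org/your-model-fp8",  # hypothetical repo id
    torch_dtype="auto",          # ignored modules stay bfloat16 per the config
    device_map="auto",
)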
    	
generation_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "_from_model_config": true,
+  "eos_token_id": [
+    151329,
+    151336,
+    151338
+  ],
+  "pad_token_id": 151329,
+  "transformers_version": "4.54.0"
+}
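
These defaults mean generation stops on any of the three special end tokens listed above. A tiny sketch of reading them back, again with a placeholder repo id:

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("your-org/your-model-fp8")  # hypothetical repo id
assert gen.pad_token_id == 151329
assert gen.eos_token_id == [151329, 151336, 151338]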
    	
model-00001-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a402a8fac66c090eddd032e70aaa5ab9c87307aa44451c659e1b650f9c60ad53
+size 2726784880
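
Each shard is committed as a Git LFS pointer: a version line, the sha256 of the actual object, and its size in bytes. A minimal sketch verifying a downloaded shard against the pointer above (the local path is a placeholder):

import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    # Stream the file so multi-GB shards don't need to fit in memory.
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

ok = verify_lfs_object(
    "model-00001-of-00047.safetensors",  # hypothetical local path
    "a402a8fac66c090eddd032e70aaa5ab9c87307aa44451c659e1b650f9c60ad53",
    2726784880,
)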
    	
model-00002-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d3801d622ff4292be51939389c2e30aac77dfc0d6728d777077b1d892c8eb7f
+size 2345777464
    	
model-00003-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:370acfd3919de83ddde52b13520200c889233d776546a9b8f6834ce09163af72
+size 2345777464
    	
model-00004-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ee5f8494b76e349c0065073df819272b4abf799a56815ac0faa216251cd9075
+size 2345777464
    	
model-00005-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdc1fe9b9550617ee72997b29fc8f13d391a7bb1439d1df1d5ba5c159bcd5c78
+size 2345777464
    	
model-00006-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee91fb563f9d471d1870f99496bbe576dd7435127d506cd251d8a40a0581ea25
+size 2345777464
    	
model-00007-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b56b80408c45881d8b1aa234ebe4c4826af24abc26038051cd92ce0aa2212070
+size 2345777464
    	
model-00008-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f78687be00665850826872a5666ac3fb72c15cb70ffb6f8ab7da2e402ddfa307
+size 2345777464
    	
model-00009-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65c39636822e6fa307ca0e27c4cc14e8045a80799f801d638a1ef6138534db37
+size 2345777464
    	
model-00010-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dd40942500a67c1c7f3eee752a6777876923eea9b00fe3abe898794f5b44e28
+size 2345777464
    	
model-00011-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e400a5ca71912ac0ca8da5fc103c0ee6a7dd25eb68816620b4f30eb48e30138c
+size 2345778256
    	
model-00012-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef8b28bbe7d35824f3fcd79833333c1427dd844f9fa5872fb9b4e237a4b923e9
+size 2345778256
    	
model-00013-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b08854bbbeebd8df2cca69c83036698950b0ab0aca37497d4db5af1da55e021
+size 2345778256
    	
model-00014-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97df7e09418afdbeb89677b8baa1a8456a45c70f92597f307ea710c7fc71e5b5
+size 2345778256

model-00015-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb3cd4497d95e3fc0d679a3344d7bcf8ef50bcf9074eed34b5596099e6061624
+size 2345778256

model-00016-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e55954e25fda8f2b1c224a39e2350ed29595659786ed9db1fdf30b03623ba9
+size 2345778256

model-00017-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdda21d70b7bf6e067a7da0b9abfbde5d7fbb33740fa0b4d2839b316f61f2f69
+size 2345778256

model-00018-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:235e3a593ac9bccd5e6ee9ba1d8d5539c78b30632dafa2ce39ede0a6e7ce8a5f
+size 2345778256

model-00019-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75fe5acbcb61b0c243f52d7e2a3436f73c4b41f2b7645779d7f092cac8d6c122
+size 2345778256

model-00020-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e68c695f2d732a714c09ab07832dca8443ddcdc745bc4fc19c49962a1ff69a79
+size 2345778256

model-00021-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bcd058b41a7fef9d6c05cb024e776ee78f7cd00cf8c73b5e912fc28e32e7454
+size 2345778256

model-00022-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31225cc96950af59d7775e287a7af7ffdf2a13ce8073114cef082a06e5c58083
+size 2345778256

model-00023-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d71c2b109c55b79c61e98af7b80148a91a008770de217fee16483ec9f1eb8eef
+size 2345778256

model-00024-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6077ad7f4ef58045de15cb4bdf66ca773d9917eb8179f1b5160ed65931be07cf
+size 2345778256

model-00025-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fa816ebe9f85d1a21c26182a4f2620b090604066136e2dee997d7c16c62e845
+size 2345778256

model-00026-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ede66e0f34d047a685d0c88d4392b2ea7320c1f9ec58a91cdf86a3aa3187c111
+size 2345778256

model-00027-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae3404cd91efa7632e7761cef0e36d287531043b8293e8ca231f80556de0c943
+size 2345778256

model-00028-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc866b2ceb0c216a0e34f0a5d7d7178e28db4f1170778ea2ee813c487adad690
+size 2345778256

model-00029-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86c0aeefe2ccf3db32de08f211c89b2dac92f08ab9fce79a9cd0dac3a7168c94
+size 2345778256

model-00030-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:934d52a70fce1b5350a6097a60b6e7d56e5c62e74bbd1f978b06b2cb05eda181
+size 2345778256

model-00031-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1ae794e253939c3ace1438e156ee049c8380294a7cf2ceb71df6d24f3db7529
+size 2345778256

model-00032-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be9c3a1a94eb73278eee04f20536984049a19d22cfec99842f130f9a0124b679
+size 2345778256

model-00033-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fe977ee1930c5371511e72358fe28b6652153c144682f0d0b25985396a2d9f3
+size 2345778256

model-00034-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56e8edad38c7825a51eb2d95e696779b126571861a23570d3c830d5145af88d5
+size 2345778256

model-00035-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1987ca16a5051008d42528a6090e0858cca95e96635c17630fc4b42be64fba2b
+size 2345778256

model-00036-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d96fd512b5e06a9ba547a9075c0c92ed87d48f58bd55e70cd1d36ab370e953d
+size 2345778256

model-00037-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:043d5e8e4fce151634a4bcb8a06a782eacd12cbe10617c4e6dad70036040b6d6
+size 2345778256

model-00038-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abd38bbfa42f52515791133a1f613fdb59bfbc7c36eb5494c56367545c025df8
+size 2345778256

model-00039-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9d9984281c5587e6749846705853d14287a6bf13ac4f55cc8eb45db25d85123
+size 2345778256

model-00041-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc0dbeba62a43bc2bb5420e32c2fbbd0b246f3422210c45338a34567f814e166
+size 2345778256

model-00042-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:140b23ed6313b17d3bb83dfa408489f416e2b4fb69ab213a0fa95cdaa9266022
+size 2345778256

model-00043-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:405cc1ce09cb6f98b93465847c673266b6e2769322929cb7a89a4bad1f23995f
+size 2345778256

model-00045-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:660f9a5d8ce0fc3c6bb06dadbe7ef096737a56583222f127219185d6636490c9
+size 2345778256

model-00047-of-00047.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e40754924a9ee2229bd1f5ec0a2191a831f6dbf9f9d6f5ef534c6a2a1e8f7cb
+size 2412912168

model.safetensors.index.json ADDED
(The diff for this file is too large to render. See raw diff.)
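Although the index diff is not rendered here, the file's role in a sharded safetensors checkpoint is standard: `model.safetensors.index.json` carries a `metadata.total_size` field and a `weight_map` that maps each tensor name to the shard file storing it. Below is a minimal sketch of resolving a tensor to its shard; the tensor name is hypothetical and depends on the model architecture.

```python
import json

# Load the shard index that accompanies a sharded safetensors checkpoint.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # total checkpoint size in bytes

# weight_map: tensor name -> shard filename, e.g. "model-00004-of-00047.safetensors"
tensor = "model.layers.0.self_attn.q_proj.weight"  # hypothetical tensor name
print(index["weight_map"].get(tensor, "tensor not found in weight_map"))
```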
    	
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9340665016419c825c4bdabbcc9acc43b7ca2c68ce142724afa829abb1be5efd
+size 19970699
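Each `ADDED` entry above is a three-line Git LFS pointer (spec version, `oid sha256:` digest, byte `size`) standing in for the binary payload. A minimal sketch of checking a downloaded file against its pointer's recorded size and digest, assuming both the pointer text and the blob are available locally (the paths below are hypothetical):

```python
import hashlib
import os

def read_pointer(path):
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    oid = fields["oid"].partition(":")[2]  # strip the "sha256:" prefix
    return fields["version"], oid, int(fields["size"])

def verify(pointer_path, blob_path):
    """Return True if blob_path matches the pointer's size and sha256."""
    _, oid, size = read_pointer(pointer_path)
    if os.path.getsize(blob_path) != size:  # cheap check first
        return False
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == oid

# Hypothetical paths: a saved pointer file and the downloaded shard.
print(verify("model-00004-of-00047.safetensors.pointer",
             "model-00004-of-00047.safetensors"))
```

The size comparison runs first because it is cheap; hashing a ~2.3 GB shard is the expensive step, so a truncated download is rejected without reading the whole file.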
