Upload folder using huggingface_hub
Browse files- README.md +199 -3
- chat_template.jinja +1 -1
- config.json +39 -6
- generation_config.json +11 -4
- model.safetensors +2 -2
- preprocessor_config.json +3 -1
- processor_config.json +2 -2
- tokenizer_config.json +1 -0
    	
README.md CHANGED
@@ -1,3 +1,199 @@
----
-
-
+---
+library_name: transformers
+tags: []
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
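The new README is the stock auto-generated model card, so its "How to Get Started with the Model" section is still a [More Information Needed] stub. A minimal sketch of what that stub would typically contain; `your-org/your-model` is a hypothetical placeholder (the commit does not name the repository), and it assumes a transformers build recent enough to register the LightOnOCR classes referenced in `config.json` below:

```python
# Minimal getting-started sketch. Assumptions: "your-org/your-model" is a
# hypothetical placeholder repo id, and the installed transformers registers
# the LightOnOCR classes named in config.json.
from transformers import AutoModelForImageTextToText, AutoProcessor

model_id = "your-org/your-model"  # hypothetical
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForImageTextToText.from_pretrained(model_id)
```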
    	
chat_template.jinja CHANGED
@@ -1 +1 @@
-{% set image_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|image_pad|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}
+{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|image_pad|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}
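The template change adds a video branch next to the existing image branch: a video content entry now renders as `<|vision_start|><|video_pad|><|vision_end|>`, with an optional `Video N:` prefix when `add_vision_id` is set. A sketch of a message that exercises the new branch; it assumes the `processor` from the earlier sketch and that extra kwargs such as `add_vision_id` are forwarded into the Jinja context, as recent transformers versions do:

```python
# Exercising the new video branch of the chat template (sketch; assumes
# `processor` from above and kwarg forwarding into the template context).
messages = [
    {"role": "user", "content": [
        {"type": "image"},  # image branch: optional "Picture 1: " + <|image_pad|>
        {"type": "video"},  # new branch: "Video 1: " + <|vision_start|><|video_pad|><|vision_end|>
        {"type": "text", "text": "Describe both inputs."},
    ]},
]
prompt = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,  # appends "<|im_start|>assistant\n"
    add_vision_id=True,          # enables the numbered "Picture/Video N: " prefixes
)
```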
    	
config.json CHANGED
@@ -1,7 +1,9 @@
 {
   "architectures": [
-    "
+    "LightOnOCRForConditionalGeneration"
   ],
+  "dtype": "float32",
+  "image_token_id": 151655,
   "model_type": "mistral3",
   "multimodal_projector_bias": false,
   "projector_hidden_act": "gelu",
@@ -10,26 +12,58 @@
     "architectures": [
       "Qwen3ForCausalLM"
     ],
+    "attention_bias": false,
     "attention_dropout": 0,
     "head_dim": 128,
     "hidden_act": "silu",
     "hidden_size": 1024,
     "initializer_range": 0.02,
     "intermediate_size": 3072,
+    "layer_types": [
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention"
+    ],
     "max_position_embeddings": 40960,
+    "max_window_layers": 28,
     "model_type": "qwen3",
     "num_attention_heads": 16,
     "num_hidden_layers": 28,
     "num_key_value_heads": 8,
     "rms_norm_eps": 1e-06,
+    "rope_scaling": null,
     "rope_theta": 1000000,
     "sliding_window": null,
     "use_cache": true,
     "use_sliding_window": false,
     "vocab_size": 151936
   },
-  "
-  "transformers_version": "4.50.0.dev0",
+  "transformers_version": "4.57.0.dev0",
   "vision_config": {
     "attention_dropout": 0,
     "head_dim": 64,
@@ -45,6 +79,5 @@
     "patch_size": 14,
     "rope_theta": 10000
   },
-  "vision_feature_layer": -1
-
-}
+  "vision_feature_layer": -1
+}
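The reworked config keeps the top-level `model_type` as `mistral3` while renaming the architecture to `LightOnOCRForConditionalGeneration`, and makes the Qwen3 text config fully explicit: `attention_bias`, `rope_scaling`, `max_window_layers`, and 28 `layer_types` entries, all `full_attention`, matching `num_hidden_layers`. A sketch that confirms the nested layout, under the same placeholder-repo assumption as above:

```python
# Inspecting the nested config layout shown in this diff (sketch; same
# hypothetical repo id as above).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("your-org/your-model")  # hypothetical
assert cfg.model_type == "mistral3"
assert cfg.text_config.model_type == "qwen3"
assert cfg.text_config.num_hidden_layers == 28
assert all(t == "full_attention" for t in cfg.text_config.layer_types)
```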
    	
generation_config.json CHANGED
@@ -1,6 +1,13 @@
 {
-
-
-
-
+    "bos_token_id": 151643,
+    "do_sample": true,
+    "eos_token_id": [
+        151645,
+        151643
+    ],
+    "pad_token_id": 151643,
+    "temperature": 0.2,
+    "top_k": 0,
+    "top_p": 0.9,
+    "transformers_version": "4.55.3"
}
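The generation defaults change to explicit sampling settings: low-temperature nucleus sampling with `top_k` disabled, and two stop tokens. A sketch of the equivalent explicit `generate()` call, with values copied from the diff; `model` and `inputs` are assumed from the earlier sketches, and 151643/151645 are Qwen's `<|endoftext|>`/`<|im_end|>` ids:

```python
# Equivalent explicit generate() call for the new defaults (sketch; `model`
# and `inputs` are assumed from the earlier sketches).
outputs = model.generate(
    **inputs,
    do_sample=True,                 # sampling on by default
    temperature=0.2,                # low temperature: close to greedy
    top_p=0.9,                      # nucleus sampling
    top_k=0,                        # top-k filtering disabled
    eos_token_id=[151645, 151643],  # stop on <|im_end|> or <|endoftext|>
    pad_token_id=151643,            # <|endoftext|>, matching tokenizer_config
)
```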
    	
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
+oid sha256:c588f8ef5e616dea80f0cb57cf9e82725d98571fdbce3e1e7f507859bb8140d7
+size 2322532696
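`model.safetensors` is a Git LFS pointer, so only its sha256 and byte size change in the diff; the weights themselves live in LFS storage. A small stdlib sketch for verifying a local download against the pointer:

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

assert sha256_of("model.safetensors") == (
    "c588f8ef5e616dea80f0cb57cf9e82725d98571fdbce3e1e7f507859bb8140d7"
)
```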
    	
preprocessor_config.json CHANGED
@@ -7,6 +7,7 @@
   "do_center_crop": null,
   "do_convert_rgb": true,
   "do_normalize": true,
+  "do_pad": null,
   "do_rescale": true,
   "do_resize": true,
   "image_mean": [
@@ -21,8 +22,9 @@
     0.27577711
   ],
   "input_data_format": null,
+  "pad_size": null,
   "patch_size": 14,
-  "processor_class": "
+  "processor_class": "LightOnOCRProcessor",
   "resample": 3,
   "rescale_factor": 0.00392156862745098,
   "return_tensors": null,
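Beyond the new `do_pad`/`pad_size` fields and the `LightOnOCRProcessor` class name, the unchanged values are worth decoding: `resample: 3` is PIL's `BICUBIC`, and `rescale_factor` is 1/255, i.e. pixels are scaled to [0, 1] before mean/std normalization. A sketch under the same placeholder-repo assumption:

```python
# Decoding the preprocessor values (sketch; same hypothetical repo id).
import math
from transformers import AutoImageProcessor

ip = AutoImageProcessor.from_pretrained("your-org/your-model")  # hypothetical
assert math.isclose(ip.rescale_factor, 1 / 255)  # 0.00392156862745098
assert ip.resample == 3                          # PIL's BICUBIC
assert ip.patch_size == 14                       # 14x14 vision patches
```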
    	
processor_config.json CHANGED
@@ -3,6 +3,6 @@
   "image_end_token": "<|vision_end|>",
   "image_token": "<|image_pad|>",
   "patch_size": 14,
-  "processor_class": "
+  "processor_class": "LightOnOCRProcessor",
   "spatial_merge_size": 2
-}
+}
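With `processor_class` now set to `LightOnOCRProcessor` here, in `preprocessor_config.json`, and in the `tokenizer_config.json` hunk below, `AutoProcessor` can resolve the whole bundle, tokenizer plus image processor, to one class. A sketch:

```python
# AutoProcessor now resolves to LightOnOCRProcessor via the processor_class
# fields (sketch; same hypothetical repo id as above).
from transformers import AutoProcessor

proc = AutoProcessor.from_pretrained("your-org/your-model")
print(type(proc).__name__)  # expected: LightOnOCRProcessor
print(proc.image_token)     # "<|image_pad|>" per this file
```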
    	
tokenizer_config.json CHANGED
@@ -233,6 +233,7 @@
   "extra_special_tokens": {},
   "model_max_length": 131072,
   "pad_token": "<|endoftext|>",
+  "processor_class": "LightOnOCRProcessor",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
   "unk_token": null