Upload processor
- added_tokens.json +3 -0
- preprocessor_config.json +24 -0
- qformer_tokenizer/added_tokens.json +3 -0
- qformer_tokenizer/special_tokens_map.json +8 -0
- qformer_tokenizer/tokenizer.json +0 -0
- qformer_tokenizer/tokenizer_config.json +14 -0
- qformer_tokenizer/vocab.txt +0 -0
- special_tokens_map.json +6 -0
- tokenizer.model +3 -0
- tokenizer_config.json +36 -0
    	
added_tokens.json
ADDED

@@ -0,0 +1,3 @@
+{
+  "[PAD]": 32000
+}
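
The only entry here appends "[PAD]" to the base LLaMA vocabulary, so it resolves to id 32000 (one past the 32,000 base ids). A minimal sketch of verifying that once the files are pushed to a Hub repo; the repo id below is a placeholder:

from transformers import LlamaTokenizer

# "your-username/instructblip-processor" is a hypothetical repo id.
tokenizer = LlamaTokenizer.from_pretrained("your-username/instructblip-processor")
print(tokenizer.convert_tokens_to_ids("[PAD]"))  # 32000
print(len(tokenizer))                            # 32001 = 32000 base ids + [PAD]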
    	
preprocessor_config.json
ADDED

@@ -0,0 +1,24 @@
+{
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "BlipImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "InstructBlipProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
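
These settings describe a 224×224 bicubic resize, a 1/255 rescale, and normalization with the CLIP mean/std statistics, handled by BlipImageProcessor. A minimal sketch that instantiates the image processor with the same values; "example.jpg" is a placeholder input:

from PIL import Image
from transformers import BlipImageProcessor

# Mirrors preprocessor_config.json above; "example.jpg" is a placeholder.
image_processor = BlipImageProcessor(
    do_convert_rgb=True,
    do_resize=True,
    size={"height": 224, "width": 224},
    resample=3,                          # 3 = PIL bicubic
    do_rescale=True,
    rescale_factor=1 / 255,              # 0.00392156862745098
    do_normalize=True,
    image_mean=[0.48145466, 0.4578275, 0.40821073],
    image_std=[0.26862954, 0.26130258, 0.27577711],
)
inputs = image_processor(images=Image.open("example.jpg"), return_tensors="pt")
print(inputs["pixel_values"].shape)      # torch.Size([1, 3, 224, 224])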
    	
qformer_tokenizer/added_tokens.json
ADDED

@@ -0,0 +1,3 @@
+{
+  "[DEC]": 30522
+}
    	
qformer_tokenizer/special_tokens_map.json
ADDED

@@ -0,0 +1,8 @@
+{
+  "bos_token": "[DEC]",
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
    	
qformer_tokenizer/tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
    	
qformer_tokenizer/tokenizer_config.json
ADDED

@@ -0,0 +1,14 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "left",
+  "unk_token": "[UNK]"
+}
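
Together with the two files above, this configures the Q-Former's lower-cased BERT tokenizer, whose "[DEC]" token (id 30522) is registered as bos_token. A minimal sketch of loading it from the qformer_tokenizer/ subfolder; the repo id is a placeholder:

from transformers import BertTokenizer

# Hypothetical repo id; the files sit in the qformer_tokenizer/ subfolder.
qformer_tokenizer = BertTokenizer.from_pretrained(
    "your-username/instructblip-processor", subfolder="qformer_tokenizer"
)
print(qformer_tokenizer.bos_token)                       # "[DEC]"
print(qformer_tokenizer.convert_tokens_to_ids("[DEC]"))  # 30522
encoding = qformer_tokenizer("Describe the image.", truncation=True, max_length=512)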
    	
qformer_tokenizer/vocab.txt
ADDED

The diff for this file is too large to render. See raw diff.
    	
special_tokens_map.json
ADDED

@@ -0,0 +1,6 @@
+{
+  "bos_token": "</s>",
+  "eos_token": "</s>",
+  "pad_token": "[PAD]",
+  "unk_token": "</s>"
+}
    	
tokenizer.model
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
    	
tokenizer_config.json
ADDED

@@ -0,0 +1,36 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 2048,
+  "pad_token": null,
+  "processor_class": "InstructBlipProcessor",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "truncation_side": "left",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "use_fast": false
+}
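
With everything above in one repo, the processor_class fields tie the BLIP image processor, the slow LLaMA tokenizer, and the Q-Former BERT tokenizer together, so they load as a single InstructBlipProcessor. A minimal end-to-end sketch; the repo id and image path are placeholders:

from PIL import Image
from transformers import InstructBlipProcessor

# Hypothetical repo id and image path.
processor = InstructBlipProcessor.from_pretrained("your-username/instructblip-processor")
inputs = processor(
    images=Image.open("example.jpg"),
    text="What is unusual about this image?",
    return_tensors="pt",
)
# Typically yields pixel_values, input_ids, attention_mask,
# qformer_input_ids, and qformer_attention_mask.
print({name: tuple(tensor.shape) for name, tensor in inputs.items()})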

