First commit
- config.json +67 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.txt +0 -0
    	
config.json ADDED
@@ -0,0 +1,67 @@
+{
+  "aggregation_labels": {
+    "0": "NONE",
+    "1": "SUM",
+    "2": "AVERAGE",
+    "3": "COUNT"
+  },
+  "aggregation_loss_weight": 1.0,
+  "aggregation_temperature": 1.0,
+  "allow_empty_column_selection": false,
+  "answer_loss_cutoff": 0.664694,
+  "answer_loss_importance": 1.0,
+  "architectures": [
+    "TapasForQuestionAnswering"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "average_approximation_function": "ratio",
+  "average_logits_per_cell": false,
+  "cell_selection_preference": 0.207951,
+  "disable_per_token_loss": false,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 512,
+  "huber_loss_delta": 0.121194,
+  "init_cell_selection_weights_to_zero": true,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_norm_eps": 1e-12,
+  "max_num_columns": 32,
+  "max_num_rows": 64,
+  "max_position_embeddings": 512,
+  "model_type": "tapas",
+  "no_aggregation_label_index": 0,
+  "num_aggregation_labels": 4,
+  "num_attention_heads": 8,
+  "num_hidden_layers": 8,
+  "pad_token_id": 0,
+  "positive_label_weight": 10.0,
+  "reset_position_index_per_cell": true,
+  "select_one_column": true,
+  "softmax_temperature": 1.0,
+  "temperature": 0.0352513,
+  "type_vocab_size": [
+    3,
+    256,
+    256,
+    2,
+    256,
+    256,
+    10
+  ],
+  "type_vocab_sizes": [
+    3,
+    256,
+    256,
+    2,
+    256,
+    256,
+    10
+  ],
+  "use_answer_as_supervision": true,
+  "use_gumbel_for_aggregation": false,
+  "use_gumbel_for_cells": false,
+  "use_normalized_answer_loss": false,
+  "vocab_size": 30522
+}
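The config above describes a TAPAS question-answering model (8 layers, 8 attention heads, hidden size 512) with an aggregation head over NONE/SUM/AVERAGE/COUNT. As a rough illustration (not part of the commit), it can be instantiated with the transformers library along these lines; the local file path and the from_pretrained call are assumptions about how a clone of this repository is laid out, and older transformers releases additionally require torch-scatter for TAPAS.

    # Minimal sketch: build the model from the config.json added in this commit.
    # Assumes a local clone of this repository as the working directory.
    from transformers import TapasConfig, TapasForQuestionAnswering

    config = TapasConfig.from_json_file("config.json")
    print(config.model_type)              # tapas
    print(config.num_hidden_layers)       # 8
    print(config.num_aggregation_labels)  # 4 (NONE, SUM, AVERAGE, COUNT)

    # Randomly initialised model with this architecture; use
    # TapasForQuestionAnswering.from_pretrained("<path to the clone>")
    # to also load the weights stored in pytorch_model.bin.
    model = TapasForQuestionAnswering(config)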
    	
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:235fa9be12ecee5f38a0f888fdb6b52772132750b6ba862b13b3708651c480f4
+size 167688391
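pytorch_model.bin is stored as a Git LFS pointer: the three lines above record only the LFS protocol version, the SHA-256 of the real weight file, and its size (about 168 MB). A small sketch, assuming the actual binary has already been fetched (for example with "git lfs pull") into the working directory, to check a download against this pointer:

    # Verify a downloaded pytorch_model.bin against the LFS pointer above.
    import hashlib
    import os

    EXPECTED_OID = "235fa9be12ecee5f38a0f888fdb6b52772132750b6ba862b13b3708651c480f4"
    EXPECTED_SIZE = 167688391

    path = "pytorch_model.bin"
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading the whole file into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
    assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
    print("pytorch_model.bin matches the LFS pointer")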
    	
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "additional_special_tokens": ["[EMPTY]"]}
    	
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "empty_token": "[EMPTY]", "tokenize_chinese_chars": true, "strip_accents": null, "cell_trim_length": -1, "max_column_id": null, "max_row_id": null, "strip_column_names": false, "update_answer_coordinates": false, "drop_rows_to_fit": false, "model_max_length": 512, "additional_special_tokens": ["[EMPTY]"]}
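tokenizer_config.json, special_tokens_map.json, and vocab.txt together define a lowercasing WordPiece tokenizer with an extra [EMPTY] token and a 512-token limit. A minimal usage sketch, assuming the script runs from a local clone of this repository and that pandas and torch are installed; the example table and question are made up for illustration:

    # Load the tokenizer defined by the files added in this commit.
    import pandas as pd
    from transformers import TapasTokenizer

    tokenizer = TapasTokenizer.from_pretrained(".")
    print(tokenizer.additional_special_tokens)  # ['[EMPTY]']
    print(tokenizer.model_max_length)           # 512

    # TAPAS tokenizes a flat table (all cells as strings) together with the question.
    table = pd.DataFrame({"City": ["Paris", "Berlin"], "Population": ["2.1M", "3.6M"]})
    encoding = tokenizer(table=table,
                         queries=["What is the population of Paris?"],
                         padding="max_length",
                         return_tensors="pt")
    print(encoding["input_ids"].shape)  # (1, 512)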
    	
vocab.txt ADDED
The diff for this file is too large to render. See raw diff.

