Commit From AutoTrain

Changed files:

- .gitattributes +2 -0
- README.md +56 -0
- config.json +41 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +16 -0
- vocab.json +0 -0
    	
.gitattributes CHANGED

```
@@ -29,3 +29,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
```
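
The two added patterns hand any matching file over to Git LFS. As a toy check, here is a sketch using Python's fnmatch, whose globbing behaves the same way for these simple patterns; the file names are hypothetical, not taken from the repo:

```python
from fnmatch import fnmatch

# The two patterns this commit adds to .gitattributes.
patterns = ["*.bin.*", "*.tar.gz"]

# Hypothetical file names, for illustration only.
for name in ["model.bin.index.json", "weights.tar.gz", "vocab.json"]:
    print(name, any(fnmatch(name, p) for p in patterns))
# model.bin.index.json True
# weights.tar.gz True
# vocab.json False
```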
    	
README.md ADDED

````
@@ -0,0 +1,56 @@
+---
+tags:
+- autotrain
+- text-classification
+language:
+- unk
+widget:
+- text: "I love AutoTrain 🤗"
+datasets:
+- sasha/autotrain-data-RobertaBaseTweetEval
+co2_eq_emissions:
+  emissions: 28.053963781460215
+---
+
+# Model Trained Using AutoTrain
+
+- Problem type: Multi-class Classification
+- Model ID: 1281048989
+- CO2 Emissions (in grams): 28.0540
+
+## Validation Metrics
+
+- Loss: 0.587
+- Accuracy: 0.751
+- Macro F1: 0.719
+- Micro F1: 0.751
+- Weighted F1: 0.746
+- Macro Precision: 0.761
+- Micro Precision: 0.751
+- Weighted Precision: 0.753
+- Macro Recall: 0.699
+- Micro Recall: 0.751
+- Weighted Recall: 0.751
+
+
+## Usage
+
+You can use cURL to access this model:
+
+```
+$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/sasha/autotrain-RobertaBaseTweetEval-1281048989
+```
+
+Or Python API:
+
+```
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+model = AutoModelForSequenceClassification.from_pretrained("sasha/autotrain-RobertaBaseTweetEval-1281048989", use_auth_token=True)
+
+tokenizer = AutoTokenizer.from_pretrained("sasha/autotrain-RobertaBaseTweetEval-1281048989", use_auth_token=True)
+
+inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+
+outputs = model(**inputs)
+```
````
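
The README's Python snippet stops at the raw model outputs. As a minimal follow-on sketch, the same call can go through the transformers pipeline API, which applies the id2label mapping from config.json for you; this assumes the checkpoint is publicly readable (keep an auth token, as the README does, for a private repo):

```python
from transformers import pipeline

# Bundles tokenizer + model and maps the argmax logit through
# id2label ("negative" / "neutral" / "positive").
classifier = pipeline(
    "text-classification",
    model="sasha/autotrain-RobertaBaseTweetEval-1281048989",
)
print(classifier("I love AutoTrain"))
# e.g. [{'label': 'positive', 'score': ...}]
```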
    	
config.json ADDED

```
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "AutoTrain",
+  "_num_labels": 3,
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "negative",
+    "1": "neutral",
+    "2": "positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "negative": 0,
+    "neutral": 1,
+    "positive": 2
+  },
+  "layer_norm_eps": 1e-05,
+  "max_length": 256,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "padding": "max_length",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.20.0",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
```
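
The id2label / label2id pair above is what names the three classes at inference time. A small sketch, using only the standard AutoConfig API, that inspects the mapping without downloading the ~500 MB weight file:

```python
from transformers import AutoConfig

# Fetches just config.json from the Hub; transformers parses the
# id2label keys back to ints.
config = AutoConfig.from_pretrained(
    "sasha/autotrain-RobertaBaseTweetEval-1281048989"
)
print(config.id2label)    # {0: 'negative', 1: 'neutral', 2: 'positive'}
print(config.num_labels)  # 3
```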
    	
merges.txt ADDED

The diff for this file is too large to render. See raw diff.
    	
pytorch_model.bin ADDED

```
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d369ac7dc853ee7d90f4b2d374cd1a9c35dafd1b99dacf3567819fb8501446e5
+size 498663405
```
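
What the commit stores here is a Git LFS pointer, not the weights themselves: the oid line records the SHA-256 of the real 498,663,405-byte file. A minimal sketch for verifying a downloaded copy against that digest (the local path is illustrative):

```python
import hashlib

# SHA-256 taken from the oid line of the LFS pointer above.
EXPECTED = "d369ac7dc853ee7d90f4b2d374cd1a9c35dafd1b99dacf3567819fb8501446e5"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:  # illustrative local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED, "checksum mismatch"
```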
    	
special_tokens_map.json ADDED

```
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
```
    	
tokenizer.json ADDED

The diff for this file is too large to render. See raw diff.
    	
tokenizer_config.json ADDED

```
@@ -0,0 +1,16 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "name_or_path": "AutoTrain",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
```
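
The special tokens declared here (and in special_tokens_map.json above) bracket every encoded sequence. A short sketch, assuming only the standard tokenizer API, that makes the wrapping visible:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "sasha/autotrain-RobertaBaseTweetEval-1281048989"
)
enc = tok("I love AutoTrain")
# RoBERTa wraps the input as <s> ... </s>, i.e. bos_token_id 0 and
# eos_token_id 2 from config.json.
print(tok.convert_ids_to_tokens(enc["input_ids"]))
```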
    	
vocab.json ADDED

The diff for this file is too large to render. See raw diff.

