{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "do_lower_case": true,
  "do_upper_case": false,
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "lang_codes": null,
  "model_max_length": 20,
  "pad_token": "<pad>",
  "processor_class": "Speech2TextProcessor",
  "sp_model_kwargs": {},
  "tgt_lang": null,
  "tokenizer_class": "Speech2TextTokenizer",
  "unk_token": "<unk>"
}
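
For reference, a minimal sketch of how this configuration is consumed. The local directory name `./my-s2t-checkpoint` is hypothetical, and the directory is assumed to also contain the `vocab.json` and SentencePiece model files that `Speech2TextTokenizer` requires alongside this `tokenizer_config.json`:

```python
from transformers import Speech2TextTokenizer

# Hypothetical local checkpoint directory containing this tokenizer_config.json
# plus the tokenizer's vocab.json and SentencePiece model file.
tokenizer = Speech2TextTokenizer.from_pretrained("./my-s2t-checkpoint")

# These attributes are populated from the config above.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
# <s> </s> <pad> <unk>
print(tokenizer.model_max_length)
# 20
```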