Update with commit 0c9a72e4576fe4c84077f066e585129c97bfd4e6
Browse files. See: https://github.com/huggingface/transformers/commit/0c9a72e4576fe4c84077f066e585129c97bfd4e6
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
|
@@ -181,6 +181,7 @@
|
|
| 181 |
{"model_type":"led","pytorch":true,"processor":"AutoTokenizer"}
|
| 182 |
{"model_type":"levit","pytorch":true,"processor":"AutoImageProcessor"}
|
| 183 |
{"model_type":"lfm2","pytorch":true,"processor":"AutoTokenizer"}
|
|
|
|
| 184 |
{"model_type":"lfm2_vl","pytorch":true,"processor":"AutoProcessor"}
|
| 185 |
{"model_type":"lightglue","pytorch":true,"processor":"AutoImageProcessor"}
|
| 186 |
{"model_type":"lilt","pytorch":true,"processor":"AutoTokenizer"}
|
|
|
|
| 181 |
{"model_type":"led","pytorch":true,"processor":"AutoTokenizer"}
|
| 182 |
{"model_type":"levit","pytorch":true,"processor":"AutoImageProcessor"}
|
| 183 |
{"model_type":"lfm2","pytorch":true,"processor":"AutoTokenizer"}
|
| 184 |
+
{"model_type":"lfm2_moe","pytorch":true,"processor":"AutoTokenizer"}
|
| 185 |
{"model_type":"lfm2_vl","pytorch":true,"processor":"AutoProcessor"}
|
| 186 |
{"model_type":"lightglue","pytorch":true,"processor":"AutoImageProcessor"}
|
| 187 |
{"model_type":"lilt","pytorch":true,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
|
@@ -624,6 +624,8 @@
|
|
| 624 |
{"model_class":"LevitModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
| 625 |
{"model_class":"Lfm2ForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
| 626 |
{"model_class":"Lfm2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
|
|
|
|
| 627 |
{"model_class":"Lfm2VlForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 628 |
{"model_class":"Lfm2VlModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 629 |
{"model_class":"LightGlueForKeypointMatching","pipeline_tag":"keypoint-matching","auto_class":"AutoModelForKeypointMatching"}
|
|
|
|
| 624 |
{"model_class":"LevitModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
| 625 |
{"model_class":"Lfm2ForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
| 626 |
{"model_class":"Lfm2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 627 |
+
{"model_class":"Lfm2MoeForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
| 628 |
+
{"model_class":"Lfm2MoeModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 629 |
{"model_class":"Lfm2VlForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 630 |
{"model_class":"Lfm2VlModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 631 |
{"model_class":"LightGlueForKeypointMatching","pipeline_tag":"keypoint-matching","auto_class":"AutoModelForKeypointMatching"}
|