```json
{
  "model_type": "llama",
  "architectures": ["MultimodalLFM2Model"],
  "_name_or_path": "GoofyLM/N2.2-Eye-1.3B",
  "auto_map": {
    "AutoModelForCausalLM": "modeling_n2_eye.MultimodalLFM2Model",
    "AutoConfig": "modeling_n2_eye.MultimodalLFM2Config"
  },
  "lfm2_model_name": "LiquidAI/LFM2-1.2B",
  "clip_model_name": "openai/clip-vit-base-patch32",
  "vision_projection_dim": 512,
  "language_model_config": {
    "model_type": "lfm2",
    "hidden_size": 1536,
    "vocab_size": 50257,
    "max_position_embeddings": 2048
  },
  "vision_encoder_config": {
    "model_type": "clip_vision_model",
    "hidden_size": 768
  },
  "projection_config": {
    "vision_hidden_size": 768,
    "vision_projection_dim": 512,
    "language_hidden_size": 1536,
    "dropout": 0.1
  },
  "image_token": "<image>",
  "max_length": 2048,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.46.0",
  "use_cache": true,
  "chat_template": "{% set system_message = 'You are a helpful assistant trained by Liquid AI. You can see and understand images.' %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}<|im_start|>system\n{{ system_message }}<|im_end|>\n<image>\n{% for message in loop_messages %}<|im_start|>{{ message['role'] }}\n{{ message['content'] }}<|im_end|>\n{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
}
```
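
The `auto_map` entries route `AutoConfig` and `AutoModelForCausalLM` to the custom `modeling_n2_eye` module shipped with the repository, so the model must be loaded with `trust_remote_code=True`. A minimal loading sketch; the image preprocessing pipeline lives in the remote code and is not described by this config:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo = "GoofyLM/N2.2-Eye-1.3B"

# trust_remote_code=True is required: auto_map points AutoConfig and
# AutoModelForCausalLM at the repo's custom modeling_n2_eye module.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # matches the "torch_dtype" field above
)
tokenizer = AutoTokenizer.from_pretrained(repo)
```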
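
The `projection_config` block describes the bridge between the two backbones: CLIP's 768-dim patch features are projected to a 512-dim intermediate and then up to the LFM2 hidden size of 1536, with dropout 0.1. The following is a hypothetical PyTorch sketch of that shape flow only; the actual layer layout and activation in `modeling_n2_eye` are not specified by the config and are assumptions here:

```python
import torch.nn as nn

class VisionProjection(nn.Module):
    """Hypothetical projector matching projection_config.

    Maps (batch, num_patches, 768) CLIP features to (batch, num_patches, 1536)
    LFM2-sized embeddings via a 512-dim bottleneck. The GELU activation and
    two-layer structure are assumptions, not taken from the repo's code.
    """

    def __init__(self, vision_hidden_size=768, vision_projection_dim=512,
                 language_hidden_size=1536, dropout=0.1):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Linear(vision_hidden_size, vision_projection_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(vision_projection_dim, language_hidden_size),
        )

    def forward(self, vision_features):
        return self.proj(vision_features)
```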
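
The `chat_template` wraps turns in ChatML-style `<|im_start|>`/`<|im_end|>` markers, falls back to the default Liquid AI system prompt when no system message is supplied, and places a single `<image>` placeholder immediately after the system turn, implying one image per conversation. Because the template is stored in `config.json` rather than in the tokenizer config, one way to apply it is to pass it explicitly (reusing `config` and `tokenizer` from the loading sketch above):

```python
messages = [{"role": "user", "content": "What is in this image?"}]

# Pass the template explicitly in case the tokenizer was saved without it.
prompt = tokenizer.apply_chat_template(
    messages,
    chat_template=config.chat_template,
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# Expected shape of the output: the system turn, then the <image>
# placeholder, then the user turn, then an open assistant turn.
```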