{
  "cano_pose_type": 1,
  "dense_sample_pts": 40000,
  "encoder_feat_dim": 1024,
  "encoder_freeze": false,
  "encoder_grad_ckpt": true,
  "encoder_model_name": "dinov2_vitl14_reg",
  "encoder_type": "dinov2_fusion",
  "expr_param_dim": 100,
  "facesr": true,
  "fine_encoder_feat_dim": 1536,
  "fine_encoder_freeze": true,
  "fine_encoder_model_name": "./pretrained_models/sapiens/pretrained/checkpoints/sapiens_1b/sapiens_1b_epoch_173_torchscript.pt2",
  "fine_encoder_type": "sapiens",
  "fix_opacity": false,
  "fix_rotation": false,
  "gs_clip_scaling": [
    100,
    0.01,
    0.05,
    3000
  ],
  "gs_mlp_network_config": {
    "activation": "silu",
    "n_hidden_layers": 2,
    "n_neurons": 512
  },
  "gs_query_dim": 1024,
  "gs_sh": 3,
  "gs_use_rgb": true,
  "gs_xyz_offset_max_step": 1.0,
  "human_model_path": "./pretrained_models/human_model_files",
  "latent_query_points_type": "e2e_smplx_sub1",
  "model_name": "SapDinoLRMBHSD3_5",
  "pcl_dim": 1024,
  "shape_param_dim": 10,
  "smplx_subdivide_num": 1,
  "smplx_type": "smplx_2",
  "tf_grad_ckpt": true,
  "transformer_dim": 1024,
  "transformer_heads": 16,
  "transformer_layers": 15,
  "transformer_type": "sd3_mm_bh_cond",
  "use_face_id": true
}
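
A minimal sketch of reading this config in Python. The filename model_config.json is an assumption (the original filename is not shown in this view); the printed values come directly from the JSON above:

import json

# Load the model configuration (hypothetical filename).
with open("model_config.json") as f:
    cfg = json.load(f)

print(cfg["model_name"])          # SapDinoLRMBHSD3_5
print(cfg["transformer_layers"])  # 15
print(cfg["gs_clip_scaling"])     # [100, 0.01, 0.05, 3000]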