default_stage:
  default_modifiers:
    SpinQuantModifier:
      rotations: [R1, R2, R4]
      transform_type: hadamard
      randomize: false
      learnable: false
      precision: torch.float64
      transform_block_size: 16
      transform_config:
        config_groups:
          R1:
            type: hadamard
            apply:
            - targets: ['re:.*embed_tokens$', 're:.*o_proj$', 're:.*down_proj$']
              location: weight_output
              inverse: false
              ignore: []
            - targets: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$', 're:.*up_proj$', 're:.*gate_proj$', lm_head]
              location: weight_input
              inverse: true
              ignore: []
            randomize: false
            requires_grad: false
            head_dim: 16
            precision: torch.float64
          R2:
            type: hadamard
            apply:
            - targets: ['re:.*v_proj$']
              location: weight_output
              inverse: false
              ignore: []
            - targets: ['re:.*o_proj$']
              location: weight_input
              inverse: true
              ignore: []
            randomize: false
            requires_grad: false
            head_dim: 16
            precision: torch.float64
          R4:
            type: hadamard
            apply:
            - targets: ['re:.*down_proj$']
              location: input
              inverse: false
              ignore: []
            - targets: ['re:.*down_proj$']
              location: weight_input
              inverse: true
              ignore: []
            randomize: false
            requires_grad: false
            head_dim: 16
            precision: torch.float64
    QuantizationModifier:
      targets: [Linear]
      ignore: [lm_head]
      scheme: NVFP4A16
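For reference, a recipe of this shape is typically applied with llm-compressor's oneshot entry point. The sketch below is an assumption of how the equivalent modifiers could be built in Python rather than loaded from the YAML above; the model ID is a placeholder, and import paths and keyword names may differ across llm-compressor versions.

    # Minimal sketch, assuming llm-compressor's oneshot API and modifier classes.
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from llmcompressor import oneshot
    from llmcompressor.modifiers.quantization import QuantizationModifier
    from llmcompressor.modifiers.transform import SpinQuantModifier

    MODEL_ID = "meta-llama/Llama-3.2-1B-Instruct"  # placeholder; the recipe does not name a model

    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

    recipe = [
        # Hadamard R1/R2 rotations fused into weights plus an online R4 rotation
        # around down_proj, mirroring the fields in the recipe above.
        SpinQuantModifier(
            rotations=["R1", "R2", "R4"],
            transform_type="hadamard",
            transform_block_size=16,
        ),
        # NVFP4A16: FP4 weights with 16-bit activations for all Linear layers
        # except the lm_head.
        QuantizationModifier(targets=["Linear"], scheme="NVFP4A16", ignore=["lm_head"]),
    ]

    # Weight-only quantization; no calibration dataset is required here.
    oneshot(model=model, recipe=recipe)

    save_dir = MODEL_ID.split("/")[-1] + "-spinquant-NVFP4A16"
    model.save_pretrained(save_dir)
    tokenizer.save_pretrained(save_dir)

Running oneshot with this recipe serializes the applied modifiers back out as the recipe shown above alongside the compressed checkpoint.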