---
# mergekit configuration: SCE merge of three abliterated Llama-3.x 1B models
# onto an abliterated Llama-3.2-1B-Instruct base.
# NOTE(review): per-model weight/density are honored by TIES/DARE-style methods;
# SCE primarily uses select_topk — confirm mergekit version accepts both here.
models:
  - model: Nexesenex/Dolphin3.0-Llama3.1-1B-abliterated
    parameters:
      weight: 1.2  # Slightly favor
      density: 0.9  # Sparsified a bit to reduce noise
  - model: Nexesenex/pankajmathur_orca_mini_v9_6_1B-instruct-Abliterated-LPL
    parameters:
      weight: 1.0
      density: 0.9
  - model: prithivMLmods/Bellatrix-Tiny-1B-v3-abliterated
    parameters:
      weight: 1.0
      density: 0.9
merge_method: sce  # SCE for adaptive weighting
base_model: huihui-ai/Llama-3.2-1B-Instruct-abliterated
parameters:
  normalize: true
  int8_mask: true
  rescale: true
  filter_wise: false
  smooth: false
  allow_negative_weights: false
  lambda: 1.0
  select_topk: 0.1  # Retain the top 10% high-variance elements
tokenizer:
  source: union  # Union to combine vocabularies
chat_template: auto
dtype: bfloat16
out_dtype: bfloat16