Add new SentenceTransformer model (#3)
- Add new SentenceTransformer model (8b4c685c8970becdc74e51bcde44b007a7b6b407)
- Undo README changes (ad207b7b3d9f3a0b92fa4813457af703402d9cac)
- Update README outputs + dim (768 -> 1024) (96fbb87a02f10fea60382bebdfed945b381fe776)

Co-authored-by: Tom Aarsen <[email protected]>

Files changed:
- 1_Pooling/config.json +10 -0
- README.md +12 -12
- config.json +1 -1
- config_sentence_transformers.json +10 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
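Taken together, the added files let the checkpoint load directly through sentence-transformers. A minimal usage sketch (the repo id is taken from `_name_or_path` in config.json below; the document text mirrors the README example, while the query string is illustrative):

```python
from sentence_transformers import SentenceTransformer

# Repo id from "_name_or_path" in config.json; the "search_query:" /
# "search_document:" prefixes follow the README examples in this diff.
model = SentenceTransformer("lightonai/modernbert-embed-large")

query_embeddings = model.encode(["search_query: What is TSNE?"])  # query text is illustrative
doc_embeddings = model.encode([
    "search_document: TSNE is a dimensionality reduction algorithm created by Laurens van Der Maaten",
])
print(query_embeddings.shape, doc_embeddings.shape)  # (1, 1024) (1, 1024)

# similarity_fn_name in config_sentence_transformers.json selects cosine.
print(model.similarity(query_embeddings, doc_embeddings))
```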
 
    	
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "word_embedding_dimension": 1024,
+  "pooling_mode_cls_token": false,
+  "pooling_mode_mean_tokens": true,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false,
+  "pooling_mode_weightedmean_tokens": false,
+  "pooling_mode_lasttoken": false,
+  "include_prompt": true
+}
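Only `pooling_mode_mean_tokens` is enabled, so this config selects masked mean pooling over the 1024-dimensional token embeddings. A sketch of what that computes, matching the `mean_pooling` helper the README's raw-Transformers example relies on:

```python
import torch

def mean_pooling(model_output, attention_mask):
    # Token embeddings: (batch, seq_len, 1024) per word_embedding_dimension.
    token_embeddings = model_output[0]
    # Zero out padding positions, then average over the sequence axis.
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * mask, dim=1) / torch.clamp(mask.sum(dim=1), min=1e-9)
```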
    	
README.md CHANGED
@@ -2950,12 +2950,12 @@ doc_embeddings = model.encode([
     "search_document: TSNE is a dimensionality reduction algorithm created by Laurens van Der Maaten",
 ])
 print(query_embeddings.shape, doc_embeddings.shape)
-# (2, 768) (1, 768)
+# (2, 1024) (1, 1024)
 
 similarities = model.similarity(query_embeddings, doc_embeddings)
 print(similarities)
-# tensor([[0.…],
-#         [0.…]])
+# tensor([[0.6518],
+#         [0.4237]])
 ```
 
 <details><summary>Click to see Sentence Transformers usage with Matryoshka Truncation</summary>
@@ -2979,8 +2979,8 @@ print(query_embeddings.shape, doc_embeddings.shape)
 
 similarities = model.similarity(query_embeddings, doc_embeddings)
 print(similarities)
-# tensor([[0.…],
-#         [0.…]])
+# tensor([[0.6835],
+#         [0.3982]])
 ```
 
 Note the small differences compared to the full 1024-dimensional similarities.
@@ -3023,12 +3023,12 @@ query_embeddings = F.normalize(query_embeddings, p=2, dim=1)
 doc_embeddings = mean_pooling(documents_outputs, encoded_documents["attention_mask"])
 doc_embeddings = F.normalize(doc_embeddings, p=2, dim=1)
 print(query_embeddings.shape, doc_embeddings.shape)
-# torch.Size([2, 768]) torch.Size([1, 768])
+# torch.Size([2, 1024]) torch.Size([1, 1024])
 
 similarities = query_embeddings @ doc_embeddings.T
 print(similarities)
-# tensor([[0.…],
-#         [0.…]])
+# tensor([[0.6518],
+#         [0.4237]])
 ```
 
 <details><summary>Click to see Transformers usage with Matryoshka Truncation</summary>
@@ -3076,11 +3076,11 @@ print(query_embeddings.shape, doc_embeddings.shape)
 
 similarities = query_embeddings @ doc_embeddings.T
 print(similarities)
-# tensor([[0.…],
-#         [0.…]])
+# tensor([[0.6835],
+#         [0.3982]])
 ```
 
-Note the small differences compared to the full 768-dimensional similarities.
+Note the small differences compared to the full 1024-dimensional similarities.
 
 </details>
 
@@ -3116,7 +3116,7 @@ const doc_embeddings = await extractor([
 
 // Compute similarity scores
 const similarities = await matmul(query_embeddings, doc_embeddings.transpose(1, 0));
-console.log(similarities.tolist()); …
+console.log(similarities.tolist());
 ```
 
 
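The Matryoshka sections updated above (the 0.6835 / 0.3982 outputs) compare embeddings truncated to a smaller dimension. A sketch of how that is typically requested in sentence-transformers; the truncation size of 256 is an assumed example, not a value taken from this diff:

```python
from sentence_transformers import SentenceTransformer

# truncate_dim is a standard sentence-transformers option; 256 is an
# illustrative size, not one read from the README hunks above.
model = SentenceTransformer("lightonai/modernbert-embed-large", truncate_dim=256)

query_embeddings = model.encode(["search_query: What is TSNE?"])
print(query_embeddings.shape)  # (1, 256) instead of (1, 1024)
```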
    	
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "lightonai/modernbert-embed-large…
+  "_name_or_path": "lightonai/modernbert-embed-large",
   "architectures": [
     "ModernBertModel"
   ],
    	
config_sentence_transformers.json ADDED
@@ -0,0 +1,10 @@
+{
+  "__version__": {
+    "sentence_transformers": "3.4.0.dev0",
+    "transformers": "4.48.0.dev0",
+    "pytorch": "2.6.0.dev20241112+cu121"
+  },
+  "prompts": {},
+  "default_prompt_name": null,
+  "similarity_fn_name": "cosine"
+}
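`"similarity_fn_name": "cosine"` is what `model.similarity(...)` uses, and since the pipeline ends in a Normalize module (modules.json below), cosine similarity agrees with the plain dot product used in the README's raw-Transformers path. A quick sanity check on random stand-in tensors, not real model outputs:

```python
import torch
import torch.nn.functional as F

# Random stand-ins for already-L2-normalized (batch, 1024) embeddings.
a = F.normalize(torch.randn(2, 1024), p=2, dim=1)
b = F.normalize(torch.randn(1, 1024), p=2, dim=1)

dot = a @ b.T  # what the README computes after F.normalize
cos = F.cosine_similarity(a.unsqueeze(1), b.unsqueeze(0), dim=-1)
assert torch.allclose(dot, cos, atol=1e-6)  # identical on unit vectors
```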
    	
modules.json ADDED
@@ -0,0 +1,20 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Normalize",
+    "type": "sentence_transformers.models.Normalize"
+  }
+]
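modules.json declares a three-stage pipeline: Transformer -> Pooling -> Normalize. Loading the repo id builds this automatically; a sketch of the equivalent manual construction (max_seq_length comes from sentence_bert_config.json below):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Normalize, Pooling, Transformer

# Stage 0: the ModernBERT encoder producing token embeddings.
transformer = Transformer("lightonai/modernbert-embed-large", max_seq_length=8192)
# Stage 1: masked mean pooling, per 1_Pooling/config.json.
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
# Stage 2: L2 normalization, so cosine similarity reduces to a dot product.
normalize = Normalize()

model = SentenceTransformer(modules=[transformer, pooling, normalize])
```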
    	
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 8192,
+  "do_lower_case": false
+}
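Both keys are read at load time: inputs longer than 8192 tokens are truncated before encoding, and no lowercasing is applied. For example:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("lightonai/modernbert-embed-large")
print(model.max_seq_length)  # 8192, read from sentence_bert_config.json

# Anything beyond 8192 tokens is truncated before encoding.
embedding = model.encode("search_document: " + "very long document " * 10_000)
print(embedding.shape)  # (1024,)
```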