import time

import fasttext
# import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import torch
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from transformers.utils.hub import cached_file

# The JQL regression heads depend on helper modules shipped with the JQL
# annotation pipeline, so they are only enabled when those imports succeed.
try:
    from utils.regression_head import RegressionHead
    from utils.embedder import get_embedder_instance

    embedder = get_embedder_instance(
        'Snowflake/snowflake-arctic-embed-m-v2.0',
        "cuda" if torch.cuda.is_available() else "cpu",
        torch.bfloat16,
    )
    regression_head_checkpoints = {
        'Edu-JQL-Gemma-SF': cached_file('Jackal-AI/JQL-Edu-Heads', 'checkpoints/edu-gemma-snowflake-balanced.ckpt'),
        'Edu-JQL-Mistral-SF': cached_file('Jackal-AI/JQL-Edu-Heads', 'checkpoints/edu-mistral-snowflake-balanced.ckpt'),
        'Edu-JQL-Llama-SF': cached_file('Jackal-AI/JQL-Edu-Heads', 'checkpoints/edu-llama-snowflake-balanced.ckpt'),
    }
    USE_JQL = True
except ImportError:
    import warnings
    warnings.warn(
        "Code needs to run from https://github.com/JQL-AI/JQL-Annotation-Pipeline/src "
        "to be able to use JQL models",
        RuntimeWarning,
    )
    USE_JQL = False

# -----------------------------
# Model List
# -----------------------------
model_names = [
    # JQL models using different annotators
    "Edu-JQL-Gemma-SF",
    "Edu-JQL-Mistral-SF",
    "Edu-JQL-Llama-SF",
    # ad-hoc models trained on Kimi K2 annotations
    "versae/norbert3-base-edu-scorer-lr3e4-bs32",
    "versae/nb-sbert-base-edu-scorer-lr3e4-bs32",
    "versae/gte-multilingual-base-edu-scorer-lr3e4-bs32",
    "versae/multilingual-e5-base-edu-scorer-lr3e4-bs32",
    "versae/bge-m3-edu-scorer-lr3e5-bs32",
    "versae/multilingual-e5-large-instruct-edu-scorer-lr5e5-bs32",
    "versae/fasttext-edu-scorer-binary-norwegian",
    "versae/fasttext-edu-scorer-norwegian-2",
    "versae/fasttext-edu-scorer-norwegian",
    "versae/multilingual-e5-small-edu-scorer-lr3e4-bs32",
    "versae/snowflake-arctic-embed-m-edu-scorer-lr3e4-bs32",
    "versae/nb-bert-edu-scorer-lr3e4-bs32",
    "versae/nb-bert-edu-scorer-lr2e5-bs32",
    "versae/nb-bert-edu-scorer",
    "versae/fineweb-edu-scorer-norwegian",
    # models using other annotators, mostly Llama 70B
    'ibm-granite/GneissWeb.Edu_classifier',
    'HuggingFaceTB/fineweb-edu-classifier',
    'nvidia/nemocurator-fineweb-mixtral-edu-classifier',
    'nvidia/nemocurator-fineweb-nemotron-4-edu-classifier',
    'NbAiLab/nb-education-quality-evaluator',
    'NbAiLab/nb-linguistic-quality-evaluator',
    'versae/fw-classifier-no-new',
    'versae/no-edu-scorer-hplt2',
    'ScandLM/fw-classifier-no-70b',
]

# -----------------------------
# Load Dataset
# -----------------------------
dataset = load_dataset("versae/nb-fineweb2-edu-bokmaal-scores", split="test")

# -----------------------------
# Model Loader
# -----------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def load_model(model_name):
    """Load a scorer and return a (backend, payload) pair for score_text."""
    if USE_JQL and "JQL" in model_name:
        # JQL heads are small regression models on top of frozen embeddings.
        path = regression_head_checkpoints[model_name]
        model = RegressionHead.load_from_checkpoint(
            path, map_location="cuda" if torch.cuda.is_available() else "cpu"
        ).to(torch.bfloat16)
        return "JQL", (model, embedder)
    elif "fasttext" in model_name:
        bin_path = hf_hub_download(repo_id=model_name, filename="model.bin")
        return "fasttext", fasttext.load_model(bin_path)
    elif "GneissWeb" in model_name:
        bin_path = hf_hub_download(repo_id=model_name, filename="fasttext_education.bin")
        return "fasttext", fasttext.load_model(bin_path)
    else:
        config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
        tokenizer = AutoTokenizer.from_pretrained(model_name, config=config, trust_remote_code=True)
        model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config, trust_remote_code=True)
        model.to(device)
        return "transformers", (model, tokenizer, device)
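# Optional smoke test for the loader (an illustrative sketch, not part of the
# benchmark run): uncomment to verify that a single model resolves and loads
# before starting the full sweep. The model chosen here is arbitrary.
# _backend, _payload = load_model("HuggingFaceTB/fineweb-edu-classifier")
# print(f"Loaded {_backend} backend")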
# -----------------------------
# Model Scorer
# -----------------------------
def score_text(model_type, model_data, text, model_name):
    """Score one document, dispatching on the backend returned by load_model."""
    if model_type == "JQL":
        model, embedder = model_data
        embeddings = embedder.embed([text])
        with torch.no_grad():
            # Clamp the regression output to the 0-5 educational score range.
            return min(max(float(model(embeddings).cpu().squeeze(1)[0]), 0), 5)
    elif model_type == "fasttext":
        # fastText expects a single line of text per prediction.
        labels, probs = model_data.predict(text.replace("\n", " "), k=-1)
        scores = dict(zip(labels, probs))
        if "binary" in model_name:
            return scores.get("__label__1", 0.0)
        elif "GneissWeb" in model_name:
            return scores.get("__label__education", 0.0)
        else:
            # Multiclass scorers encode the score as the last character of the label.
            return int(labels[np.argmax(probs)][-1:])
    else:
        model, tokenizer, device = model_data
        inputs = tokenizer(text, return_tensors="pt", padding="longest", truncation=True, max_length=512).to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits.squeeze(-1).float().cpu().numpy()
        return logits.item()

# -----------------------------
# Annotate Dataset and Collect Stats
# -----------------------------
timing_stats = []

for model_name in model_names:
    print(f"\n-> Scoring with: {model_name}")
    try:
        model_type, model_data = load_model(model_name)
        col_name = f"score_{model_name}"

        def annotate(example):
            example[col_name] = score_text(model_type, model_data, example["text"], model_name)
            return example

        print("Model loaded. Annotating...")
        start_time = time.time()
        # Repo ids contain slashes, which are invalid in file names, so sanitize the cache path.
        cache_name = f"/tmp/{model_type}__{model_name.replace('/', '_')}_"
        dataset = dataset.map(annotate, batched=False, cache_file_name=cache_name)
        end_time = time.time()

        total_time = end_time - start_time
        avg_time = total_time / len(dataset)
        timing_stats.append({
            "model": model_name,
            "total_time": total_time,
            "avg_time": avg_time,
            "examples": len(dataset),
        })
    except Exception as e:
        print(f"⚠️ Skipping {model_name} due to error: {e}")

# -----------------------------
# Print Timing Summary
# -----------------------------
timing_stats.sort(key=lambda x: x["avg_time"])

# Prepare table data
rows = []
for stat in timing_stats:
    docs_per_sec = stat["examples"] / stat["total_time"]
    rows.append({
        "Model": f'[{stat["model"]}](https://hf.co/{stat["model"]})',
        "Total Time (s)": f"{stat['total_time']:.2f}",
        "Avg Time / Doc (s)": f"{stat['avg_time']:.4f}",
        "Docs / Sec": f"{docs_per_sec:.2f}",
    })

# Convert to DataFrame for pretty printing
timing_df = pd.DataFrame(rows)
print(timing_df.to_markdown(index=False))

# -----------------------------
# Push to Hub
# -----------------------------
dataset.push_to_hub("versae/nb-fineweb2-edu-bokmaal-scores", split="test")

# -----------------------------
# Calculate correlations
# -----------------------------
# Convert HuggingFace dataset to pandas DataFrame
df = dataset.to_pandas()

# Extract score columns for correlation
target = 'score'
score_columns = [col for col in df.columns if col.startswith("score_")]
score_columns.insert(0, target)
score_df = df[score_columns]

# Calculate Pearson correlations against the reference score
correlations = []
for col in score_columns[1:]:
    corr, p_value = stats.pearsonr(score_df[target], score_df[col])
    correlations.append({
        'Model': f'[{col[6:]}](https://hf.co/{col[6:]})',  # strip the 'score_' prefix
        'Correlation with score': corr,
        'p-value': p_value,
    })

correlation_df = pd.DataFrame(correlations)
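# Pearson measures linear agreement; since the reference scores are ordinal
# (0-5), a rank correlation is a useful cross-check. A minimal sketch reusing
# score_df from above (not part of the original summary tables):
# spearman_rows = [
#     {"Model": col[6:], "Spearman": stats.spearmanr(score_df[target], score_df[col]).correlation}
#     for col in score_columns[1:]
# ]
# print(pd.DataFrame(spearman_rows).to_markdown(index=False))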
# Merge timing and correlation tables by model name
merged_df = pd.merge(correlation_df, timing_df, on="Model", how="inner")

# Sort by correlation, descending (best first)
merged_df = merged_df.sort_values(by="Correlation with score", ascending=False)

# Print full table
print("\n===================== MERGED SUMMARY =====================")
print(merged_df.to_markdown(index=False))
print("==========================================================\n")

# Plot: Correlation vs Docs/sec
# plt.figure(figsize=(10, 6))
# plt.scatter(merged_df["Correlation with score"], merged_df["Docs / Sec"])
# plt.xlabel("Correlation with score")
# plt.ylabel("Documents per second")
# plt.title("Correlation vs Documents per Second")
# plt.grid(True)
# plt.tight_layout()
# plt.show()
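# For headless runs, the scatter can be written to disk instead of shown. A
# minimal sketch (assumes matplotlib is installed; note that "Docs / Sec" was
# formatted as a string when timing_df was built, so it must be cast back to
# float before plotting):
# import matplotlib.pyplot as plt
# plt.figure(figsize=(10, 6))
# plt.scatter(merged_df["Correlation with score"].astype(float), merged_df["Docs / Sec"].astype(float))
# plt.xlabel("Correlation with score")
# plt.ylabel("Documents per second")
# plt.savefig("correlation_vs_throughput.png", dpi=150)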