import asyncio
import json
import os
from typing import Any, List, Dict
import mcp.types as types
from mcp import CreateMessageResult
from mcp.server import Server
from mcp.server.stdio import stdio_server
from ourllm import genratequestionnaire, gradeanswers
from database_module import init_db, get_all_models_handler, search_models_handler
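# NOTE: `ourllm` is a project-local module; the signatures below are *inferred*
# from how its helpers are used in this file (both are awaited, and both return
# message objects exposing `.content.text`), not taken from its source:
#   async def genratequestionnaire(model: str, capabilities: str) -> List[types.SamplingMessage]
#   async def gradeanswers(old: List[str], new: List[str]) -> List[CreateMessageResult]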
# Initialize data directory and database
DATA_DIR = "data"
os.makedirs(DATA_DIR, exist_ok=True)
init_db()
app = Server("mcp-drift-server")
# === Sampling Helper ===
async def sample(messages: List[types.SamplingMessage], max_tokens: int = 600) -> CreateMessageResult:
    """Ask the connected client's LLM via MCP sampling (a server-to-client request)."""
    return await app.request_context.session.create_message(
        messages=messages,
        max_tokens=max_tokens,
        temperature=0.7,
    )
# === Baseline File Helpers ===
def get_baseline_path(model_name: str) -> str:
return os.path.join(DATA_DIR, f"{model_name}_baseline.json")
def get_response_path(model_name: str) -> str:
return os.path.join(DATA_DIR, f"{model_name}_latest.json")
# === Tool Manifest ===
@app.list_tools()
async def list_tools() -> List[types.Tool]:
return [
types.Tool(
name="run_initial_diagnostics",
description="Generate and store baseline diagnostics for a connected LLM.",
inputSchema={
"type": "object",
"properties": {
"model": {"type": "string", "description": "The name of the model to run diagnostics on"},
"model_capabilities": {"type": "string", "description": "Full description of the model's capabilities"}
},
"required": ["model", "model_capabilities"]
},
),
types.Tool(
name="check_drift",
description="Re-run diagnostics and compare to baseline for drift scoring.",
inputSchema={
"type": "object",
"properties": {"model": {"type": "string", "description": "The name of the model to run diagnostics on"}},
"required": ["model"]
},
),
types.Tool(
name="get_all_models",
description="Retrieve all registered models from the database.",
inputSchema={"type": "object", "properties": {}, "required": []}
),
types.Tool(
name="search_models",
description="Search registered models by name.",
inputSchema={
"type": "object",
"properties": {"query": {"type": "string", "description": "Substring to match model names against"}},
"required": ["query"]
}
),
]
# === Core Logic ===
async def run_initial_diagnostics(arguments: Dict[str, Any]) -> List[types.TextContent]:
model = arguments["model"]
caps = arguments["model_capabilities"]
    # 1. Ask the server's trusted LLM to generate a diagnostic questionnaire
    questions = await genratequestionnaire(model, caps)
    # 2. Put each question to the target LLM (the connected client) via sampling
    answers = []
    for q in questions:
        a = await sample([q])
        answers.append(a)
    # 3. Persist the baseline Q/A pairs
with open(get_baseline_path(model), "w") as f:
json.dump({
"questions": [m.content.text for m in questions],
"answers": [m.content.text for m in answers]
}, f, indent=2)
    return [types.TextContent(type="text", text=f"✅ Baseline stored for model: {model}")]
async def check_drift(arguments: Dict[str, Any]) -> List[types.TextContent]:
model = arguments["model"]
base_path = get_baseline_path(model)
# Ensure baseline exists
if not os.path.exists(base_path):
        return [types.TextContent(type="text", text=f"❌ No baseline for model: {model}")]
# Load questions + old answers
with open(base_path) as f:
data = json.load(f)
questions = [
types.SamplingMessage(role="user", content=types.TextContent(type="text", text=q))
for q in data["questions"]
]
old_answers = data["answers"]
    # 1. Ask the model the same questions again
    new_answer_msgs = []
    for q in questions:
        a = await sample([q])
        new_answer_msgs.append(a)
    new_answers = [m.content.text for m in new_answer_msgs]
    # 2. Grade the new answers against the baseline for drift
    grading = await gradeanswers(old_answers, new_answers)
    drift_score = grading[0].content.text.strip()
    # 3. Save the latest responses and score
with open(get_response_path(model), "w") as f:
json.dump({
"new_answers": new_answers,
"drift_score": drift_score
}, f, indent=2)
# 4. Alert threshold
try:
score_val = float(drift_score)
alert = "π¨ Significant drift!" if score_val > 50 else "β
Drift OK"
except ValueError:
alert = "β οΈ Drift score not numeric"
return [
types.TextContent(type="text", text=f"Drift score for {model}: {drift_score}"),
types.TextContent(type="text", text=alert)
]
# === Dispatcher ===
@app.call_tool()
async def dispatch_tool(name: str, arguments: Dict[str, Any] | None = None):
if name == "run_initial_diagnostics":
return await run_initial_diagnostics(arguments or {})
if name == "check_drift":
return await check_drift(arguments or {})
if name == "get_all_models":
return await get_all_models_handler()
if name == "search_models":
return await search_models_handler(arguments or {})
raise ValueError(f"Unknown tool: {name}")
# === Entrypoint ===
async def main():
async with stdio_server() as (reader, writer):
await app.run(reader, writer, app.create_initialization_options())
if __name__ == "__main__":
asyncio.run(main())
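# --- Example client (illustrative sketch) ---
# A minimal sketch of how a client might exercise this server over stdio. It is
# not part of the server, so it is left commented out here; save it as a separate
# script to run it. It assumes the standard `mcp` client API (ClientSession,
# StdioServerParameters, stdio_client) and that this file is saved as `server.py`
# (a hypothetical name). Because the tools call `sample()`, the client must supply
# a sampling callback; the stub below just returns a canned answer in place of a
# real LLM.
#
#   import asyncio
#   import mcp.types as types
#   from mcp import ClientSession, StdioServerParameters
#   from mcp.client.stdio import stdio_client
#
#   async def handle_sampling(context, params):
#       # Answer every sampled question with a fixed string (stand-in for a real LLM).
#       return types.CreateMessageResult(
#           role="assistant",
#           content=types.TextContent(type="text", text="stub answer"),
#           model="stub-model",
#       )
#
#   async def demo():
#       server = StdioServerParameters(command="python", args=["server.py"])
#       async with stdio_client(server) as (read, write):
#           async with ClientSession(read, write, sampling_callback=handle_sampling) as session:
#               await session.initialize()
#               result = await session.call_tool(
#                   "run_initial_diagnostics",
#                   {"model": "example-model", "model_capabilities": "general chat"},
#               )
#               print(result)
#
#   asyncio.run(demo())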