kenlkehl's picture
Upload app.py
3dc0c15 verified
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Clinical Trial Matching Pipeline - Gradio Web Interface
This interface allows users to:
1. Configure models (tagger, embedder, LLM)
2. Upload trial space database OR load pre-embedded trials
3. Upload patient notes or enter patient summary
4. Get ranked trial recommendations with eligibility predictions
"""
import gradio as gr
import pandas as pd
import numpy as np
import torch
import re
import os
import json
from typing import List, Tuple, Optional, Dict
from pathlib import Path
import tempfile
# HuggingFace imports
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
pipeline
)
from sentence_transformers import SentenceTransformer
from datasets import load_dataset
# Try to import configuration
try:
import config
HAS_CONFIG = True
print("✓ Found config.py - will auto-load models on startup")
except ImportError:
HAS_CONFIG = False
print("○ No config.py found - using manual model loading")
# Global state to hold loaded models and embedded trials
class AppState:
def __init__(self):
self.tagger_model = None
self.tagger_tokenizer = None
self.embedder_model = None
self.embedder_tokenizer = None
self.llm_model = None
self.llm_tokenizer = None
self.trial_checker_model = None
self.trial_checker_tokenizer = None
self.boilerplate_checker_model = None
self.boilerplate_checker_tokenizer = None
self.trial_spaces_df = None
self.trial_embeddings = None
self.trial_preview_df = None
self.device = "cuda" if torch.cuda.is_available() else "cpu"
# Store auto-load status messages to display in UI
self.auto_load_status = {
"tagger": "",
"embedder": "",
"llm": "",
"trial_checker": "",
"boilerplate_checker": "",
"trials": ""
}
def reset_trials(self):
self.trial_spaces_df = None
self.trial_embeddings = None
self.trial_preview_df = None
state = AppState()
# ============================================================================
# UTILITY FUNCTIONS
# ============================================================================
def split_into_excerpts(text: str) -> List[str]:
"""Split text into sentence-level excerpts."""
if not text or pd.isna(text):
return []
t = re.sub(r'[\n\r]+', ' ', text.strip())
t = re.sub(r'\s+', ' ', t)
if not t:
return []
t2 = t.replace(". ", "<excerpt break>")
parts = [p.strip() for p in t2.split("<excerpt break>") if p.strip()]
return parts
def truncate_text(text: str, tokenizer, max_tokens: int = 1500) -> str:
"""Truncate text to a maximum number of tokens."""
return tokenizer.decode(
tokenizer.encode(text, add_special_tokens=True, truncation=True, max_length=max_tokens),
skip_special_tokens=True
)
def format_probability_visual(val, is_exclusion=False):
"""
Helper to format probabilities with visual indicators (emojis) for the dataframe.
"""
try:
val_float = float(val)
except:
return val
# Logic for Eligibility (High is good)
if not is_exclusion:
if val_float >= 0.8:
return f"🟢 **{val_float:.2f}**"
elif val_float >= 0.5:
return f"🟡 {val_float:.2f}"
else:
return f"🔴 {val_float:.2f}"
# Logic for Exclusion (High is bad)
else:
if val_float >= 0.5:
return f"🔴 **{val_float:.2f}**" # High exclusion prob is bad
elif val_float >= 0.2:
return f"🟡 {val_float:.2f}"
else:
return f"🟢 {val_float:.2f}" # Low exclusion prob is good
# ============================================================================
# TRIAL SPACE EXTRACTION CONSTANTS
# ============================================================================
MAX_EMBEDDER_SEQ_LEN = 2500
MAX_LONGTEXT_SEQ_LEN = 110000
MAX_TRIAL_CHECKER_LENGTH = 4096
MAX_BOILERPLATE_CHECKER_LENGTH = 3192
REASONING_MARKER = "assistantfinal"
BOILERPLATE_MARKER = "Boilerplate"
TRIAL_SPACE_PROMPT_HEADER = (
"You are an expert clinical oncologist with an encyclopedic knowledge of cancer and its treatments.\n"
"Your job is to review a clinical trial document and extract a list of structured clinical spaces that are eligible for that trial.\n"
"A clinical space is defined as a unique combination of patient age range, sex (if any sex criteria), cancer primary site, histology, which treatments a patient must have received, "
"which treatments a patient must not have received, cancer burden (eg presence of metastatic disease; this also includes cancer type-specific prognostic scores, risk indices, or categories), tumor biomarkers (such as "
"germline or somatic gene mutations or alterations, or protein expression on tumor), that a patient must have or must not have to "
"be eligible for the trial. \n"
"With respect to sex criteria: For cancers originating in organs only present in one sex, you must assume the sex criteria even if not stated explicitly.\n"
"For example, a trial space for uterine, ovarian, vulvar, vaginal, or fallopian tube cancer must be assumed to be for female patients.\n"
"Similarly, a trial space for testicular, penile, or prostate cancer must be assumed to be for male patients.\n"
"For all other cancer types (including breast cancer), you shoulud assume the trial is open to both sexes unless the clinical trial document states otherwise.\n"
"Trials often specify that a particular treatment is excluded only if it was given within a short period of time, for example 14 days, "
"one month, etc , prior to trial start. This is called a washout period. Do not include this type of time-specific treatment washout "
"eligibility criteria in your output at all.\n"
"Some trials have only one space, while others have several. Do not output a space that contains multiple cancer types and/or histologies. "
"Instead, generate separate spaces for each cancer type/histology combination.\n"
"CRITICAL: Each trial space must contain all information necessary to define that space on its own. It may not refer to other previously "
"defined spaces for the same trial, since for later use, the spaces will be extracted and separated from each other. YOU MAY NOT include "
"text describing a given space that refers to a previous space; eg, \"Same as above\"-style output is not allowed!\n"
"For biomarkers, if the trial specifies whether the biomarker will be assessed during screening, note that.\n"
"Spell out cancer types; do not abbreviate them. For example, write \"non-small cell lung cancer\" rather than \"NSCLC\".\n"
"Structure your output like this, as a list of spaces, with spaces separated by newlines, as below. STRICTLY adhere to the formatting.\n"
"1. Age range allowed: <age_range_allowed>. Sex allowed: <sex_allowed>. Cancer type allowed: <cancer_type_allowed>. Histology allowed: <histology_allowed>. Cancer burden allowed: <cancer_burden_allowed>. Prior treatment required: <prior_treatments_requred>. Prior treatment excluded: <prior_treatments_excluded>. Biomarkers required: <biomarkers_required>. Biomarkers excluded: <biomarkers_excluded>. \n"
"2. Cancer type allowed: <cancer_type_allowed>, etc.\n"
"If a concept is not relevant, such as if there are no prior treatents required, simply output NA for that concept.\n"
"CRITICAL: Anytime you provide a list for a particular concept, you must be completely clear on whether \"or\" versus \"and\" logic applies "
"to the list. For example, do not output \"EGFR L858R mutant, TP53 mutant\"; if both are required, output \"EGFR L858R mutant and TP53 mutant\". "
"As another example, do not output \"ER+, PR+\"; if the patient can have either an ER or a PR positive tumor, output \"ER+ or PR+\".\n"
"NEVER put a newline within a single trial space.\n"
"After you output the trial spaces, output a newline, then the text \"Boilerplate exclusions:\" VERBATIM, then another newline.\n"
"Then, list exclusion criteria described in the trial text that are unrelated to the trial space definitions. Such exclusions tend to be common "
"to clinical trials in general.\n"
"Common boilerplate exclusion criteria include a history of pneumonitis, heart failure, renal dysfunction, liver dysfunction, uncontrolled brain "
"metastases, HIV or hepatitis, and poor performance status.\n"
"ALWAYS output plain text only. NEVER output unicode, Markdown, or tables.\n"
)
TRIAL_SPACE_PROMPT_SUFFIX = (
"Now, generate your list of the trial space(s), followed by any boilerplate exclusions, formatted as above.\n"
"Do not provide any introductory, explanatory, concluding, or disclaimer text.\n"
"Reminder: Treatment history is an important component of trial space definitions, but treatment history \"washout\" requirements that are "
"described as applying only in a given period of time prior to trial treatment MUST BE IGNORED.\n"
"CRITICAL: A given trial space MUST NEVER refer to another previously defined space. You must NEVER output text like \"same as #1\" or "
"\"same criteria as above.\" Instead, you MUST REPEAT all relevant criteria for each new space SO THAT IT STANDS ON ITS OWN. A user who later "
"looks at the text for one space will not have access to text for other spaces, and so output like \"Same criteria as #1...\" renders a space useless!"
)
DEEPER_SCREEN_TRIAL_PROMPT = (
"You are a brilliant oncologist with encyclopedic knowledge about cancer and its treatment. "
"Your job is to evaluate whether a given clinical trial is a reasonable consideration for a patient, "
"given a clinical trial summary and a patient summary.\n\n"
"Here is a summary of the clinical trial:\n{trial_space}\n"
"Here is a summary of the patient:\n{patient_summary}\n"
"Base your judgment on whether the patient generally fits the age requirements if any, sex requirements if any, cancer type(s), cancer burden, prior treatment(s), "
"and biomarker criteria specified for the trial.\n"
"You do not have to determine if the patient is actually eligible; instead please just evaluate whether it is reasonable "
"for the trial to be considered further by the patient's oncologist.\n"
"Biomarker criteria have to be considered carefully. Some trials have biomarker requirements that are not assessed until "
"formal trial screening. A trial may therefore sometimes be a reasonable consideration for a patient even if a required "
"biomarker is not known to be present in the patient.\n"
"However, if a required biomarker is known to be absent, or can be assumed to be absent based on other information, the trial "
"is not a reasonable consideration. For example, if a trial for lung cancer requires an EGFR mutation, documentation that there "
"is no EGFR mutation indicates the trial is not a reasonable consideration. Similarly, documentation of a KRAS mutation in the "
"patient indicates the trial is not a reasonable consideration, since, as you know, KRAS and EGFR driver mutations in lung cancer "
"are mutually exclusive.\n"
"Many trials describe required washout periods for prior treatments for eligibility. For example, the eligibility criteria might state "
"that patients may not have received radiation or chemotherapy in the last 14 days or 30 days. It is CRITICAL that you IGNORE these "
"eligibility criteria when considering prior treatment requirements. Assume that patients could wait for the washout period to enroll. "
"Also CRITICAL: Ignore your knowledge of today's current date. Pretend that you are evaluating the patient's evaluation based on the "
"most recent information available in their summary, at the time of that most recently available information. "
"Do not provide ethical judgments or comment on resource constraints with respect whether the trial is a reasonable clinical "
"consideration; just evaluate whether it is, given the available information.\n"
'Reason step by step, then answer the question "Is this trial a reasonable consideration for this patient?" with a one-word '
'"Yes!" or "No!" answer.\n'
"Make sure to include the exclamation point in your final one-word answer."
)
DEEPER_SCREEN_BOILERPLATE_PROMPT = (
"You are a brilliant oncologist with encyclopedic knowledge about cancer and its treatment.\n"
"Your job is to evaluate whether a patient has any underlying medical conditions that would exclude him or her from a specific clinical trial.\n\n"
"Here is an extract of the patient's history:\n{patient_boilerplate}\n"
"Here are the exclusion criteria for the trial:\n{trial_boilerplate}\n"
"Note that the extract was generated by prompting an LLM to determine whether the patient meets specific common exclusion criteria, "
"such as uncontrolled brain metastases, lack of measurable disease, congestive heart failure, pneumonitis, renal dysfunction, "
"liver dysfunction, and HIV or hepatitis infection, and to present evidence for whether the patient met the criterion.\n"
"You should therefore not assume that mention of such condition means the patient has the condition; it may represent the LLM reasoning "
"about whether the patient has the condition.\n"
"Based on the extract, you should determine whether the patient clearly meets one of the exclusion criteria for this specific trial.\n"
"Do not evaluate exclusion criteria other than those listed for this trial.\n"
"Reason through one exclusion criterion at a time. Generate a numbered list of the criteria as you go. For each one, decide whether the patient clearly "
"meets the exclusion criteron. If it is not completely clear that the patient meets the exclusion criterion, give the patient the benefit of the doubt, "
"and err on the side of deciding the patient is not excluded. A description in the patient extract that a condition is mild, low-grade, or resolved is even "
"more of a reason not to exclude the patient based on that condition.\n"
'Once you have evaluated all exclusion criteria, answer the question "Is this patient clearly excluded from this trial?" with a one-word "Yes!" or "No!" answer, '
"based on whether the patient clearly met any of the individual exclusion criteria. It is critical that your final word be either \"Yes!\" or \"No!\", verbatim, and case-sensitive.\n"
"Make sure to include the exclamation point in your final one-word answer.\n"
"No introductory text or concluding text after that final answer."
)
# ============================================================================
# AUTO-LOADING FROM CONFIG
# ============================================================================
def auto_load_models_from_config():
"""Auto-load models specified in config.py"""
if not HAS_CONFIG:
return
print("\n" + "="*70)
print("AUTO-LOADING MODELS FROM CONFIG")
print("="*70)
# Load tagger
if config.MODEL_CONFIG.get("tagger"):
print(f"\n[1/5] Loading tagger: {config.MODEL_CONFIG['tagger']}")
status, _ = load_tagger_model(config.MODEL_CONFIG["tagger"])
state.auto_load_status["tagger"] = status
print(status)
# Load embedder
if config.MODEL_CONFIG.get("embedder"):
print(f"\n[2/5] Loading embedder: {config.MODEL_CONFIG['embedder']}")
status, _, _ = load_embedder_model(config.MODEL_CONFIG["embedder"])
state.auto_load_status["embedder"] = status
print(status)
# Load LLM
if config.MODEL_CONFIG.get("llm"):
print(f"\n[3/5] Loading LLM: {config.MODEL_CONFIG['llm']}")
status, _ = load_llm_model(config.MODEL_CONFIG["llm"])
state.auto_load_status["llm"] = status
print(status)
# Load trial checker
if config.MODEL_CONFIG.get("trial_checker"):
print(f"\n[4/5] Loading trial checker: {config.MODEL_CONFIG['trial_checker']}")
status, _ = load_trial_checker(config.MODEL_CONFIG["trial_checker"])
state.auto_load_status["trial_checker"] = status
print(status)
# Load boilerplate checker
if config.MODEL_CONFIG.get("boilerplate_checker"):
print(f"\n[5/5] Loading boilerplate checker: {config.MODEL_CONFIG['boilerplate_checker']}")
status, _ = load_boilerplate_checker(config.MODEL_CONFIG["boilerplate_checker"])
state.auto_load_status["boilerplate_checker"] = status
print(status)
print("\n" + "="*70)
print("MODEL AUTO-LOADING COMPLETE")
print("="*70 + "\n")
def auto_load_trials_from_config():
"""Auto-load trial database from config.py - prefers pre-embedded over fresh embedding."""
if not HAS_CONFIG:
return
# Check for pre-embedded trials first (much faster)
if hasattr(config, 'PREEMBEDDED_TRIALS') and config.PREEMBEDDED_TRIALS:
preembed_path = config.PREEMBEDDED_TRIALS
print("\n" + "="*70)
print(f"AUTO-LOADING PRE-EMBEDDED TRIALS: {preembed_path}")
print("="*70)
status, preview = load_preembedded_trials(preembed_path)
state.auto_load_status["trials"] = status
# Store the preview so it can be displayed in the UI
state.trial_preview_df = preview
print("="*70)
print("PRE-EMBEDDED TRIALS AUTO-LOADING COMPLETE")
print("="*70 + "\n")
return
# Fall back to fresh embedding if no pre-embedded trials specified
if not hasattr(config, 'DEFAULT_TRIAL_DB') or not config.DEFAULT_TRIAL_DB:
print("○ No trial database specified in config")
return
if not os.path.exists(config.DEFAULT_TRIAL_DB):
print(f"✗ Default trial database not found: {config.DEFAULT_TRIAL_DB}")
state.auto_load_status["trials"] = f"✗ Trial database file not found: {config.DEFAULT_TRIAL_DB}"
return
if state.embedder_model is None:
print("○ Embedder not loaded yet - skipping trial database auto-load")
state.auto_load_status["trials"] = "○ Waiting for embedder model to be loaded..."
return
print("\n" + "="*70)
print(f"AUTO-LOADING TRIAL DATABASE: {config.DEFAULT_TRIAL_DB}")
print("="*70)
# Create a temporary file-like object
class FilePath:
def __init__(self, path):
self.name = path
status, preview = load_and_embed_trials(FilePath(config.DEFAULT_TRIAL_DB), show_progress=True)
state.auto_load_status["trials"] = status
# Store the preview so it can be displayed in the UI
state.trial_preview_df = preview
print("="*70)
print("TRIAL DATABASE AUTO-LOADING COMPLETE")
print("="*70 + "\n")
# ============================================================================
# MODEL LOADING FUNCTIONS
# ============================================================================
def load_tagger_model(model_path: str) -> Tuple[str, str]:
"""Load TinyBERT tagger model."""
try:
state.tagger_tokenizer = AutoTokenizer.from_pretrained(model_path)
state.tagger_model = pipeline(
"text-classification",
model=model_path,
tokenizer=state.tagger_tokenizer,
device=0 if state.device == "cuda" else -1,
truncation=True,
padding="max_length",
max_length=128
)
return f"✓ Tagger model loaded from {model_path}", ""
except Exception as e:
return f"✗ Error loading tagger model: {str(e)}", str(e)
def load_embedder_model(model_path: str) -> Tuple[str, str, str]:
"""Load sentence transformer embedder model."""
try:
# Check if trials are already loaded
will_need_reembed = state.trial_spaces_df is not None and len(state.trial_spaces_df) > 0
if will_need_reembed:
warning_msg = f"\n⚠️ Warning: {len(state.trial_spaces_df)} trials are currently loaded. They will need to be re-embedded with the new model."
else:
warning_msg = ""
state.embedder_model = SentenceTransformer(model_path, device=state.device, trust_remote_code=True)
state.embedder_tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# Set the instruction prompt
try:
state.embedder_model.prompts['query'] = (
"Instruct: Given a cancer patient summary, retrieve clinical trial options "
"that are reasonable for that patient; or, given a clinical trial option, "
"retrieve cancer patients who are reasonable candidates for that trial."
)
except:
pass
try:
state.embedder_model.max_seq_length = MAX_EMBEDDER_SEQ_LEN
except:
pass
success_msg = f"✓ Embedder model loaded from {model_path}{warning_msg}"
# If trials were loaded, invalidate embeddings
if will_need_reembed:
state.trial_embeddings = None
success_msg += "\n→ Trial embeddings cleared. Please reload trial database to re-embed."
return success_msg, "", warning_msg
except Exception as e:
return f"✗ Error loading embedder model: {str(e)}", str(e), ""
def load_llm_model(model_path: str) -> Tuple[str, str]:
"""Load LLM for patient summarization."""
try:
# Check if vLLM is available
try:
from vllm import LLM, SamplingParams
# Determine tensor parallel size
gpu_count = torch.cuda.device_count()
tp_size = min(gpu_count, 4) if gpu_count > 1 else 1
state.llm_model = LLM(
model=model_path,
tensor_parallel_size=tp_size,
gpu_memory_utilization=0.60,
max_model_len=15000,
)
state.llm_tokenizer = state.llm_model.get_tokenizer()
return f"✓ LLM loaded from {model_path} (vLLM, tp={tp_size})", ""
except ImportError:
# Fallback to HuggingFace transformers
from transformers import AutoModelForCausalLM
state.llm_tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
state.llm_model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.float16 if state.device == "cuda" else torch.float32,
device_map="auto",
trust_remote_code=True
)
return f"✓ LLM loaded from {model_path} (HuggingFace)", ""
except Exception as e:
return f"✗ Error loading LLM: {str(e)}", str(e)
def load_trial_checker(model_path: str) -> Tuple[str, str]:
"""Load ModernBERT trial checker."""
try:
state.trial_checker_tokenizer = AutoTokenizer.from_pretrained(model_path)
state.trial_checker_model = AutoModelForSequenceClassification.from_pretrained(
model_path,
torch_dtype=torch.float16 if state.device == "cuda" else torch.float32
).to(state.device)
state.trial_checker_model.eval()
return f"✓ Trial checker loaded from {model_path}", ""
except Exception as e:
return f"✗ Error loading trial checker: {str(e)}", str(e)
def load_boilerplate_checker(model_path: str) -> Tuple[str, str]:
"""Load ModernBERT boilerplate checker."""
try:
state.boilerplate_checker_tokenizer = AutoTokenizer.from_pretrained(model_path)
state.boilerplate_checker_model = AutoModelForSequenceClassification.from_pretrained(
model_path,
torch_dtype=torch.float16 if state.device == "cuda" else torch.float32
).to(state.device)
state.boilerplate_checker_model.eval()
return f"✓ Boilerplate checker loaded from {model_path}", ""
except Exception as e:
return f"✗ Error loading boilerplate checker: {str(e)}", str(e)
# ============================================================================
# TRIAL SPACE PROCESSING (WITH PRE-EMBEDDING SUPPORT)
# ============================================================================
def load_preembedded_trials(path_or_url: str) -> Tuple[str, pd.DataFrame]:
"""Load pre-embedded trial database from a local parquet file or a Huggingface URL."""
try:
print(f"\n{'='*70}")
print(f"LOADING PRE-EMBEDDED TRIALS")
print(f"{'='*70}")
print(f"Loading from: {path_or_url}")
# Check if it's a URL or a local path
if path_or_url.startswith("http"):
# It's a URL, load from Huggingface
print("Detected URL, loading from Huggingface Hub...")
dataset = load_dataset("parquet", data_files=path_or_url, split='train')
df = dataset.to_pandas()
print(f"✓ Loaded {len(df)} trials from Hub")
else:
# It's a local path
parquet_path = path_or_url
if not parquet_path.endswith('.parquet'):
parquet_path = parquet_path + '.parquet'
# Check file exists
if not os.path.exists(parquet_path):
return f"✗ Pre-embedded parquet file not found: {parquet_path}", None
# Load parquet file
print(f"Loading trial dataframe with embeddings...")
df = pd.read_parquet(parquet_path)
print(f"✓ Loaded {len(df)} trials from local file")
# Check for embedding column
if 'embedding' not in df.columns:
return f"✗ Parquet file missing 'embedding' column: {path_or_url}", None
# Extract embeddings from the column and convert to numpy array
print(f"Extracting embeddings...")
embeddings = np.array(df['embedding'].tolist())
print(f"✓ Extracted embeddings: {embeddings.shape}")
# Remove embedding column from dataframe (not needed in the df itself)
df_without_embeddings = df.drop(columns=['embedding'])
# Store in state
state.trial_spaces_df = df_without_embeddings
state.trial_embeddings = embeddings
print(f"{'='*70}")
print(f"PRE-EMBEDDED TRIALS LOADED SUCCESSFULLY")
print(f"{'='*70}\n")
preview = df_without_embeddings[['nct_id', 'this_space']].head(10)
return f"✓ Loaded {len(df)} pre-embedded trials from {path_or_url}", preview
except Exception as e:
import traceback
traceback.print_exc()
return f"✗ Error loading pre-embedded trials: {str(e)}", None
def load_and_embed_trials(file, show_progress: bool = False) -> Tuple[str, pd.DataFrame]:
"""Load trial spaces CSV/Excel and embed them."""
try:
if state.embedder_model is None:
return "✗ Please load the embedder model first!", None
# Read file
if file.name.endswith('.csv'):
df = pd.read_csv(file.name)
elif file.name.endswith(('.xlsx', '.xls')):
df = pd.read_excel(file.name)
else:
return "✗ Unsupported file format. Use CSV or Excel.", None
# Check required columns
required_cols = ['nct_id', 'this_space', 'trial_text', 'trial_boilerplate_text']
missing = [col for col in required_cols if col not in df.columns]
if missing:
return f"✗ Missing required columns: {', '.join(missing)}", None
# Clean data
df = df[~df['this_space'].isnull()].copy()
df['trial_boilerplate_text'] = df['trial_boilerplate_text'].fillna('')
# Prepare texts for embedding
df['this_space_trunc'] = df['this_space'].apply(
lambda x: truncate_text(str(x), state.embedder_tokenizer, max_tokens=MAX_EMBEDDER_SEQ_LEN)
)
# Add instruction prefix
prefix = (
"Instruct: Given a cancer patient summary, retrieve clinical trial options "
"that are reasonable for that patient; or, given a clinical trial option, "
"retrieve cancer patients who are reasonable candidates for that trial. "
)
texts_to_embed = [prefix + txt for txt in df['this_space_trunc'].tolist()]
# Embed with progress
if not show_progress:
gr.Info(f"Embedding {len(df)} trial spaces...")
else:
print(f"Embedding {len(df)} trial spaces...")
with torch.no_grad():
embeddings = state.embedder_model.encode(
texts_to_embed,
batch_size=64,
convert_to_tensor=True,
normalize_embeddings=True,
show_progress_bar=show_progress,
prompt='query'
)
# Store in state
state.trial_spaces_df = df
state.trial_embeddings = embeddings.cpu().numpy()
preview = df[['nct_id', 'this_space']].head(10)
success_msg = f"✓ Loaded and embedded {len(df)} trial spaces"
if show_progress:
print(success_msg)
return success_msg, preview
except Exception as e:
return f"✗ Error processing trials: {str(e)}", None
# ============================================================================
# PATIENT NOTE PROCESSING
# ============================================================================
def process_patient_notes(file, prob_threshold: float = 0.1, progress=gr.Progress(track_tqdm=True)) -> Tuple[str, str]:
"""Process patient notes through tagger and create long note."""
try:
progress(0, desc="Starting note processing...")
gr.Info("🚀 Processing patient notes...")
if state.tagger_model is None:
return "✗ Please load the tagger model first!", ""
# Read file
progress(0.1, desc="Reading file...")
if file.name.endswith('.csv'):
df = pd.read_csv(file.name)
elif file.name.endswith(('.xlsx', '.xls')):
df = pd.read_excel(file.name)
else:
return "✗ Unsupported file format. Use CSV or Excel.", ""
# Check required columns
if 'date' not in df.columns or 'text' not in df.columns:
return "✗ File must contain 'date' and 'text' columns", ""
# Sort by date
df['date'] = pd.to_datetime(df['date'], errors='coerce')
df = df.sort_values('date').reset_index(drop=True)
progress(0.2, desc="Extracting excerpts...")
# Extract all excerpts
all_excerpts = []
all_dates = []
all_note_types = []
for idx, row in df.iterrows():
excerpts = split_into_excerpts(str(row['text']))
note_type = row.get('note_type', 'clinical_note')
for exc in excerpts:
all_excerpts.append(exc)
all_dates.append(row['date'])
all_note_types.append(note_type)
if not all_excerpts:
return "✗ No valid excerpts extracted from notes", ""
progress(0.3, desc=f"Tagging {len(all_excerpts)} excerpts...")
gr.Info(f"🏷️ Tagging {len(all_excerpts)} excerpts...")
# Run tagger
predictions = state.tagger_model(all_excerpts, batch_size=256)
progress(0.8, desc="Filtering relevant excerpts...")
# Extract positive excerpts
excerpts_df = pd.DataFrame({
'excerpt': all_excerpts,
'date': all_dates,
'note_type': all_note_types,
'label': [p['label'] for p in predictions],
'score': [p['score'] for p in predictions]
})
# Calculate positive probability
excerpts_df['positive_prob'] = np.where(
excerpts_df['label'] == 'NEGATIVE',
1.0 - excerpts_df['score'],
excerpts_df['score']
)
# Filter by threshold
keep = excerpts_df[excerpts_df['positive_prob'] > prob_threshold].copy()
# FIX: Capture the raw count of excerpts *before* grouping
raw_keep_count = len(keep)
if len(keep) == 0:
return "✗ No excerpts passed the threshold", ""
# Group by date and note type
keep['date_str'] = keep['date'].dt.strftime('%Y-%m-%d')
keep = keep.groupby(['date_str', 'note_type'])['excerpt'].agg(lambda x: ' '.join(x)).reset_index()
keep['date_text'] = (
keep['date_str'] + " " +
keep['note_type'] + " " +
keep['excerpt']
)
# Create long note
long_note = "\n".join(keep['date_text'].tolist())
progress(1.0, desc="✓ Processing complete!")
gr.Info("✅ Note processing complete!")
# FIX: Display the raw count in the stats message
stats = (
f"✓ Processed {len(df)} notes → {len(all_excerpts)} excerpts → "
f"{raw_keep_count} relevant excerpts (threshold={prob_threshold})"
)
return stats, long_note
except Exception as e:
gr.Error(f"Note processing failed: {str(e)}")
return f"✗ Error processing notes: {str(e)}", ""
def parse_summary_output(raw_text: str, show_reasoning: bool) -> Tuple[str, str]:
"""Parse the raw LLM output into summary and boilerplate components."""
if not raw_text:
return "", ""
# If NOT showing reasoning, first strip it out
text_to_process = raw_text
if not show_reasoning and REASONING_MARKER in raw_text:
text_to_process = raw_text.split(REASONING_MARKER, 1)[-1]
# Now split into summary and boilerplate
lines = text_to_process.splitlines(keepends=True)
marker_line_index = -1
for i, line in enumerate(lines):
if BOILERPLATE_MARKER in line:
marker_line_index = i
break
if marker_line_index != -1:
summary_part = "".join(lines[:marker_line_index])
boilerplate_part = "".join(lines[marker_line_index + 1:])
else:
# Fallback behavior from original code
summary_part = text_to_process
boilerplate_part = text_to_process
return summary_part.strip(), boilerplate_part.strip()
def summarize_patient_history(long_note: str, progress=gr.Progress(track_tqdm=True)) -> Tuple[str, str]:
"""Summarize patient long note using LLM and return raw output plus status."""
try:
progress(0, desc="Starting summarization...")
gr.Info("🚀 Starting patient history summarization... This may take 1-2 minutes.")
if state.llm_model is None:
return "✗ Please load the LLM model first!", "✗ LLM not loaded"
if not long_note or len(long_note.strip()) == 0:
return "✗ No patient history to summarize", "✗ No input"
progress(0.1, desc="Preparing patient text...")
# Truncate if needed
tokens = state.llm_tokenizer.encode(long_note, add_special_tokens=False)
max_tokens = MAX_LONGTEXT_SEQ_LEN # Leave room for prompt and response
if len(tokens) > max_tokens:
half = max_tokens // 2
first_part = state.llm_tokenizer.decode(tokens[:half])
last_part = state.llm_tokenizer.decode(tokens[-half:])
patient_text = first_part + " ... " + last_part
gr.Info(f"📝 Text truncated from {len(tokens)} to {max_tokens} tokens")
else:
patient_text = long_note
progress(0.2, desc="Building prompt...")
# Build prompt
messages = [
{'role': 'system', 'content': 'Reasoning: high'},
{'role': 'user', 'content': """You are an experienced clinical oncology history summarization bot.
Your job is to construct a summary of the cancer history for a patient based on an excerpt of the patient's electronic health record. The text in the excerpt is provided in chronological order. Each paragraph in the excerpt represents a summary of a clinical document written on the date indicated in the paragraph.
Document the patient's most recent age; sex; cancer type/primary site (eg breast cancer, lung cancer, etc); histology (eg adenocarcinoma, squamous carcinoma, etc); current extent (localized, advanced, metastatic, etc); biomarkers (genomic results, protein expression, etc); and treatment history (surgery, radiation, chemotherapy/targeted therapy/immunotherapy, etc, including start and stop dates and best response if known).
Do not consider localized basal cell or squamous carcinomas of the skin, or colon polyps, to be cancers for your purposes.
Do not include the patient's name, but do include relevant dates whenever documented, including dates of diagnosis and start/stop dates of each treatment.
If a patient has a history of more than one cancer, document the cancers one at a time.
CRITICAL: Format your response as free text ONLY. Do NOT output markdown, Unicode, or tables.
Also document any history of conditions that might meet "boilerplate" exclusion criteria, including uncontrolled brain metastases, lack of measurable disease, congestive heart failure, pneumonitis, renal dysfunction, liver dysfunction, and HIV or hepatitis infection. For each of these, present the evidence from the history that the patient has a history of such a condition, including dates.
Clearly separate the "boilerplate" section by labeling it "Boilerplate: " before describing any such conditions.
Here is an example of the desired output format:
Age: 70
Sex: Male
Cancer type: Lung cancer
Histology: Adenocarcinoma
Current extent: Metastatic
Biomarkers: PD-L1 75%, KRAS G12C mutant
Treatment history:
# 1/5/2020-2/5/2021: carboplatin/pemetrexed/pembrolizumab
# 1/2021: Palliative radiation to progressive spinal metastases
# 3/2021-present: docetaxel
Boilerplate:
No evidence of common boilerplate exclusion criteria
""" + "The excerpt for you to summarize is:\n" + patient_text + """\nNow, write your summary. Do not add preceding text before the abstraction, and do not add notes or commentary afterwards. This will not be used for clinical care, so do not write any disclaimers or cautionary notes."""}
]
print("Summarizing patient history with LLM...")
progress(0.3, desc="⏳ Running LLM inference (this takes ~1 min)...")
gr.Info("⏳ LLM inference in progress... Please wait.")
# Check if using vLLM or HuggingFace
if hasattr(state.llm_model, 'generate') and hasattr(state.llm_model, 'get_tokenizer'):
# vLLM
from vllm import SamplingParams
prompt = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
tokenize=False
)
response = state.llm_model.generate(
[prompt],
SamplingParams(
temperature=0.0,
top_k=1,
max_tokens=7500,
repetition_penalty=1.2
)
)
output = response[0].outputs[0].text
else:
# HuggingFace
input_ids = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
return_tensors="pt"
).to(state.device)
with torch.no_grad():
outputs = state.llm_model.generate(
input_ids,
max_new_tokens=7500,
temperature=0.00,
do_sample=True,
repetition_penalty=1.2
)
output = state.llm_tokenizer.decode(outputs[0], skip_special_tokens=True)
progress(1.0, desc="✓ Summarization complete!")
gr.Info("✅ Patient summary generated successfully!")
return output, "✓ Summarization complete"
except Exception as e:
gr.Error(f"Summarization failed: {str(e)}")
return f"✗ Error summarizing: {str(e)}", f"✗ Error: {str(e)}"
# ============================================================================
# TRIAL SPACE EXTRACTION
# ============================================================================
def extract_trial_spaces(trial_text: str, progress=gr.Progress(track_tqdm=True)) -> str:
"""Extract trial spaces and boilerplate criteria from trial text using LLM."""
try:
progress(0, desc="Starting trial space extraction...")
gr.Info("🚀 Starting trial space extraction... This may take 1-2 minutes.")
if state.llm_model is None:
return "✗ Please load the LLM model first!"
if not trial_text or len(trial_text.strip()) == 0:
return "✗ No trial text provided"
progress(0.2, desc="Building prompt...")
# Build prompt messages
messages = [
{"role": "system", "content": "Reasoning: high."},
{
"role": "user",
"content": (
TRIAL_SPACE_PROMPT_HEADER
+ "Here is a clinical trial document:\n"
+ str(trial_text)
+ "\n"
+ TRIAL_SPACE_PROMPT_SUFFIX
),
},
]
print("Extracting trial spaces with LLM...")
progress(0.3, desc="⏳ Running LLM inference (this takes ~1 min)...")
gr.Info("⏳ LLM inference in progress... Please wait.")
# Check if using vLLM or HuggingFace
if hasattr(state.llm_model, 'generate') and hasattr(state.llm_model, 'get_tokenizer'):
# vLLM
from vllm import SamplingParams
prompt = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
tokenize=False
)
response = state.llm_model.generate(
[prompt],
SamplingParams(
temperature=0.0,
top_k=1,
max_tokens=7500,
repetition_penalty=1.3
)
)
output = response[0].outputs[0].text
else:
# HuggingFace
input_ids = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
return_tensors="pt"
).to(state.device)
with torch.no_grad():
outputs = state.llm_model.generate(
input_ids,
max_new_tokens=7500,
temperature=0.0,
do_sample=False,
repetition_penalty=1.3
)
output = state.llm_tokenizer.decode(outputs[0], skip_special_tokens=True)
# Extract just the assistant response
if REASONING_MARKER in output:
output = output.split(REASONING_MARKER)[-1]
# Clean up reasoning markers if present
if REASONING_MARKER in output:
output = output.split(REASONING_MARKER, 1)[-1]
output = output.strip()
progress(1.0, desc="✓ Extraction complete!")
gr.Info("✅ Trial spaces extracted successfully!")
return output
except Exception as e:
gr.Error(f"Extraction failed: {str(e)}")
return f"✗ Error extracting trial spaces: {str(e)}"
# ============================================================================
# TRIAL MATCHING
# ============================================================================
def match_trials(patient_summary: str, patient_boilerplate: str, top_k: int = 20, progress=gr.Progress(track_tqdm=True)) -> pd.DataFrame:
"""Match patient to trials and run checkers."""
try:
progress(0, desc="Starting trial matching...")
gr.Info("🔍 Starting trial matching...")
if state.embedder_model is None:
gr.Error("Embedder model not loaded")
raise ValueError("Embedder model not loaded")
if state.trial_embeddings is None:
gr.Error("Trial spaces not loaded")
raise ValueError("Trial spaces not loaded")
if state.trial_checker_model is None:
gr.Error("Trial checker model not loaded")
raise ValueError("Trial checker model not loaded")
if state.boilerplate_checker_model is None:
gr.Error("Boilerplate checker model not loaded")
raise ValueError("Boilerplate checker model not loaded")
progress(0.1, desc="Embedding patient summary...")
# Embed patient summary
prefix = (
"Instruct: Given a cancer patient summary, retrieve clinical trial options "
"that are reasonable for that patient; or, given a clinical trial option, "
"retrieve cancer patients who are reasonable candidates for that trial. "
)
patient_text = truncate_text(patient_summary, state.embedder_tokenizer, max_tokens=MAX_EMBEDDER_SEQ_LEN)
patient_text_with_prefix = prefix + patient_text
with torch.no_grad():
patient_emb = state.embedder_model.encode(
[patient_text_with_prefix],
convert_to_tensor=True,
normalize_embeddings=True,
prompt='query'
)
progress(0.3, desc="Computing similarities...")
# Calculate similarities
patient_emb_np = patient_emb.cpu().numpy()
similarities = np.dot(state.trial_embeddings, patient_emb_np.T).squeeze()
# Get top-k
top_indices = np.argsort(similarities)[::-1][:top_k]
# Get top trials
top_trials = state.trial_spaces_df.iloc[top_indices].copy()
top_trials['similarity_score'] = similarities[top_indices]
progress(0.5, desc=f"Running eligibility checks on {len(top_trials)} trials...")
gr.Info(f"🔬 Running eligibility checks on top {len(top_trials)} trials...")
# Run trial checker
trial_check_inputs = [
f"{row['this_space']}\nNow here is the patient summary:{patient_summary}"
for _, row in top_trials.iterrows()
]
trial_check_encodings = state.trial_checker_tokenizer(
trial_check_inputs,
truncation=True,
max_length=MAX_TRIAL_CHECKER_LENGTH,
padding=True,
return_tensors='pt'
).to(state.device)
with torch.no_grad():
trial_check_outputs = state.trial_checker_model(**trial_check_encodings)
trial_probs = torch.softmax(trial_check_outputs.logits, dim=1)[:, 1].cpu().numpy()
top_trials['eligibility_probability'] = trial_probs
progress(0.75, desc="Running boilerplate checks...")
gr.Info("📋 Running boilerplate exclusion checks...")
# Run boilerplate checker
boilerplate_check_inputs = [
f"Patient history: {patient_boilerplate}\nTrial exclusions:{row['trial_boilerplate_text']}"
for _, row in top_trials.iterrows()
]
boilerplate_check_encodings = state.boilerplate_checker_tokenizer(
boilerplate_check_inputs,
truncation=True,
max_length=MAX_BOILERPLATE_CHECKER_LENGTH,
padding=True,
return_tensors='pt'
).to(state.device)
with torch.no_grad():
boilerplate_check_outputs = state.boilerplate_checker_model(**boilerplate_check_encodings)
boilerplate_probs = torch.softmax(boilerplate_check_outputs.logits, dim=1)[:, 1].cpu().numpy()
top_trials['exclusion_probability'] = boilerplate_probs
progress(0.9, desc="Formatting results...")
# Sort by eligibility probability
top_trials = top_trials.sort_values('eligibility_probability', ascending=False)
# Apply visual formatting for the display table
top_trials['eligibility_display'] = top_trials['eligibility_probability'].apply(
lambda x: format_probability_visual(x, is_exclusion=False)
)
top_trials['exclusion_display'] = top_trials['exclusion_probability'].apply(
lambda x: format_probability_visual(x, is_exclusion=True)
)
top_trials['similarity_display'] = top_trials['similarity_score'].apply(
lambda x: f"{x:.3f}"
)
# Select columns for display - use the Display versions for the UI
display_cols = [
'nct_id',
'eligibility_display',
'exclusion_display',
'similarity_display',
'this_space'
]
result_df = top_trials[display_cols].reset_index(drop=True)
# Rename columns for better UI reading
result_df.columns = [
'NCT ID',
'Eligibility',
'Exclusion',
'Similarity',
'Criteria Space'
]
progress(1.0, desc="✓ Matching complete!")
gr.Info(f"✅ Found {len(result_df)} matching trials!")
return result_df
except Exception as e:
gr.Error(f"Error matching trials: {str(e)}")
return pd.DataFrame()
def get_trial_details(df: pd.DataFrame, evt: gr.SelectData) -> Tuple[str, dict]:
"""Get full trial details when user clicks on a row."""
try:
if df is None or len(df) == 0:
return "No trial selected", None
row_idx = evt.index[0]
# Map renamed columns back to logic
nct_id = df.iloc[row_idx]['NCT ID']
this_space = df.iloc[row_idx]['Criteria Space']
# Find the specific trial space in original dataframe
# Match both NCT ID and the exact trial space text
matching_rows = state.trial_spaces_df[
(state.trial_spaces_df['nct_id'] == nct_id) &
(state.trial_spaces_df['this_space'] == this_space)
]
if len(matching_rows) == 0:
return f"Error: Could not find matching trial space for {nct_id}", None
trial_row = matching_rows.iloc[0]
# Create clinicaltrials.gov link
ct_gov_link = f"https://clinicaltrials.gov/study/{nct_id}"
details = f"""
# Trial Details: {nct_id}
**🔗 [View on ClinicalTrials.gov]({ct_gov_link})**
---
## Eligibility Criteria Summary (Selected Space)
{trial_row['this_space']}
## Full Trial Text
{trial_row['trial_text']}
## Boilerplate Exclusions
{trial_row['trial_boilerplate_text']}
"""
# Return trial data for the deeper screen
trial_data = {
"nct_id": nct_id,
"this_space": trial_row['this_space'],
"trial_boilerplate_text": trial_row['trial_boilerplate_text']
}
return details, trial_data
except Exception as e:
return f"Error retrieving trial details: {str(e)}", None
def run_deeper_screen(
trial_data: dict,
patient_summary: str,
patient_boilerplate: str,
show_reasoning: bool,
progress=gr.Progress(track_tqdm=True)
) -> Tuple[str, str, dict]:
"""Run deeper screen using LLM for trial and boilerplate checks."""
if not trial_data:
return "Please select a trial first.", "Please select a trial first.", {}
if state.llm_model is None:
return "Please load LLM model first.", "Please load LLM model first.", {}
if not patient_summary or not patient_boilerplate:
return "Missing patient summary or boilerplate.", "Missing patient summary or boilerplate.", {}
try:
progress(0, desc="Starting deeper screen...")
gr.Info("🧠 Starting deeper screen analysis... This may take 1-2 minutes.")
# 1. Generate Trial Checker Reasoning
trial_msg = [
{"role": "system", "content": "Reasoning: high."},
{"role": "user", "content": DEEPER_SCREEN_TRIAL_PROMPT.format(
trial_space=trial_data['this_space'],
patient_summary=patient_summary
)}
]
print("Running Deeper Screen: Checking Trial Criteria...")
progress(0.2, desc="⏳ Checking Trial Criteria...")
gr.Info("⏳ Analyzing trial eligibility criteria...")
# Run LLM for trial check - use n=1 but slightly higher temp/top_p as requested
trial_raw = _run_llm_inference(trial_msg, temperature=0.5, top_p=0.9)
# 2. Generate Boilerplate Reasoning
bp_msg = [
{"role": "system", "content": "Reasoning: high."},
{"role": "user", "content": DEEPER_SCREEN_BOILERPLATE_PROMPT.format(
trial_boilerplate=trial_data['trial_boilerplate_text'],
patient_boilerplate=patient_boilerplate
)}
]
print("Running Deeper Screen: Checking Boilerplate...")
progress(0.6, desc="⏳ Checking Boilerplate Exclusions...")
gr.Info("⏳ Analyzing boilerplate exclusion criteria...")
# Run LLM for boilerplate check - use n=1 but slightly higher temp/top_p as requested
bp_raw = _run_llm_inference(bp_msg, temperature=1.0, top_p=0.9)
progress(1.0, desc="✓ Deeper screen complete!")
gr.Info("✅ Deeper screen analysis complete!")
# Store raw outputs
raw_outputs = {
"trial_raw": trial_raw,
"bp_raw": bp_raw
}
# Format for display
trial_display = toggle_reasoning_display(trial_raw, show_reasoning)
bp_display = toggle_reasoning_display(bp_raw, show_reasoning)
return trial_display, bp_display, raw_outputs
except Exception as e:
err = f"Error in deeper screen: {str(e)}"
gr.Error(err)
return err, err, {}
def _run_llm_inference(messages: List[dict], temperature: float = 0.0, top_p: float = 1.0) -> str:
"""Helper to run inference with loaded LLM."""
if hasattr(state.llm_model, 'generate') and hasattr(state.llm_model, 'get_tokenizer'):
# vLLM
from vllm import SamplingParams
prompt = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
tokenize=False
)
# Ensure temperature is not 0 if top_p is used (vLLM requirement often)
# But if temp=0, top_p is ignored. The user requested temp=0.5, top_p=0.9.
response = state.llm_model.generate(
[prompt],
SamplingParams(
temperature=temperature,
top_p=top_p if temperature > 0 else 1.0,
max_tokens=7500,
repetition_penalty=1.2
)
)
return response[0].outputs[0].text
else:
# HuggingFace
input_ids = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
return_tensors="pt"
).to(state.device)
with torch.no_grad():
outputs = state.llm_model.generate(
input_ids,
max_new_tokens=7500,
temperature=temperature,
top_p=top_p if temperature > 0 else 1.0,
do_sample=(temperature > 0),
repetition_penalty=1.2
)
return state.llm_tokenizer.decode(outputs[0], skip_special_tokens=True)
def update_deeper_screen_display(raw_outputs: dict, show_reasoning: bool) -> Tuple[str, str]:
"""Toggle reasoning display for existing deeper screen results."""
if not raw_outputs:
return "", ""
trial_display = toggle_reasoning_display(raw_outputs.get("trial_raw", ""), show_reasoning)
bp_display = toggle_reasoning_display(raw_outputs.get("bp_raw", ""), show_reasoning)
return trial_display, bp_display
# ============================================================================
# LLM NOTE TAGGING
# ============================================================================
def build_tagging_messages(text: str) -> List[dict]:
"""Build prompt messages for note tagging."""
temp_patient = re.sub(r"[\n\r]", " ", text.strip())
temp_patient = re.sub(r"\s+", " ", temp_patient)
sentences = "<excerpt break>" + re.sub(r"\. ", "<excerpt break>", temp_patient) + "<excerpt break>"
system_msg = {
"role": "system",
"content": """You are an oncology clinical note data extraction bot.
Your job is to review a list of excerpts from a clinical document and extract the excerpts relevant to a list of questions.
Reasoning: high
"""
}
user_msg = {
"role": "user",
"content": (
"The list of excerpts, separated by <excerpt break>, is: " + sentences +
"""Now, list the excerpts relevant to any of the following questions.
Format your answer as JSON, tagging each excerpt that is relevant to at least one question with each tag to which it is relevant.
Here is the list of questions:
How old is the patient? (Tag: age)
What is the patient's sex? (Tag: sex)
What type of cancer (primary site and histology) does the patient have? (Tag: cancer_type )
What was the stage at diagnosis? (Tag: stage_at_diagnosis)
What treatments (including surgery, radiation, or systemic therapy) has the patient received? (Tag: treatment)
How widespread is the cancer currently? (Tag: cancer_burden)
What is the prognosis, prognostic score, or risk category? (Tag: prognosis_and_risk)
Is there response to therapy or progressive disease? (Tag: cancer_status)
Is the patient experiencing an adverse event of treatment? (Tag: adverse_event)
What biomarkers, such as protein expression and genetic mutations/alterations, does the patient's tumor have? (Tag: biomarker)
What comorbidities, or diseases other than cancer, does the patient have? (Tag: comorbidity)
Are there uncontrolled brain metastases? (Tag: uncontrolled_brain_met)
Is there measurable disease, meaning a tumor at least 1 cm across or lymph node at least 1.5 cm in short axis dimension? (Tag: measurable_disease)
Is there progressive (worsening) disease? (Tag: progressive_disease)
Is there a history of pneumonitis? (Tag: pneumonitis)
Is there a history of colitis? (Tag: colitis)
Is there a history of hepatitis or HIV? (Tag: hepatitis_or_hiv)
Is the patient anemic, with hemoglobin under 10? (Tag: anemia)
Is there a reduced renal function/creatinine clearance, with estimated GFR < 60? (Tag: renal_dysfunction)
Is there liver dysfunction, with elevated bilirubin, AST, or ALT? (Tag: liver_dysfunction)
Is there a history of heart failure? (Tag: heart_failure)
Does the patient have a poor performance status and/or ECOG performance status of 2 or more? (Tag: poor_ps)
What adverse side effects of treatment has the patient had? (Tag: adverse_event)
Here is an example of the output format:
[{"excerpt": "80M with metastatic lung adenocarcinoma.", "tags": ["age", "sex", "cancer_type", "cancer_burden"]},
{"excerpt": "The tumor was HER2 positive.", "tags": ["biomarker"]},
{"excerpt": "Imaging demonstrated new bilateral lung infiltrates.", "tags": ["pneumonitis", "adverse_event"]},
{"excerpt": "LV ejection fraction was 35%.", "tags": ["heart_failure"]}
]
Do not include excerpts that are not relevant to the questions.
Do not abbreviate or alter excerpts that you do include; copy them verbatim from the prompt.
Do not add disclaimers or introductory text.
If there are no excerpts relevant to the above questions, just output blank JSON {} .
"""
)
}
return [system_msg, user_msg]
def toggle_reasoning_display(raw_text: str, show: bool) -> str:
"""Helper to toggle reasoning display."""
if not raw_text:
return ""
if show:
return raw_text.strip()
if REASONING_MARKER in raw_text:
return raw_text.split(REASONING_MARKER, 1)[-1].strip()
return raw_text.strip()
def tag_patient_note(note_text: str, show_reasoning: bool = False, progress=gr.Progress(track_tqdm=True)) -> Tuple[str, str]:
"""Tag a patient note using the loaded LLM."""
try:
progress(0, desc="Starting note tagging...")
gr.Info("🏷️ Starting note tagging... This may take 30-60 seconds.")
if state.llm_model is None:
return "✗ Please load the LLM model first!", ""
if not note_text or not note_text.strip():
return "✗ No note text provided", ""
progress(0.2, desc="Building prompt...")
messages = build_tagging_messages(note_text)
print("Tagging patient note with LLM...")
progress(0.3, desc="⏳ Running LLM inference...")
gr.Info("⏳ LLM inference in progress... Please wait.")
# Check if using vLLM or HuggingFace
if hasattr(state.llm_model, 'generate') and hasattr(state.llm_model, 'get_tokenizer'):
# vLLM
from vllm import SamplingParams
prompt = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
tokenize=False
)
response = state.llm_model.generate(
[prompt],
SamplingParams(
temperature=0.0,
top_k=1,
max_tokens=7500,
repetition_penalty=1.2
)
)
output = response[0].outputs[0].text
else:
# HuggingFace
input_ids = state.llm_tokenizer.apply_chat_template(
conversation=messages,
add_generation_prompt=True,
return_tensors="pt"
).to(state.device)
with torch.no_grad():
outputs = state.llm_model.generate(
input_ids,
max_new_tokens=4096,
temperature=0.0,
do_sample=False,
repetition_penalty=1.2
)
output = state.llm_tokenizer.decode(outputs[0], skip_special_tokens=True)
# Store raw output
raw_output = output
progress(1.0, desc="✓ Tagging complete!")
gr.Info("✅ Note tagging complete!")
# Determine initial display based on toggle
display_output = toggle_reasoning_display(raw_output, show_reasoning)
return display_output, raw_output
except Exception as e:
gr.Error(f"Note tagging failed: {str(e)}")
return f"✗ Error tagging note: {str(e)}", ""
def load_notes_for_tagging(file) -> Tuple[pd.DataFrame, str]:
"""Load notes specifically for the tagging tab."""
try:
if file.name.endswith('.csv'):
df = pd.read_csv(file.name)
elif file.name.endswith(('.xlsx', '.xls')):
df = pd.read_excel(file.name)
else:
return None, "✗ Unsupported file format"
if 'text' not in df.columns:
return None, "✗ File must contain 'text' column"
# Add an index column for display if not present
if 'note_id' not in df.columns:
df.insert(0, 'note_id', range(1, len(df) + 1))
# Ensure text column is string
df['text'] = df['text'].astype(str)
# Select relevant columns for preview
cols = ['note_id', 'text']
if 'date' in df.columns:
cols.insert(1, 'date')
return df[cols], f"✓ Loaded {len(df)} notes"
except Exception as e:
return None, f"✗ Error loading notes: {str(e)}"
def select_note_for_tagging(df: pd.DataFrame, evt: gr.SelectData) -> str:
"""Extract text from selected row in tagging dataframe."""
try:
if df is None: return ""
row_idx = evt.index[0]
return df.iloc[row_idx]['text']
except Exception as e:
return f"Error selecting note: {e}"
# ============================================================================
# GRADIO INTERFACE
# ============================================================================
def create_interface():
# Attempt to load sample patient notes for pre-population
sample_notes_path = None
try:
gr.Info("Loading sample patient notes...")
# Correct raw URL for the CSV file
sample_url = "https://huggingface.co/datasets/ksg-dfci/mmai-synthetic/raw/main/sample_patient_notes.csv"
sample_df = pd.read_csv(sample_url)
# Create a temporary file to hold the sample notes
with tempfile.NamedTemporaryFile(delete=False, mode='w', suffix='.csv', newline='') as tmpfile:
sample_df.to_csv(tmpfile, index=False)
sample_notes_path = tmpfile.name
print(f"✓ Sample patient notes loaded and saved to temp file: {sample_notes_path}")
except Exception as e:
gr.Warning(f"Could not pre-load sample patient notes: {e}")
print(f"✗ Warning: Could not pre-load sample patient notes: {e}")
with gr.Blocks(title="MatchMiner-AI") as demo:
with gr.Row(variant="panel"):
with gr.Column(scale=4):
gr.Markdown("""
# 🏥 MatchMiner-AI
**Clinical Trial Matching Pipeline**
""")
with gr.Column(scale=1):
pass
with gr.Tabs():
# ============= TAB 1: PATIENT INPUT =============
with gr.Tab("1️⃣ Patient Input"):
with gr.Tab("Option A: Upload Clinical Notes"):
with gr.Group():
gr.Markdown("### 📄 Upload Records")
notes_file = gr.File(
label="Upload Patient Notes (CSV or Excel)",
file_types=[".csv", ".xlsx", ".xls"],
value=sample_notes_path
)
# Hidden advanced option
with gr.Accordion("Advanced Options", open=False):
prob_threshold = gr.Slider(
minimum=0.0, maximum=1.0, value=0.1, step=0.05,
label="Tagger Threshold",
info="Probability threshold for including excerpts"
)
process_notes_btn = gr.Button("Process Notes", variant="primary")
with gr.Row():
with gr.Column():
notes_status = gr.Textbox(label="Processing Status", interactive=False)
with gr.Column():
pass # Removed summary button from here
long_note_output = gr.Textbox(
label="Extracted Patient History (Long Note)",
lines=10,
interactive=False
)
process_notes_btn.click(
fn=process_patient_notes,
inputs=[notes_file, prob_threshold],
outputs=[notes_status, long_note_output]
)
with gr.Tab("Option B: Enter Patient Summary"):
gr.Markdown("Enter a patient summary directly (skip note processing)")
# Shared summary fields in a visual group
with gr.Group():
gr.Markdown("### 📝 Patient Summary & Boilerplate")
with gr.Row():
summarize_btn = gr.Button("Summarize Patient History (from Long Note)", variant="secondary")
summarize_status = gr.Textbox(label="Status", interactive=False, scale=2)
show_reasoning_summary_chk = gr.Checkbox(label="Show Reasoning (Chain of Thought)", value=False)
with gr.Row():
patient_summary = gr.Textbox(
label="Patient Summary",
lines=12,
placeholder="Enter or generate patient summary here...",
info="Age, sex, Cancer type, histology, extent, biomarkers, treatment history"
)
patient_boilerplate = gr.Textbox(
label="Patient Boilerplate Text",
lines=12,
placeholder="Mentions of exclusion criteria (brain mets, etc.)",
info="Evidence of potential boilerplate exclusions"
)
# Hidden state to store the raw output including reasoning
raw_summary_state = gr.State("")
# Wire up summarization to output to textboxes and state
summarize_btn.click(
fn=summarize_patient_history,
inputs=[long_note_output],
outputs=[raw_summary_state, summarize_status]
).then(
fn=parse_summary_output,
inputs=[raw_summary_state, show_reasoning_summary_chk],
outputs=[patient_summary, patient_boilerplate]
)
show_reasoning_summary_chk.change(
fn=parse_summary_output,
inputs=[raw_summary_state, show_reasoning_summary_chk],
outputs=[patient_summary, patient_boilerplate]
)
# ============= TAB 2: TRIAL DATABASE =============
with gr.Tab("2️⃣ Trial Database"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("""
### 🗃️ Upload Trial Space Database
Upload a CSV or Excel file containing trial information.
**Required Columns:** `nct_id`, `this_space`, `trial_text`, `trial_boilerplate_text`
**💡 TIP:** For faster loading, use pre-embedded trials defined in `config.py`.
""")
trial_file = gr.File(
label="Upload Trial Database",
file_types=[".csv", ".xlsx", ".xls"]
)
trial_upload_btn = gr.Button("Load and Embed Trials", variant="primary")
trial_status = gr.Textbox(
label="Status",
interactive=False,
value=state.auto_load_status.get("trials", "")
)
with gr.Column(scale=2):
gr.Markdown("### Preview")
trial_preview = gr.Dataframe(
label="Preview (first 10 trials)",
interactive=False,
value=state.trial_preview_df,
wrap=True
)
trial_upload_btn.click(
fn=load_and_embed_trials,
inputs=[trial_file],
outputs=[trial_status, trial_preview]
)
# ============= TAB 3: TRIAL MATCHING =============
with gr.Tab("3️⃣ Trial Matching"):
with gr.Row():
with gr.Column(scale=1):
match_btn = gr.Button("🔍 Find Matching Trials", variant="primary", size="lg")
with gr.Column(scale=3):
# Hidden advanced option for sliders
with gr.Accordion("Search Settings", open=False):
top_k_slider = gr.Slider(
minimum=5, maximum=50, value=20, step=5,
label="Number of Top Trials to Check",
info="How many top-ranked trials to run eligibility checks on"
)
gr.Markdown("### 📊 Results")
with gr.Row():
with gr.Column(scale=7):
results_df = gr.Dataframe(
label="Matched Trials",
interactive=False,
wrap=True,
datatype=["str", "markdown", "markdown", "str", "str"], # Markdown enables colored text/emojis
column_widths=["15%", "15%", "15%", "10%", "45%"]
)
with gr.Column(scale=5):
trial_details = gr.Markdown(
label="Trial Details",
value="<div style='text-align: center; padding: 50px; color: #666;'>👈 Click on a trial row to see full details here</div>"
)
gr.Markdown("---")
gr.Markdown("### 🧠 Deeper Screen (LLM)")
with gr.Row():
deeper_screen_btn = gr.Button("Run Deeper Screen", variant="secondary")
show_reasoning_chk = gr.Checkbox(label="Show Chain of Thought", value=False)
with gr.Accordion("Trial Checker Reasoning", open=True):
trial_reasoning_output = gr.Textbox(show_label=False, lines=8, placeholder="Run deeper screen to see reasoning...")
with gr.Accordion("Boilerplate Checker Reasoning", open=True):
bp_reasoning_output = gr.Textbox(show_label=False, lines=8, placeholder="Run deeper screen to see reasoning...")
# Hidden states
trial_data_state = gr.State({})
deeper_screen_raw_state = gr.State({})
# Wire up matching
match_btn.click(
fn=match_trials,
inputs=[patient_summary, patient_boilerplate, top_k_slider],
outputs=[results_df]
)
results_df.select(
fn=get_trial_details,
inputs=[results_df],
outputs=[trial_details, trial_data_state]
)
deeper_screen_btn.click(
fn=run_deeper_screen,
inputs=[trial_data_state, patient_summary, patient_boilerplate, show_reasoning_chk],
outputs=[trial_reasoning_output, bp_reasoning_output, deeper_screen_raw_state]
)
show_reasoning_chk.change(
fn=update_deeper_screen_display,
inputs=[deeper_screen_raw_state, show_reasoning_chk],
outputs=[trial_reasoning_output, bp_reasoning_output]
)
# ============= TAB 4: TRIAL SPACE EXTRACTION =============
with gr.Tab("4️⃣ Trial Space Extraction"):
gr.Markdown("""
### 🧬 Extract Trial Spaces
Paste clinical trial text (title + summary + eligibility) to extract structured spaces.
""")
with gr.Row():
with gr.Column():
trial_text_input = gr.Textbox(
label="Clinical Trial Text",
placeholder="Paste text from ClinicalTrials.gov...",
lines=15,
)
extract_btn = gr.Button("Extract Trial Spaces", variant="primary")
with gr.Column():
trial_spaces_output = gr.Textbox(
label="Extracted Results",
lines=15,
interactive=False
)
extract_btn.click(
fn=extract_trial_spaces,
inputs=[trial_text_input],
outputs=[trial_spaces_output]
)
# ============= TAB 5: NOTE TAGGING =============
with gr.Tab("5️⃣ Note Tagging"):
gr.Markdown("""
### 🏷️ LLM Note Tagging
Upload patient notes, select a note, and use the LLM to extract structured tags.
""")
with gr.Row():
with gr.Column(scale=1):
tagging_file = gr.File(
label="Upload Notes (CSV/Excel)",
file_types=[".csv", ".xlsx", ".xls"],
value=sample_notes_path
)
load_tagging_btn = gr.Button("Load Notes", variant="secondary")
tagging_status = gr.Textbox(label="Status", interactive=False)
gr.Markdown("### 📋 Select Note")
tagging_notes_df = gr.Dataframe(
label="Available Notes (Click to Select)",
interactive=False,
wrap=True
)
with gr.Column(scale=1):
selected_note_text = gr.Textbox(
label="Selected Note Text",
lines=10,
placeholder="Select a note from the table on the left..."
)
tag_note_btn = gr.Button("🏷️ Tag Note with LLM", variant="primary")
show_tagging_reasoning_chk = gr.Checkbox(label="Show Reasoning (Chain of Thought)", value=False)
tagging_output = gr.Textbox(
label="Tagging Output (JSON)",
lines=15,
interactive=False
)
# Hidden state to store the raw output including reasoning
raw_tagging_output = gr.State("")
# Wire up tagging tab
load_tagging_btn.click(
fn=load_notes_for_tagging,
inputs=[tagging_file],
outputs=[tagging_notes_df, tagging_status]
)
# Also load on startup if file is present
if sample_notes_path:
demo.load(
fn=load_notes_for_tagging,
inputs=[tagging_file],
outputs=[tagging_notes_df, tagging_status]
)
tagging_notes_df.select(
fn=select_note_for_tagging,
inputs=[tagging_notes_df],
outputs=[selected_note_text]
)
tag_note_btn.click(
fn=tag_patient_note,
inputs=[selected_note_text, show_tagging_reasoning_chk],
outputs=[tagging_output, raw_tagging_output]
)
show_tagging_reasoning_chk.change(
fn=toggle_reasoning_display,
inputs=[raw_tagging_output, show_tagging_reasoning_chk],
outputs=[tagging_output]
)
# ============= TAB 6: MODEL CONFIGURATION =============
with gr.Tab("6️⃣ Model Configuration"):
gr.Markdown("### 🧠 Model Management")
status_msg = """
**Config file detected** - Models will auto-load on startup.
""" if HAS_CONFIG else """
**No config file found** - Please load models manually below.
"""
gr.Info(status_msg)
with gr.Group():
with gr.Row():
with gr.Column():
tagger_input = gr.Textbox(label="TinyBERT Tagger Model", placeholder="prajjwal1/bert-tiny")
tagger_btn = gr.Button("Load Tagger")
tagger_status = gr.Textbox(label="Status", interactive=False, value=state.auto_load_status.get("tagger", ""), elem_classes=["model-status"])
with gr.Column():
embedder_input = gr.Textbox(label="Trial Space Embedder", placeholder="Qwen/Qwen3-Embedding-0.6B")
embedder_btn = gr.Button("Load Embedder")
embedder_status = gr.Textbox(label="Status", interactive=False, value=state.auto_load_status.get("embedder", ""), elem_classes=["model-status"])
embedder_warning = gr.Textbox(visible=False)
with gr.Group():
with gr.Row():
with gr.Column():
llm_input = gr.Textbox(label="LLM Model (Summarization)", placeholder="openai/gpt-oss-120b")
llm_btn = gr.Button("Load LLM")
llm_status = gr.Textbox(label="Status", interactive=False, value=state.auto_load_status.get("llm", ""), elem_classes=["model-status"])
with gr.Column():
trial_checker_input = gr.Textbox(label="Trial Checker Model", placeholder="answerdotai/ModernBERT-large")
trial_checker_btn = gr.Button("Load Trial Checker")
trial_checker_status = gr.Textbox(label="Status", interactive=False, value=state.auto_load_status.get("trial_checker", ""), elem_classes=["model-status"])
with gr.Row():
with gr.Column(scale=1):
boilerplate_checker_input = gr.Textbox(label="Boilerplate Checker Model", placeholder="answerdotai/ModernBERT-large")
boilerplate_checker_btn = gr.Button("Load Boilerplate Checker")
boilerplate_checker_status = gr.Textbox(label="Status", interactive=False, value=state.auto_load_status.get("boilerplate_checker", ""), elem_classes=["model-status"])
with gr.Column(scale=1):
pass
# Wire up model loading
tagger_btn.click(fn=load_tagger_model, inputs=[tagger_input], outputs=[tagger_status, gr.Textbox(visible=False)])
embedder_btn.click(fn=load_embedder_model, inputs=[embedder_input], outputs=[embedder_status, gr.Textbox(visible=False), embedder_warning])
llm_btn.click(fn=load_llm_model, inputs=[llm_input], outputs=[llm_status, gr.Textbox(visible=False)])
trial_checker_btn.click(fn=load_trial_checker, inputs=[trial_checker_input], outputs=[trial_checker_status, gr.Textbox(visible=False)])
boilerplate_checker_btn.click(fn=load_boilerplate_checker, inputs=[boilerplate_checker_input], outputs=[boilerplate_checker_status, gr.Textbox(visible=False)])
return demo
# ============================================================================
# MAIN
# ============================================================================
if __name__ == "__main__":
print(f"Device: {state.device}")
print(f"GPU Available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
print(f"GPU Count: {torch.cuda.device_count()}")
# Auto-load models from config if available
if HAS_CONFIG:
auto_load_models_from_config()
# Auto-load trials after embedder is ready
if state.embedder_model is not None or (hasattr(config, 'PREEMBEDDED_TRIALS') and config.PREEMBEDDED_TRIALS):
auto_load_trials_from_config()
# Custom theme and CSS for a cleaner, modern look
theme = gr.themes.Soft(
primary_hue="blue",
secondary_hue="slate",
).set(
body_background_fill="*neutral_50",
block_background_fill="white",
block_border_width="1px",
block_label_background_fill="*primary_50",
)
custom_css = """
.gradio-container { font-family: 'Inter', Arial, sans-serif !important; }
.model-status { min-height: 80px !important; font-size: 0.9em; }
.status-box { background: #f9fafb; border: 1px solid #e5e7eb; border-radius: 8px; padding: 10px; }
h1 { color: #1e3a8a; }
"""
demo = create_interface()
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
theme=theme,
css=custom_css
)