VictorLJZ committed d5baad4 · 1 parent: 752b42b

working RAG implementation

main.py CHANGED
@@ -129,8 +129,7 @@ if __name__ == "__main__":
     # Example: initialize with only specific tools
     # Here three tools are commented out, you can uncomment them to use them
     selected_tools = [
-        "ImageVisualizerTool",  # For displaying images in the UI
-        "WebBrowserTool",  # For web browsing and search capabilities
+        # "ImageVisualizerTool",  # For displaying images in the UI
         # "DicomProcessorTool",  # For processing DICOM medical image files
         # "TorchXRayVisionClassifierTool",  # For classifying chest X-ray images using TorchXRayVision
         # "ArcPlusClassifierTool",  # For advanced chest X-ray classification using ArcPlus
@@ -140,20 +139,23 @@ if __name__ == "__main__":
         # "LlavaMedTool",  # For multimodal medical image understanding
         # "XRayPhraseGroundingTool",  # For locating described features in X-rays
         # "ChestXRayGeneratorTool",  # For generating synthetic chest X-rays
-        # "MedicalRAGTool",  # For retrieval-augmented generation with medical knowledge
+        "WebBrowserTool",  # For web browsing and search capabilities
+        "MedicalRAGTool",  # For retrieval-augmented generation with medical knowledge
     ]
 
     # Configure the Retrieval Augmented Generation (RAG) system
     # This allows the agent to access and use medical knowledge documents
     rag_config = RAGConfig(
-        model="embed-v4.0",  # Set COHERE_API_KEY in .env
-        temperature=0.7,
+        model="command-r-plus",  # Chat model for generating responses
+        embedding_model="embed-v4.0",  # Set COHERE_API_KEY in .env
+        temperature=0.3,
         pinecone_index_name="medrax",  # Name for the Pinecone index
-        chunk_size=1000,
-        chunk_overlap=100,
-        retriever_k=3,
+        chunk_size=1500,
+        chunk_overlap=300,
+        retriever_k=7,
         local_docs_dir="medrax/rag/docs",  # Change this to the path of the documents for RAG
-        use_medrag_textbooks=True,  # Set to True if you want to use the MedRAG textbooks dataset
+        huggingface_datasets=["VictorLJZ/medrax"],  # List of HuggingFace datasets to load
+        dataset_split="train",  # Which split of the datasets to use
     )
 
     # Prepare any additional model-specific kwargs
@@ -173,7 +175,7 @@ if __name__ == "__main__":
         model_dir="/model-weights",
         temp_dir="temp",  # Change this to the path of the temporary directory
         device="cuda",
-        model="gpt-4.1-2025-04-14",  # Change this to the model you want to use, e.g. gpt-4.1-2025-04-14, gemini-2.5-pro
+        model="gpt-4o",  # Change this to the model you want to use, e.g. gpt-4.1-2025-04-14, gemini-2.5-pro
         temperature=0.7,
         top_p=0.95,
         model_kwargs=model_kwargs,
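
The updated config pulls in both Cohere (chat and embedding models) and a Pinecone index, so it can help to fail fast on missing credentials before starting the agent. A minimal pre-flight sketch, assuming python-dotenv is available and that the Pinecone client reads `PINECONE_API_KEY` (only `COHERE_API_KEY` is named in the comments above; the Pinecone variable name is an assumption):

```python
import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv()  # pull COHERE_API_KEY (and, by assumption, PINECONE_API_KEY) from .env

missing = [key for key in ("COHERE_API_KEY", "PINECONE_API_KEY") if not os.getenv(key)]
if missing:
    raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")
```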
medrax/models/model_factory.py CHANGED
@@ -115,9 +115,16 @@ class ModelFactory:
 
         # Strip the provider prefix from the model name
         # For example, 'openrouter-anthropic/claude-sonnet-4' becomes 'anthropic/claude-sonnet-4'
+        # But for OpenAI models like 'gpt-4o', we keep the full name since 'gpt-' is part of the model name
         actual_model_name = model_name
-        if model_name.startswith(f"{provider_prefix}-"):
+        if provider_prefix in ["openrouter"] and model_name.startswith(f"{provider_prefix}-"):
             actual_model_name = model_name[len(provider_prefix)+1:]
+        elif provider_prefix in ["gpt", "chatgpt"]:
+            # For OpenAI models, use the full model name (gpt-4o, gpt-3.5-turbo, etc.)
+            actual_model_name = model_name
+        elif provider_prefix == "gemini" and model_name.startswith("gemini-"):
+            # For Gemini models, use the full model name (gemini-1.5-pro, etc.)
+            actual_model_name = model_name
 
         # Create and return the model instance
         return model_class(
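
The prefix handling above is easiest to read as input/output pairs. A minimal standalone sketch (a hypothetical `resolve_model_name` helper, not the actual `ModelFactory` method) that mirrors the same logic:

```python
def resolve_model_name(provider_prefix: str, model_name: str) -> str:
    """Strip the routing prefix only when it is not part of the model id (hypothetical helper)."""
    if provider_prefix == "openrouter" and model_name.startswith(f"{provider_prefix}-"):
        return model_name[len(provider_prefix) + 1:]
    # "gpt-", "chatgpt-" and "gemini-" are part of the model id itself, so keep the full name.
    return model_name


assert resolve_model_name("openrouter", "openrouter-anthropic/claude-sonnet-4") == "anthropic/claude-sonnet-4"
assert resolve_model_name("gpt", "gpt-4o") == "gpt-4o"
assert resolve_model_name("gemini", "gemini-2.5-pro") == "gemini-2.5-pro"
```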
medrax/rag/rag.py CHANGED
@@ -3,6 +3,10 @@ import time
 from pathlib import Path
 from typing import List, Optional, Dict, Any
 from pydantic import Field
+import gc
+import sys
+import signal
+from contextlib import contextmanager
 
 from pydantic import BaseModel, Field
 from langchain_cohere import ChatCohere, CohereEmbeddings, CohereRerank
@@ -19,6 +23,70 @@ from datasets import load_dataset
 from tqdm import tqdm
 
 
+class TimeoutError(Exception):
+    """Custom timeout exception"""
+    pass
+
+
+@contextmanager
+def timeout(seconds):
+    """Context manager for timing out operations"""
+    def timeout_handler(signum, frame):
+        raise TimeoutError(f"Operation timed out after {seconds} seconds")
+
+    # Set the signal handler
+    old_handler = signal.signal(signal.SIGALRM, timeout_handler)
+    signal.alarm(seconds)
+
+    try:
+        yield
+    finally:
+        # Restore the old signal handler
+        signal.signal(signal.SIGALRM, old_handler)
+        signal.alarm(0)
+
+
+def safe_extract_pdf_text(pdf_data, timeout_seconds=180):
+    """Safely extract text from PDF with timeout and fallback handling"""
+    try:
+        with timeout(timeout_seconds):
+            if isinstance(pdf_data, str):
+                return pdf_data, {"file_type": "pdf"}
+            elif hasattr(pdf_data, 'pages'):
+                # pdfplumber PDF object
+                pdf_text = ""
+                num_pages = len(pdf_data.pages)
+
+                for page in pdf_data.pages:
+                    text = page.extract_text()
+                    if text:
+                        pdf_text += text + "\n"
+
+                # Close the PDF to free resources
+                pdf_data.close()
+
+                return pdf_text.strip(), {"file_type": "pdf", "num_pages": num_pages}
+            else:
+                return None, None
+
+    except TimeoutError:
+        print(f"PDF processing timed out after {timeout_seconds} seconds")
+        try:
+            if hasattr(pdf_data, 'close'):
+                pdf_data.close()
+        except:
+            pass
+        return None, None
+    except Exception as e:
+        print(f"Error in PDF processing: {str(e)}")
+        try:
+            if hasattr(pdf_data, 'close'):
+                pdf_data.close()
        except:
+            pass
+        return None, None
+
+
 class RAGConfig(BaseModel):
     """Configuration class for RAG (Retrieval Augmented Generation) system.
 
@@ -32,7 +100,8 @@ class RAGConfig(BaseModel):
         chunk_size (int): Size of text chunks for splitting
         chunk_overlap (int): Overlap between text chunks
         local_docs_dir (str): Directory for text files
-        use_medrag_textbooks (bool): Whether to use MedRAG textbooks dataset
+        huggingface_datasets (List[str]): List of HuggingFace dataset names to load (e.g., ["MedRAG/textbooks", "VictorLJZ/medrax"])
+        dataset_split (str): Split to use for HuggingFace datasets (default: "train")
     """
 
     model: str = Field(default="command-a-03-2025")
@@ -44,7 +113,8 @@ class RAGConfig(BaseModel):
     chunk_size: int = Field(default=1000)
     chunk_overlap: int = Field(default=200)
     local_docs_dir: str = Field(default="medrax/rag/docs")
-    use_medrag_textbooks: bool = Field(default=True)
+    huggingface_datasets: List[str] = Field(default_factory=lambda: ["MedRAG/textbooks"])
+    dataset_split: str = Field(default="train")
 
 
 class RerankingRetriever(BaseRetriever):
@@ -191,49 +261,69 @@ class CohereRAG:
         )
 
         # Check if the index is empty and needs to be populated
-        index_description = self.pinecone.describe_index(self.index_name)
-        stats = index_description.stats
-
-        if not stats or stats.get('total_vector_count', 0) == 0:
-            print("Index is empty. Populating with documents...")
-            documents = self._load_all_documents()
-            if documents:
-                total_docs = len(documents)
-                print(
-                    f"Adding {total_docs} documents to the index. This may take a while..."
-                )
-
-                # Batching mechanism to handle rate limits
-                batch_size = 50  # Process 50 documents per batch
-                for i in tqdm(
-                    range(0, total_docs, batch_size),
-                    desc="Adding documents to Pinecone",
-                ):
-                    batch = documents[i : i + batch_size]
-                    vectorstore.add_documents(batch)
-                    if (i + batch_size) < total_docs:
-                        print(
-                            f"Batch {i // batch_size + 1} added. Waiting 60 seconds to avoid rate limiting..."
-                        )
-                        time.sleep(60)
-
-                print("Documents added successfully.")
+        try:
+            # Get the index object directly
+            index = self.pinecone.Index(self.index_name)
+
+            # Get index stats
+            stats = index.describe_index_stats()
+            print(f"Index stats: {stats}")
+
+            total_vectors = stats.get('total_vector_count', 0)
+            print(f"Total vectors in index: {total_vectors}")
+
+            if total_vectors == 0:
+                print("Index is empty. Populating with documents...")
+                documents = self._load_all_documents()
+                if documents:
+                    total_docs = len(documents)
+                    print(
+                        f"Adding {total_docs} documents to the index. This may take a while..."
+                    )
+
+                    # Batching mechanism to handle rate limits
+                    batch_size = 50  # Process 50 documents per batch
+                    for i in tqdm(
+                        range(0, total_docs, batch_size),
+                        desc="Adding documents to Pinecone",
+                    ):
+                        batch = documents[i : i + batch_size]
+                        vectorstore.add_documents(batch)
+                        # Removed rate limiting - process as fast as possible
+
+                    print("Documents added successfully.")
+
+                    # Verify documents were added
+                    final_stats = index.describe_index_stats()
+                    final_count = final_stats.get('total_vector_count', 0)
+                    print(f"Final vector count after adding documents: {final_count}")
+                else:
+                    print("Warning: No documents found to add to the new index.")
             else:
-                print("Warning: No documents found to add to the new index.")
-        else:
-            print("Index already populated.")
+                print(f"Index already populated with {total_vectors} vectors.")
+
+        except Exception as e:
+            print(f"Error checking index stats: {e}")
+            print("Proceeding without stats check...")
 
         return vectorstore
 
     def _load_all_documents(self) -> List[Document]:
         """Collect documents from all enabled sources."""
         all_documents = []
-        if self.config.use_medrag_textbooks:
-            print("Loading documents from MedRAG textbooks...")
-            medrag_docs = self.load_medrag_textbooks()
-            all_documents.extend(medrag_docs)
-            print(f"Loaded {len(medrag_docs)} documents from MedRAG textbooks")
-
+
+        # Load HuggingFace datasets
+        for dataset_name in self.config.huggingface_datasets:
+            print(f"Loading documents from HuggingFace dataset: {dataset_name}...")
+            try:
+                hf_docs = self.load_huggingface_dataset(dataset_name, self.config.dataset_split)
+                all_documents.extend(hf_docs)
+                print(f"Loaded {len(hf_docs)} documents from {dataset_name}")
+            except Exception as e:
+                print(f"Error loading dataset {dataset_name}: {str(e)}")
+                continue
+
+        # Load local documents
         if os.path.exists(self.local_docs_dir):
             print(f"Loading documents from local directory: {self.local_docs_dir}")
            local_docs = self.load_directory(self.local_docs_dir)
@@ -295,37 +385,257 @@ class CohereRAG:
 
         return RetrievalQA.from_chain_type(**chain_kwargs)
 
-    def load_medrag_textbooks(self) -> List[Document]:
-        """Load MedRAG textbooks dataset from Hugging Face.
+    def load_huggingface_dataset(self, dataset_name: str, split: str = "train") -> List[Document]:
+        """Load dataset from Hugging Face with batch processing for memory efficiency.
+
+        Args:
+            dataset_name (str): Name of the dataset on Hugging Face (e.g., "MedRAG/textbooks", "VictorLJZ/medrax")
+            split (str): Dataset split to load (default: "train")
 
         Returns:
-            List[Document]: List of processed documents from MedRAG textbooks
+            List[Document]: List of processed documents from the dataset
 
         Raises:
            ValueError: If unable to load the dataset
         """
         try:
-            print("Loading MedRAG textbooks dataset...")
-            dataset = load_dataset("MedRAG/textbooks", split="train")
-            documents = []
-
-            for item in tqdm(
-                dataset, desc="Processing MedRAG textbooks", total=len(dataset), unit="chunk"
-            ):
-                # Create a Document object for each textbook snippet
-                doc = Document(
-                    page_content=item["content"],
-                    metadata={
-                        "source": f"MedRAG/textbooks",
-                        "id": item["id"],
-                        "title": item["title"],
-                    },
-                )
-                documents.append(doc)
-
-            print(f"Loaded {len(documents)} document chunks from MedRAG textbooks")
-            return documents
+            print(f"Loading {dataset_name} dataset from Hugging Face...")
+
+            # Special handling for PDF-heavy datasets
+            is_pdf_dataset = "medrax" in dataset_name.lower()
+            batch_size = 20 if is_pdf_dataset else 100  # Smaller batches for PDF datasets
+
+            # Load dataset
+            dataset = load_dataset(dataset_name, split=split, trust_remote_code=True)
+
+            # Configure text splitter
+            text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=self.config.chunk_size,
+                chunk_overlap=self.config.chunk_overlap,
+                length_function=len,
+            )
+
+            # Track progress
+            successfully_processed = 0
+            failed_items = 0
+            timeout_failures = 0
+            extraction_failures = 0
+            failed_pdfs = []  # Track details of failed PDFs
+            all_documents = []
+            total_items = len(dataset)
+
+            print(f"Processing {total_items} items in batches of {batch_size}...")
+            print(f"PDF processing timeout set to 30 seconds per PDF")
+
+            # Process in batches
+            for batch_start in range(0, total_items, batch_size):
+                batch_end = min(batch_start + batch_size, total_items)
+                batch_num = (batch_start // batch_size) + 1
+                total_batches = (total_items + batch_size - 1) // batch_size
+
+                print(f"\nProcessing batch {batch_num}/{total_batches} (items {batch_start}-{batch_end-1})...")
+
+                batch_documents = []
+                batch_failed = 0
+
+                # Process items in this batch
+                for idx in range(batch_start, batch_end):
+                    try:
+                        item = dataset[idx]
+                        content = None
+                        item_metadata = {
+                            "source": f"{dataset_name}",
+                            "dataset": dataset_name,
+                            "split": split,
+                            "item_index": idx,
+                        }
+
+                        # Extract identifying information for better tracking
+                        item_identifier = f"Index {idx}"
+                        if "title" in item:
+                            item_identifier += f" - Title: {item['title']}"
+                        elif "id" in item:
+                            item_identifier += f" - ID: {item['id']}"
+                        elif "name" in item:
+                            item_identifier += f" - Name: {item['name']}"
+                        elif "filename" in item:
+                            item_identifier += f" - File: {item['filename']}"
+
+                        # Try to extract first few words from content as identifier
+                        content_preview = None
+                        if "content" in item:
+                            content_preview = item["content"]
+                        elif "text" in item:
+                            content_preview = item["text"]
+
+                        if content_preview and len(str(content_preview).split()) > 0:
+                            item_identifier += f" - Preview: {str(content_preview).split()[:5]}..."
+
+                        print(f"Processing item {item_identifier}...")
+
+                        # Handle different dataset structures
+                        if "content" in item:
+                            content = item["content"]
+                        elif "text" in item:
+                            content = item["text"]
+                        elif "pdf" in item and item["pdf"] is not None:
+                            # Use safe PDF extraction with timeout
+                            pdf_data = item["pdf"]
+
+                            print(f"Processing PDF at index {idx}...")
+                            content, pdf_metadata = safe_extract_pdf_text(pdf_data, timeout_seconds=180)
+
+                            # Try to get a content preview for identification
+                            if content and len(content.strip()) > 0:
+                                preview_words = content.strip().split()[:8]
+                                content_preview = " ".join(preview_words) + "..." if len(preview_words) >= 8 else " ".join(preview_words)
+                                item_identifier += f" - Content: {content_preview}"
+
+                            if content is None:
+                                print(f"Failed to extract text from PDF at index {idx} - skipping")
+                                failed_items += 1
+                                batch_failed += 1
+                                timeout_failures += 1
+                                failed_pdfs.append({
+                                    "index": idx,
+                                    "identifier": item_identifier,
+                                    "reason": "timeout",
+                                    "details": "PDF processing timed out after 180 seconds"
+                                })
+                                continue
+
+                            if not content.strip():
+                                print(f"Warning: No text extracted from PDF at index {idx} - skipping")
+                                failed_items += 1
+                                batch_failed += 1
+                                extraction_failures += 1
+                                failed_pdfs.append({
+                                    "index": idx,
+                                    "identifier": item_identifier,
+                                    "reason": "no_text",
+                                    "details": "No text could be extracted from PDF"
+                                })
+                                continue
+
+                            # Add PDF metadata
+                            if pdf_metadata:
+                                item_metadata.update(pdf_metadata)
+                        else:
+                            # Try to find text fields
+                            text_fields = [k for k in item.keys() if isinstance(item[k], str) and len(str(item[k])) > 50]
+                            if text_fields:
+                                content = item[text_fields[0]]
+                            else:
+                                print(f"Warning: Could not find text content in item {idx}")
+                                failed_items += 1
+                                batch_failed += 1
+                                extraction_failures += 1
+                                failed_pdfs.append({
+                                    "index": idx,
+                                    "identifier": item_identifier,
+                                    "reason": "no_content",
+                                    "details": f"No text fields found in item keys: {list(item.keys())}"
+                                })
+                                continue
+
+                        if not content:
+                            failed_items += 1
+                            batch_failed += 1
+                            failed_pdfs.append({
+                                "index": idx,
+                                "identifier": item_identifier,
+                                "reason": "empty_content",
+                                "details": "Content was empty after processing"
+                            })
+                            continue
+
+                        # Add additional metadata
+                        for key, value in item.items():
+                            if key not in ["content", "text", "pdf"] and isinstance(value, (str, int, float, bool)):
+                                item_metadata[key] = value
+
+                        # Split long documents into chunks
+                        if len(content) > self.config.chunk_size * 1.5:
+                            temp_doc = Document(page_content=content, metadata=item_metadata)
+                            split_docs = text_splitter.split_documents([temp_doc])
+
+                            for i, doc in enumerate(split_docs):
+                                doc.metadata["chunk_index"] = i
+                                doc.metadata["total_chunks"] = len(split_docs)
+
+                            batch_documents.extend(split_docs)
+                        else:
+                            doc = Document(
+                                page_content=content,
+                                metadata=item_metadata,
+                            )
+                            batch_documents.append(doc)
+
+                        successfully_processed += 1
+
+                    except Exception as e:
+                        print(f"Error processing item {idx}: {str(e)}")
+                        failed_items += 1
+                        batch_failed += 1
+                        failed_pdfs.append({
+                            "index": idx,
+                            "identifier": item_identifier,
+                            "reason": "exception",
+                            "details": f"Exception during processing: {str(e)}"
+                        })
+                        continue
+
+                # Add batch documents to all documents
+                all_documents.extend(batch_documents)
+                batch_success = len(range(batch_start, batch_end)) - batch_failed
+                print(f"Batch {batch_num} complete: {len(batch_documents)} documents created from {batch_success} successful PDFs")
+                if batch_failed > 0:
+                    print(f"Batch {batch_num} failures: {batch_failed} PDFs failed to process")
+
+                # Memory cleanup after each batch
+                if is_pdf_dataset:
+                    print("Performing memory cleanup...")
+                    del batch_documents
+                    gc.collect()
+
+                    # Give a brief pause for system cleanup
+                    time.sleep(1)
+
+                # Report memory usage
+                if hasattr(sys, 'getsizeof'):
+                    try:
+                        mem_mb = sys.getsizeof(all_documents) / 1024 / 1024
+                        print(f"Current document memory usage: {mem_mb:.1f} MB")
+                    except:
+                        pass
+
+            print(f"\n{'='*60}")
+            print(f"Dataset processing complete!")
+            print(f"{'='*60}")
+            print(f"Total documents created: {len(all_documents)}")
+            print(f"Successfully processed: {successfully_processed} items")
+            print(f"Failed items: {failed_items}")
+            print(f" - Timeout failures: {timeout_failures}")
+            print(f" - Extraction failures: {extraction_failures}")
+            print(f" - Other failures: {failed_items - timeout_failures - extraction_failures}")
+            print(f"Success rate: {successfully_processed/(successfully_processed + failed_items)*100:.1f}%")
+
+            # Display details of failed PDFs
+            if failed_pdfs:
+                print(f"\n{'='*60}")
+                print(f"FAILED PDF DETAILS:")
+                print(f"{'='*60}")
+                for i, failed_pdf in enumerate(failed_pdfs, 1):
+                    print(f"{i}. {failed_pdf['identifier']}")
+                    print(f" Reason: {failed_pdf['reason'].upper()}")
+                    print(f" Details: {failed_pdf['details']}")
+                    if i < len(failed_pdfs):
+                        print()
+
+                print(f"{'='*60}")
+
+            return all_documents
 
         except Exception as e:
-            print(f"Error loading MedRAG textbooks: {str(e)}")
-            raise ValueError(f"Failed to load MedRAG textbooks dataset: {str(e)}")
+            print(f"Error loading {dataset_name}: {str(e)}")
+            raise ValueError(f"Failed to load dataset {dataset_name}: {str(e)}")
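
One caveat for the new `timeout` context manager: it relies on `signal.SIGALRM`, which is only available on Unix-like systems and only fires in the main thread, so `safe_extract_pdf_text` will not enforce its timeout on Windows or from worker threads. Note also that the module-level `TimeoutError` shadows the builtin of the same name, so callers should catch the imported class. A minimal usage sketch, assuming the package is importable:

```python
import time

from medrax.rag.rag import TimeoutError, timeout  # both added in this commit

try:
    with timeout(1):
        time.sleep(2)  # stand-in for a slow PDF extraction
except TimeoutError as exc:
    print(f"caught: {exc}")  # the SIGALRM handler fired after ~1 second
```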
medrax/tools/rag.py CHANGED
@@ -1,6 +1,7 @@
 from langchain.tools import BaseTool
 from medrax.rag.rag import RAGConfig, CohereRAG
 from langchain.chains import RetrievalQA
+from typing import Dict, Tuple, Any
 
 
 class RAGTool(BaseTool):
@@ -34,26 +35,56 @@ class RAGTool(BaseTool):
         self.rag = CohereRAG(config)
         self.chain = self.rag.initialize_rag(with_memory=True)
 
-    def _run(self, query: str) -> str:
+    def _run(self, query: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
         """Execute the RAG tool with the given query.
 
         Args:
             query (str): Medical question to answer
 
         Returns:
-            str: Generated answer from the RAG system
+            Tuple[Dict[str, Any], Dict[str, Any]]: Output dictionary and metadata dictionary
         """
-        result = self.chain.invoke({"query": query})
-        return result["result"]
+        try:
+            result = self.chain.invoke({"query": query})
+
+            output = {
+                "answer": result["result"],
+                "source_documents": [
+                    {
+                        "content": doc.page_content,
+                        "metadata": doc.metadata
+                    }
+                    for doc in result.get("source_documents", [])
+                ]
+            }
+
+            metadata = {
+                "query": query,
+                "analysis_status": "completed",
+                "num_sources": len(result.get("source_documents", [])),
+                "model": self.rag.config.model,
+                "embedding_model": self.rag.config.embedding_model,
+            }
+
+            return output, metadata
+
+        except Exception as e:
+            output = {"error": str(e)}
+            metadata = {
+                "query": query,
+                "analysis_status": "failed",
+                "error_details": str(e),
+            }
+            return output, metadata
 
-    async def _arun(self, query: str) -> str:
+    async def _arun(self, query: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
         """Async version of _run.
 
         Args:
             query (str): Medical question to answer
 
         Returns:
-            str: Generated answer from the RAG system
+            Tuple[Dict[str, Any], Dict[str, Any]]: Output dictionary and metadata dictionary
 
         Raises:
             NotImplementedError: Async not implemented yet
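
With this change `_run` returns an `(output, metadata)` tuple rather than a plain string, so callers must unpack both values. A hedged sketch of consuming that tuple; the `tool` construction itself is not shown in this diff and requires valid Cohere and Pinecone credentials:

```python
# `tool` is assumed to be an already-initialized RAGTool; its constructor is outside this diff.
output, metadata = tool._run("What are the radiographic signs of tension pneumothorax?")

if metadata["analysis_status"] == "completed":
    print(output["answer"])
    for src in output["source_documents"]:
        print(src["metadata"].get("source", "unknown"), "-", src["content"][:80])
else:
    print("RAG query failed:", metadata["error_details"])
```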
pyproject.toml CHANGED
@@ -26,6 +26,8 @@ dependencies = [
     "pandas>=1.3.0",
     "pydantic>=1.8.0",
     "Pillow>=8.0.0",
+    "PyPDF2>=3.0.0",
+    "pdfplumber>=0.10.0",
     "torchxrayvision>=0.0.37",
     "transformers @ git+https://github.com/huggingface/transformers.git@88d960937c81a32bfb63356a2e8ecf7999619681",
     "datasets>=2.15.0",