Finalized Grok addition
Browse files- README.md +11 -3
- main.py +1 -1
- medrax/models/model_factory.py +2 -3
- pyproject.toml +1 -0
README.md
CHANGED
|
@@ -15,7 +15,7 @@ Chest X-rays (CXRs) play an integral role in driving critical decisions in disea
|
|
| 15 |
## MedRAX
|
| 16 |
MedRAX is built on a robust technical foundation:
|
| 17 |
- **Core Architecture**: Built on LangChain and LangGraph frameworks
|
| 18 |
- - **Language Models**: Supports multiple LLM providers including OpenAI (GPT-4o)
|
| 19 |
- **Deployment**: Supports both local and cloud-based deployments
|
| 20 |
- **Interface**: Production-ready interface built with Gradio
|
| 21 |
- **Modular Design**: Tool-agnostic architecture allowing easy integration of new capabilities
|
|
@@ -54,7 +54,9 @@ Unzip the Eurorad figures to your local `MedMAX` directory.
|
|
| 54 |
unzip chestagentbench/figures.zip
|
| 55 |
```
|
| 56 |
|
| 57 |
- To evaluate with … (NOTE: removed line truncated in this rendering — the remainder of the original sentence is not visible)
|
|
|
|
|
|
|
| 58 |
```
|
| 59 |
python quickstart.py \
|
| 60 |
--model gpt-4o \
|
|
@@ -112,6 +114,9 @@ GOOGLE_API_KEY=
|
|
| 112 |
OPENROUTER_API_KEY=
|
| 113 |
OPENROUTER_BASE_URL= # Optional: Defaults to https://openrouter.ai/api/v1
|
| 114 |
|
|
|
|
|
|
|
|
|
|
| 115 |
# -------------------------
|
| 116 |
# Tool-specific API Keys
|
| 117 |
# -------------------------
|
|
@@ -359,7 +364,10 @@ Supported prefix: `openrouter-`
|
|
| 359 |
|
| 360 |
Access many open source and proprietary models via [OpenRouter](https://openrouter.ai/).
|
| 361 |
|
| 362 |
-
|
|
|
|
|
|
|
|
|
|
| 363 |
|
| 364 |
#### Local LLMs
|
| 365 |
If you are running a local LLM using frameworks like [Ollama](https://ollama.com/) or [LM Studio](https://lmstudio.ai/), you can configure the `OPENAI_BASE_URL` in your `.env` file to point to your local endpoint (e.g., `http://localhost:11434/v1`).
|
|
|
|
| 15 |
## MedRAX
|
| 16 |
MedRAX is built on a robust technical foundation:
|
| 17 |
- **Core Architecture**: Built on LangChain and LangGraph frameworks
|
| 18 |
+ - **Language Models**: Supports multiple LLM providers including OpenAI (GPT-4o), Google (Gemini), and xAI (Grok) models
|
| 19 |
- **Deployment**: Supports both local and cloud-based deployments
|
| 20 |
- **Interface**: Production-ready interface built with Gradio
|
| 21 |
- **Modular Design**: Tool-agnostic architecture allowing easy integration of new capabilities
|
|
|
|
| 54 |
unzip chestagentbench/figures.zip
|
| 55 |
```
|
| 56 |
|
| 57 |
+ To evaluate with different models, set the appropriate API key in your `.env` file (see the "Environment Variable Setup" section for details) and run the quickstart script.
|
| 58 |
+
|
| 59 |
+ **Example with GPT-4o:**
|
| 60 |
```
|
| 61 |
python quickstart.py \
|
| 62 |
--model gpt-4o \
|
|
|
|
| 114 |
OPENROUTER_API_KEY=
|
| 115 |
OPENROUTER_BASE_URL= # Optional: Defaults to https://openrouter.ai/api/v1
|
| 116 |
|
| 117 |
+ # xAI
|
| 118 |
+ XAI_API_KEY=
|
| 119 |
+
|
| 120 |
# -------------------------
|
| 121 |
# Tool-specific API Keys
|
| 122 |
# -------------------------
|
|
|
|
| 364 |
|
| 365 |
Access many open source and proprietary models via [OpenRouter](https://openrouter.ai/).
|
| 366 |
|
| 367 |
+ #### xAI Grok Models
|
| 368 |
+ Supported prefix: `grok-`
|
| 369 |
+
|
| 370 |
+ **Note:** Tool compatibility may vary with open-source models. For best results with tools, we recommend using OpenAI, Google Gemini, or xAI Grok models.
|
| 371 |
|
| 372 |
#### Local LLMs
|
| 373 |
If you are running a local LLM using frameworks like [Ollama](https://ollama.com/) or [LM Studio](https://lmstudio.ai/), you can configure the `OPENAI_BASE_URL` in your `.env` file to point to your local endpoint (e.g., `http://localhost:11434/v1`).
|
main.py
CHANGED
|
@@ -186,7 +186,7 @@ if __name__ == "__main__":
|
|
| 186 |
model_dir="/model-weights",
|
| 187 |
temp_dir="temp", # Change this to the path of the temporary directory
|
| 188 |
device="cuda",
|
| 189 |
- model="… (NOTE: removed line truncated in this rendering — the original model string and trailing comment are not visible)
|
| 190 |
temperature=0.7,
|
| 191 |
top_p=0.95,
|
| 192 |
model_kwargs=model_kwargs,
|
|
|
|
| 186 |
model_dir="/model-weights",
|
| 187 |
temp_dir="temp", # Change this to the path of the temporary directory
|
| 188 |
device="cuda",
|
| 189 |
+ model="grok-4", # Change this to the model you want to use, e.g. gpt-4.1-2025-04-14, gemini-2.5-pro
|
| 190 |
temperature=0.7,
|
| 191 |
top_p=0.95,
|
| 192 |
model_kwargs=model_kwargs,
|
medrax/models/model_factory.py
CHANGED
|
@@ -6,6 +6,7 @@ from typing import Dict, Any, Type
|
|
| 6 |
from langchain_core.language_models import BaseLanguageModel
|
| 7 |
from langchain_openai import ChatOpenAI
|
| 8 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
|
|
|
| 9 |
|
| 10 |
|
| 11 |
class ModelFactory:
|
|
@@ -35,10 +36,8 @@ class ModelFactory:
|
|
| 35 |
"default_base_url": "https://openrouter.ai/api/v1",
|
| 36 |
},
|
| 37 |
"grok": {
|
| 38 |
- "class": … (NOTE: removed line truncated in this rendering — presumably the previous class reference used with the XAI_BASE_URL below, e.g. ChatOpenAI; verify against repository history)
|
| 39 |
"env_key": "XAI_API_KEY",
|
| 40 |
- "base_url_key": "XAI_BASE_URL",
|
| 41 |
- "default_base_url": "https://api.x.ai/v1"
|
| 42 |
}
|
| 43 |
# Add more providers with default configurations here
|
| 44 |
}
|
|
|
|
| 6 |
from langchain_core.language_models import BaseLanguageModel
|
| 7 |
from langchain_openai import ChatOpenAI
|
| 8 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 9 |
+ from langchain_xai import ChatXAI
|
| 10 |
|
| 11 |
|
| 12 |
class ModelFactory:
|
|
|
|
| 36 |
"default_base_url": "https://openrouter.ai/api/v1",
|
| 37 |
},
|
| 38 |
"grok": {
|
| 39 |
+ "class": ChatXAI,
|
| 40 |
"env_key": "XAI_API_KEY",
|
|
|
|
|
|
|
| 41 |
}
|
| 42 |
# Add more providers with default configurations here
|
| 43 |
}
|
pyproject.toml
CHANGED
|
@@ -20,6 +20,7 @@ dependencies = [
|
|
| 20 |
"langchain-openai>=0.0.2",
|
| 21 |
"langchain-anthropic>=0.0.2",
|
| 22 |
"langchain-cohere>=0.0.2",
|
|
|
|
| 23 |
"langchain-chroma>=0.0.10",
|
| 24 |
"langgraph>=0.0.10",
|
| 25 |
"python-dotenv>=0.19.0",
|
|
|
|
| 20 |
"langchain-openai>=0.0.2",
|
| 21 |
"langchain-anthropic>=0.0.2",
|
| 22 |
"langchain-cohere>=0.0.2",
|
| 23 |
+ "langchain-xai>=0.0.1",
|
| 24 |
"langchain-chroma>=0.0.10",
|
| 25 |
"langgraph>=0.0.10",
|
| 26 |
"python-dotenv>=0.19.0",
|