Adibvafa
committed on
Commit
·
19ede8f
1
Parent(s):
6af2e63
feat: updates to agent and interface
Browse files- interface.py +22 -2
- main.py +21 -4
- medrax/agent/agent.py +1 -0
- medrax/docs/system_prompts.txt +3 -2
- playground.ipynb +0 -0
interface.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import re
|
|
|
|
| 2 |
import gradio as gr
|
| 3 |
from pathlib import Path
|
| 4 |
import time
|
|
@@ -106,10 +107,29 @@ class ChatInterface:
|
|
| 106 |
|
| 107 |
messages = []
|
| 108 |
image_path = self.original_file_path or display_image
|
|
|
|
| 109 |
if image_path is not None:
|
| 110 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
if message is not None:
|
| 112 |
-
messages.append({"role": "user", "content": message})
|
| 113 |
|
| 114 |
try:
|
| 115 |
for event in self.agent.workflow.stream(
|
|
|
|
| 1 |
import re
|
| 2 |
+
import base64
|
| 3 |
import gradio as gr
|
| 4 |
from pathlib import Path
|
| 5 |
import time
|
|
|
|
| 107 |
|
| 108 |
messages = []
|
| 109 |
image_path = self.original_file_path or display_image
|
| 110 |
+
|
| 111 |
if image_path is not None:
|
| 112 |
+
# Send path for tools
|
| 113 |
+
messages.append({"role": "user", "content": f"image_path: {image_path}"})
|
| 114 |
+
|
| 115 |
+
# Load and encode image for multimodal
|
| 116 |
+
with open(image_path, "rb") as img_file:
|
| 117 |
+
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
|
| 118 |
+
|
| 119 |
+
messages.append(
|
| 120 |
+
{
|
| 121 |
+
"role": "user",
|
| 122 |
+
"content": [
|
| 123 |
+
{
|
| 124 |
+
"type": "image_url",
|
| 125 |
+
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
|
| 126 |
+
}
|
| 127 |
+
],
|
| 128 |
+
}
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
if message is not None:
|
| 132 |
+
messages.append({"role": "user", "content": [{"type": "text", "text": message}]})
|
| 133 |
|
| 134 |
try:
|
| 135 |
for event in self.agent.workflow.stream(
|
main.py
CHANGED
|
@@ -19,7 +19,14 @@ _ = load_dotenv()
|
|
| 19 |
|
| 20 |
|
| 21 |
def initialize_agent(
|
| 22 |
-
prompt_file,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
):
|
| 24 |
"""Initialize the MedRAX agent with specified tools and configuration.
|
| 25 |
|
|
@@ -29,6 +36,9 @@ def initialize_agent(
|
|
| 29 |
model_dir (str, optional): Directory containing model weights. Defaults to "/model-weights".
|
| 30 |
temp_dir (str, optional): Directory for temporary files. Defaults to "temp".
|
| 31 |
device (str, optional): Device to run models on. Defaults to "cuda".
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
Returns:
|
| 34 |
Tuple[Agent, Dict[str, BaseTool]]: Initialized agent and dictionary of tool instances
|
|
@@ -62,7 +72,7 @@ def initialize_agent(
|
|
| 62 |
tools_dict[tool_name] = all_tools[tool_name]()
|
| 63 |
|
| 64 |
checkpointer = MemorySaver()
|
| 65 |
-
model = ChatOpenAI(model=
|
| 66 |
agent = Agent(
|
| 67 |
model,
|
| 68 |
tools=list(tools_dict.values()),
|
|
@@ -98,8 +108,15 @@ if __name__ == "__main__":
|
|
| 98 |
]
|
| 99 |
|
| 100 |
agent, tools_dict = initialize_agent(
|
| 101 |
-
"medrax/docs/system_prompts.txt",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
)
|
| 103 |
demo = create_demo(agent, tools_dict)
|
| 104 |
|
| 105 |
-
demo.launch(server_name="0.0.0.0", server_port=
|
|
|
|
| 19 |
|
| 20 |
|
| 21 |
def initialize_agent(
|
| 22 |
+
prompt_file,
|
| 23 |
+
tools_to_use=None,
|
| 24 |
+
model_dir="/model-weights",
|
| 25 |
+
temp_dir="temp",
|
| 26 |
+
device="cuda",
|
| 27 |
+
model="chatgpt-4o-latest",
|
| 28 |
+
temperature=0.7,
|
| 29 |
+
top_p=0.95,
|
| 30 |
):
|
| 31 |
"""Initialize the MedRAX agent with specified tools and configuration.
|
| 32 |
|
|
|
|
| 36 |
model_dir (str, optional): Directory containing model weights. Defaults to "/model-weights".
|
| 37 |
temp_dir (str, optional): Directory for temporary files. Defaults to "temp".
|
| 38 |
device (str, optional): Device to run models on. Defaults to "cuda".
|
| 39 |
+
model (str, optional): Model to use. Defaults to "chatgpt-4o-latest".
|
| 40 |
+
temperature (float, optional): Temperature for the model. Defaults to 0.7.
|
| 41 |
+
top_p (float, optional): Top P for the model. Defaults to 0.95.
|
| 42 |
|
| 43 |
Returns:
|
| 44 |
Tuple[Agent, Dict[str, BaseTool]]: Initialized agent and dictionary of tool instances
|
|
|
|
| 72 |
tools_dict[tool_name] = all_tools[tool_name]()
|
| 73 |
|
| 74 |
checkpointer = MemorySaver()
|
| 75 |
+
model = ChatOpenAI(model=model, temperature=temperature, top_p=top_p)
|
| 76 |
agent = Agent(
|
| 77 |
model,
|
| 78 |
tools=list(tools_dict.values()),
|
|
|
|
| 108 |
]
|
| 109 |
|
| 110 |
agent, tools_dict = initialize_agent(
|
| 111 |
+
"medrax/docs/system_prompts.txt",
|
| 112 |
+
tools_to_use=selected_tools,
|
| 113 |
+
model_dir="/model-weights", # Change this to the path of the model weights
|
| 114 |
+
temp_dir="temp", # Change this to the path of the temporary directory
|
| 115 |
+
device="cuda", # Change this to the device you want to use
|
| 116 |
+
model="gpt-4o", # Change this to the model you want to use, e.g. gpt-4o-mini
|
| 117 |
+
temperature=0.7,
|
| 118 |
+
top_p=0.95,
|
| 119 |
)
|
| 120 |
demo = create_demo(agent, tools_dict)
|
| 121 |
|
| 122 |
+
demo.launch(server_name="0.0.0.0", server_port=8989, share=True)
|
medrax/agent/agent.py
CHANGED
|
@@ -81,6 +81,7 @@ class Agent:
|
|
| 81 |
log_dir (str, optional): Directory to save logs. Defaults to 'logs'.
|
| 82 |
"""
|
| 83 |
self.system_prompt = system_prompt
|
|
|
|
| 84 |
self.log_tools = log_tools
|
| 85 |
|
| 86 |
if self.log_tools:
|
|
|
|
| 81 |
log_dir (str, optional): Directory to save logs. Defaults to 'logs'.
|
| 82 |
"""
|
| 83 |
self.system_prompt = system_prompt
|
| 84 |
+
print(f"System prompt: {self.system_prompt}")
|
| 85 |
self.log_tools = log_tools
|
| 86 |
|
| 87 |
if self.log_tools:
|
medrax/docs/system_prompts.txt
CHANGED
|
@@ -1,8 +1,9 @@
|
|
| 1 |
[MEDICAL_ASSISTANT]
|
| 2 |
-
You are an expert medical AI assistant.
|
|
|
|
| 3 |
Make multiple tool calls in parallel or sequence as needed for comprehensive answers.
|
| 4 |
Critically think about and criticize the tool outputs.
|
| 5 |
-
If you need to look up some information before asking a follow up question, you are allowed to do that
|
| 6 |
|
| 7 |
[GENERAL_ASSISTANT]
|
| 8 |
You are a helpful AI assistant. Your role is to assist users with a wide range of tasks and questions, providing accurate and useful information on various topics.
|
|
|
|
| 1 |
[MEDICAL_ASSISTANT]
|
| 2 |
+
You are an expert medical AI assistant who can answer any medical questions and analyze medical images similar to a doctor.
|
| 3 |
+
Solve using your own vision and reasoning and use tools to complement your reasoning.
|
| 4 |
Make multiple tool calls in parallel or sequence as needed for comprehensive answers.
|
| 5 |
Critically think about and criticize the tool outputs.
|
| 6 |
+
If you need to look up some information before asking a follow up question, you are allowed to do that.
|
| 7 |
|
| 8 |
[GENERAL_ASSISTANT]
|
| 9 |
You are a helpful AI assistant. Your role is to assist users with a wide range of tasks and questions, providing accurate and useful information on various topics.
|
playground.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|