Spaces:
Sleeping
Sleeping
Tuchuanhuhuhu
committed on
Commit
·
5c09c23
1
Parent(s):
ee70a9d
添加总结按钮
Browse files- ChuanhuChatbot.py +2 -0
- modules/models/base_model.py +20 -14
- modules/utils.py +3 -0
ChuanhuChatbot.py
CHANGED
|
@@ -97,6 +97,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
| 97 |
)
|
| 98 |
index_files = gr.Files(label=i18n("上传"), type="file")
|
| 99 |
two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False))
|
|
|
|
| 100 |
# TODO: 公式ocr
|
| 101 |
# formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False))
|
| 102 |
|
|
@@ -333,6 +334,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
| 333 |
submitBtn.click(**get_usage_args)
|
| 334 |
|
| 335 |
index_files.change(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [index_files, chatbot, status_display])
|
|
|
|
| 336 |
|
| 337 |
emptyBtn.click(
|
| 338 |
reset,
|
|
|
|
| 97 |
)
|
| 98 |
index_files = gr.Files(label=i18n("上传"), type="file")
|
| 99 |
two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False))
|
| 100 |
+
summarize_btn = gr.Button(i18n("总结"))
|
| 101 |
# TODO: 公式ocr
|
| 102 |
# formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False))
|
| 103 |
|
|
|
|
| 334 |
submitBtn.click(**get_usage_args)
|
| 335 |
|
| 336 |
index_files.change(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [index_files, chatbot, status_display])
|
| 337 |
+
summarize_btn.click(handle_summarize_index, [current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display])
|
| 338 |
|
| 339 |
emptyBtn.click(
|
| 340 |
reset,
|
modules/models/base_model.py
CHANGED
|
@@ -263,22 +263,28 @@ class BaseLLMModel:
|
|
| 263 |
if files:
|
| 264 |
index = construct_index(self.api_key, file_src=files)
|
| 265 |
status = i18n("索引构建完成")
|
| 266 |
-
# Summarize the document
|
| 267 |
-
# logging.info(i18n("生成内容总结中……"))
|
| 268 |
-
# os.environ["OPENAI_API_KEY"] = self.api_key
|
| 269 |
-
# from langchain.chains.summarize import load_summarize_chain
|
| 270 |
-
# from langchain.prompts import PromptTemplate
|
| 271 |
-
# from langchain.chat_models import ChatOpenAI
|
| 272 |
-
# from langchain.callbacks import StdOutCallbackHandler
|
| 273 |
-
# prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
|
| 274 |
-
# PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
|
| 275 |
-
# llm = ChatOpenAI()
|
| 276 |
-
# chain = load_summarize_chain(llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)
|
| 277 |
-
# summary = chain({"input_documents": list(index.docstore.__dict__["_dict"].values())}, return_only_outputs=True)["output_text"]
|
| 278 |
-
# print(i18n("总结") + f": {summary}")
|
| 279 |
-
# chatbot.append([i18n("上传了")+str(len(files))+"个文件", summary])
|
| 280 |
return gr.Files.update(), chatbot, status
|
| 281 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 282 |
def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
|
| 283 |
fake_inputs = None
|
| 284 |
display_append = []
|
|
|
|
| 263 |
if files:
|
| 264 |
index = construct_index(self.api_key, file_src=files)
|
| 265 |
status = i18n("索引构建完成")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 266 |
return gr.Files.update(), chatbot, status
|
| 267 |
|
| 268 |
+
def summarize_index(self, files, chatbot, language):
|
| 269 |
+
status = gr.Markdown.update()
|
| 270 |
+
if files:
|
| 271 |
+
index = construct_index(self.api_key, file_src=files)
|
| 272 |
+
status = i18n("总结完成")
|
| 273 |
+
logging.info(i18n("生成内容总结中……"))
|
| 274 |
+
os.environ["OPENAI_API_KEY"] = self.api_key
|
| 275 |
+
from langchain.chains.summarize import load_summarize_chain
|
| 276 |
+
from langchain.prompts import PromptTemplate
|
| 277 |
+
from langchain.chat_models import ChatOpenAI
|
| 278 |
+
from langchain.callbacks import StdOutCallbackHandler
|
| 279 |
+
prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
|
| 280 |
+
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
|
| 281 |
+
llm = ChatOpenAI()
|
| 282 |
+
chain = load_summarize_chain(llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)
|
| 283 |
+
summary = chain({"input_documents": list(index.docstore.__dict__["_dict"].values())}, return_only_outputs=True)["output_text"]
|
| 284 |
+
print(i18n("总结") + f": {summary}")
|
| 285 |
+
chatbot.append([i18n("上传了")+str(len(files))+"个文件", summary])
|
| 286 |
+
return chatbot, status
|
| 287 |
+
|
| 288 |
def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
|
| 289 |
fake_inputs = None
|
| 290 |
display_append = []
|
modules/utils.py
CHANGED
|
@@ -116,6 +116,9 @@ def set_single_turn(current_model, *args):
|
|
| 116 |
def handle_file_upload(current_model, *args):
|
| 117 |
return current_model.handle_file_upload(*args)
|
| 118 |
|
|
|
|
|
|
|
|
|
|
| 119 |
def like(current_model, *args):
|
| 120 |
return current_model.like(*args)
|
| 121 |
|
|
|
|
| 116 |
def handle_file_upload(current_model, *args):
|
| 117 |
return current_model.handle_file_upload(*args)
|
| 118 |
|
| 119 |
+
def handle_summarize_index(current_model, *args):
|
| 120 |
+
return current_model.summarize_index(*args)
|
| 121 |
+
|
| 122 |
def like(current_model, *args):
|
| 123 |
return current_model.like(*args)
|
| 124 |
|