Commit 48d2f65 · Tuchuanhuhuhu committed · Parent(s): f05148b

bugfix: 修复自动保存参数时的若干问题 (fix several issues when auto-saving parameters)
Files changed:
- ChuanhuChatbot.py +15 -16
- locale/en_US.json +1 -1
- locale/ja_JP.json +1 -1
- locale/ko_KR.json +1 -1
- locale/ru_RU.json +1 -1
- locale/sv_SE.json +1 -1
- locale/vi_VN.json +1 -1
- modules/models/OpenAI.py +1 -4
- modules/models/OpenAIVision.py +1 -4
- modules/models/base_model.py +31 -8
ChuanhuChatbot.py
CHANGED

@@ -329,7 +329,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
                     user_identifier_txt = gr.Textbox(
                         show_label=True,
                         placeholder=i18n("用于定位滥用行为"),
-                        label=i18n("
+                        label=i18n("用户标识符"),
                         value=user_name.value,
                         lines=1,
                     )
@@ -496,8 +496,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
            user_info, user_name = gr.Markdown.update(
                value=f"", visible=False), ""
        current_model = get_model(
-            model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key)[0]
-        current_model.set_user_identifier(user_name)
+            model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, user_name=user_name)[0]
        if not hide_history_when_not_logged_in or user_name:
            loaded_stuff = current_model.auto_load()
        else:
@@ -639,7 +638,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
        user_api_key, status_display], api_name="set_key").then(**get_usage_args)
    keyTxt.submit(**get_usage_args)
    single_turn_checkbox.change(
-        set_single_turn, [current_model, single_turn_checkbox], None)
+        set_single_turn, [current_model, single_turn_checkbox], None, show_progress=False)
    model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name, current_model], [
        current_model, status_display, chatbot, lora_select_dropdown, user_api_key, keyTxt], show_progress=True, api_name="get_model")
    model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [
@@ -650,7 +649,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
        top_p_slider, systemPromptTxt, user_name, current_model], [current_model, status_display, chatbot], show_progress=True)

    # Template
-    systemPromptTxt.
+    systemPromptTxt.input(set_system_prompt, [
        current_model, systemPromptTxt], None)
    templateRefreshBtn.click(get_template_dropdown, None, [
        templateFileSelectDropdown])
@@ -723,25 +722,25 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
        cancel_all_jobs, [], [openai_train_status], show_progress=True)

    # Advanced
-    max_context_length_slider.input(
-        set_token_upper_limit, [current_model, max_context_length_slider], None)
    temperature_slider.input(
-        set_temperature, [current_model, temperature_slider], None)
-    top_p_slider.input(set_top_p, [current_model, top_p_slider], None)
+        set_temperature, [current_model, temperature_slider], None, show_progress=False)
+    top_p_slider.input(set_top_p, [current_model, top_p_slider], None, show_progress=False)
    n_choices_slider.input(
-        set_n_choices, [current_model, n_choices_slider], None)
+        set_n_choices, [current_model, n_choices_slider], None, show_progress=False)
    stop_sequence_txt.input(
-        set_stop_sequence, [current_model, stop_sequence_txt], None)
+        set_stop_sequence, [current_model, stop_sequence_txt], None, show_progress=False)
+    max_context_length_slider.input(
+        set_token_upper_limit, [current_model, max_context_length_slider], None, show_progress=False)
    max_generation_slider.input(
-        set_max_tokens, [current_model, max_generation_slider], None)
+        set_max_tokens, [current_model, max_generation_slider], None, show_progress=False)
    presence_penalty_slider.input(
-        set_presence_penalty, [current_model, presence_penalty_slider], None)
+        set_presence_penalty, [current_model, presence_penalty_slider], None, show_progress=False)
    frequency_penalty_slider.input(
-        set_frequency_penalty, [current_model, frequency_penalty_slider], None)
+        set_frequency_penalty, [current_model, frequency_penalty_slider], None, show_progress=False)
    logit_bias_txt.input(
-        set_logit_bias, [current_model, logit_bias_txt], None)
+        set_logit_bias, [current_model, logit_bias_txt], None, show_progress=False)
    user_identifier_txt.input(set_user_identifier, [
-        current_model, user_identifier_txt], None)
+        current_model, user_identifier_txt], None, show_progress=False)

    default_btn.click(
        reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
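All of the Advanced parameter controls keep their `.input()` bindings but gain `show_progress=False`, so Gradio stops flashing a progress overlay on every keystroke or slider drag for handlers that return no output. A minimal, self-contained sketch of that wiring pattern, assuming the Gradio 3.x API the app uses (the DummyModel class and single slider are stand-ins, not the app's real objects):

import gradio as gr

class DummyModel:
    # Stand-in for the app's current_model state object.
    temperature = 1.0

def set_temperature(model, new_temperature):
    # Mirrors BaseLLMModel.set_temperature: mutate state, return nothing.
    model.temperature = new_temperature

with gr.Blocks() as demo:
    model = gr.State(DummyModel())
    slider = gr.Slider(0.0, 2.0, value=1.0, label="temperature")
    # .input fires on every user edit; show_progress=False suppresses the
    # progress overlay, which is pointless for a handler with no outputs.
    slider.input(set_temperature, [model, slider], None, show_progress=False)

demo.launch()

Note that max_context_length_slider also moves below stop_sequence_txt in the diff, but its binding is unchanged apart from show_progress.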
locale/en_US.json
CHANGED

@@ -93,7 +93,7 @@
     "状态": "Status",
     "生成内容总结中……": "Generating content summary...",
     "用于定位滥用行为": "Used to locate abuse",
-    "
+    "用户标识符": "User identifier",
     "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Developed by Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) and [Keldos](https://github.com/Keldos-Li)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
     "知识库": "Knowledge base",
     "知识库文件": "Knowledge base files",
locale/ja_JP.json
CHANGED

@@ -93,7 +93,7 @@
     "状态": "ステータス",
     "生成内容总结中……": "コンテンツ概要を生成しています...",
     "用于定位滥用行为": "不正行為を特定するために使用されます",
-    "
+    "用户标识符": "ユーザー識別子",
     "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452) と [Keldos](https://github.com/Keldos-Li)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
     "知识库": "ナレッジベース",
     "知识库文件": "ナレッジベースファイル",
locale/ko_KR.json
CHANGED

@@ -93,7 +93,7 @@
     "状态": "상태",
     "生成内容总结中……": "콘텐츠 요약 생성중...",
     "用于定位滥用行为": "악용 사례 파악에 활용됨",
-    "
+    "用户标识符": "사용자 식별자",
     "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "제작: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452), [Keldos](https://github.com/Keldos-Li)\n\n최신 코드 다운로드: [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
     "知识库": "지식 라이브러리",
     "知识库文件": "지식 라이브러리 파일",
locale/ru_RU.json
CHANGED

@@ -93,7 +93,7 @@
     "状态": "Статус",
     "生成内容总结中……": "Создание сводки контента...",
     "用于定位滥用行为": "Используется для выявления злоупотреблений",
-    "
+    "用户标识符": "Идентификатор пользователя",
     "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Разработано [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) и [Keldos](https://github.com/Keldos-Li).<br />посетите [GitHub Project](https://github.com/GaiZhenbiao/ChuanhuChatGPT) чата Chuanhu, чтобы загрузить последнюю версию скрипта",
     "知识库": "База знаний",
     "知识库文件": "Файл базы знаний",
locale/sv_SE.json
CHANGED

@@ -93,7 +93,7 @@
     "状态": "Status",
     "生成内容总结中……": "Genererar innehållssammanfattning...",
     "用于定位滥用行为": "Används för att lokalisera missbruk",
-    "
+    "用户标识符": "Användar-ID",
     "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Utvecklad av Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) och [Keldos](https://github.com/Keldos-Li)\n\nLadda ner senaste koden från [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
     "知识库": "kunskapsbank",
     "知识库文件": "kunskapsbankfil",
locale/vi_VN.json
CHANGED

@@ -93,7 +93,7 @@
     "状态": "Tình trạng",
     "生成内容总结中……": "Đang tạo tóm tắt nội dung...",
     "用于定位滥用行为": "Sử dụng để xác định hành vi lạm dụng",
-    "
+    "用户标识符": "Định danh người dùng",
     "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Phát triển bởi Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) và [Keldos](https://github.com/Keldos-Li)\n\nTải mã nguồn mới nhất từ [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
     "知识库": "Cơ sở kiến thức",
     "知识库文件": "Tệp cơ sở kiến thức",
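Each locale file adds a translation for the new 用户标识符 ("user identifier") key that the relabeled textbox in ChuanhuChatbot.py now requests. The project's i18n() helper is presumably a plain dictionary lookup over these JSON tables; a sketch of that assumed behavior (make_i18n and the fallback-to-key rule are illustrative, not the project's actual helper):

import json

def make_i18n(locale_path="locale/en_US.json"):
    # Load one of the locale tables shown above.
    with open(locale_path, encoding="utf-8") as f:
        table = json.load(f)
    # Assumed fallback: untranslated keys render as the Chinese source text.
    return lambda key: table.get(key, key)

i18n = make_i18n()
print(i18n("用户标识符"))  # -> "User identifier"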
modules/models/OpenAI.py
CHANGED

@@ -105,9 +105,6 @@ class OpenAIClient(BaseLLMModel):
            logging.error(i18n("获取API使用情况失败:") + str(e))
            return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG

-    def set_token_upper_limit(self, new_upper_limit):
-        pass
-
    @shared.state.switching_api_key  # 在不开启多账号模式的时候,这个装饰器不会起作用
    def _get_response(self, stream=False):
        openai_api_key = self.api_key
@@ -139,7 +136,7 @@
        if self.stop_sequence is not None:
            payload["stop"] = self.stop_sequence
        if self.logit_bias is not None:
-            payload["logit_bias"] = self.
+            payload["logit_bias"] = self.encoded_logit_bias()
        if self.user_identifier:
            payload["user"] = self.user_identifier
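Both changes follow from base_model.py below: the no-op set_token_upper_limit override is dropped because the base implementation is now safe to inherit, and the logit-bias line changes because the OpenAI chat completions API expects logit_bias as a {token_id: bias} mapping (biases roughly in -100..100), which encoded_logit_bias() now builds on demand. A rough sketch of the payload assembly, simplified from _get_response, with `client` standing in for the OpenAIClient instance:

def build_payload(client, messages):
    # Illustrative subset of the request body sent to the chat API.
    payload = {"model": client.model_name, "messages": messages}
    if client.stop_sequence is not None:
        payload["stop"] = client.stop_sequence
    if client.logit_bias is not None:
        # Token-id -> bias map; the tiktoken encoding that used to happen
        # eagerly in set_logit_bias now runs here, at request time.
        payload["logit_bias"] = client.encoded_logit_bias()
    if client.user_identifier:
        payload["user"] = client.user_identifier
    return payload

OpenAIVision.py below receives the identical pair of fixes.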
modules/models/OpenAIVision.py
CHANGED

@@ -174,9 +174,6 @@ class OpenAIVisionClient(BaseLLMModel):
            logging.error(i18n("获取API使用情况失败:") + str(e))
            return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG

-    def set_token_upper_limit(self, new_upper_limit):
-        pass
-
    @shared.state.switching_api_key  # 在不开启多账号模式的时候,这个装饰器不会起作用
    def _get_response(self, stream=False):
        openai_api_key = self.api_key
@@ -214,7 +211,7 @@
        if self.stop_sequence is not None:
            payload["stop"] = self.stop_sequence
        if self.logit_bias is not None:
-            payload["logit_bias"] = self.
+            payload["logit_bias"] = self.encoded_logit_bias()
        if self.user_identifier:
            payload["user"] = self.user_identifier
modules/models/base_model.py
CHANGED

@@ -208,7 +208,7 @@ class BaseLLMModel:
        temperature=1.0,
        top_p=1.0,
        n_choices=1,
-        stop=
+        stop="",
        max_generation_token=None,
        presence_penalty=0,
        frequency_penalty=0,
@@ -233,6 +233,7 @@
        self.need_api_key = False
        self.history_file_path = get_first_history_name(user)
        self.user_name = user
+        self.chatbot = []

        self.default_single_turn = single_turn
        self.default_temperature = temperature
@@ -637,6 +638,7 @@
                status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
                yield chatbot, status_text

+        self.chatbot = chatbot
        self.auto_save(chatbot)

    def retry(
@@ -702,32 +704,45 @@

    def set_token_upper_limit(self, new_upper_limit):
        self.token_upper_limit = new_upper_limit
-
+        self.auto_save()

    def set_temperature(self, new_temperature):
        self.temperature = new_temperature
+        self.auto_save()

    def set_top_p(self, new_top_p):
        self.top_p = new_top_p
+        self.auto_save()

    def set_n_choices(self, new_n_choices):
        self.n_choices = new_n_choices
+        self.auto_save()

    def set_stop_sequence(self, new_stop_sequence: str):
        new_stop_sequence = new_stop_sequence.split(",")
        self.stop_sequence = new_stop_sequence
+        self.auto_save()

    def set_max_tokens(self, new_max_tokens):
        self.max_generation_token = new_max_tokens
+        self.auto_save()

    def set_presence_penalty(self, new_presence_penalty):
        self.presence_penalty = new_presence_penalty
+        self.auto_save()

    def set_frequency_penalty(self, new_frequency_penalty):
        self.frequency_penalty = new_frequency_penalty
+        self.auto_save()

    def set_logit_bias(self, logit_bias):
-        logit_bias = logit_bias
+        self.logit_bias = logit_bias
+        self.auto_save()
+
+    def encoded_logit_bias(self):
+        if self.logit_bias is None:
+            return {}
+        logit_bias = self.logit_bias.split()
        bias_map = {}
        encoding = tiktoken.get_encoding("cl100k_base")
        for line in logit_bias:
@@ -735,13 +750,15 @@
            if word:
                for token in encoding.encode(word):
                    bias_map[token] = float(bias_amount)
-
+        return bias_map

    def set_user_identifier(self, new_user_identifier):
        self.user_identifier = new_user_identifier
+        self.auto_save()

    def set_system_prompt(self, new_system_prompt):
        self.system_prompt = new_system_prompt
+        self.auto_save()

    def set_key(self, new_access_key):
        if "*" not in new_access_key:
@@ -754,6 +771,7 @@

    def set_single_turn(self, new_single_turn):
        self.single_turn = new_single_turn
+        self.auto_save()

    def reset(self, remain_system_prompt=False):
        self.history = []
@@ -813,6 +831,7 @@
            msg = "删除了一组对话的token计数记录"
            self.all_token_counts.pop()
            msg = "删除了一组对话"
+        self.chatbot = chatbot
        self.auto_save(chatbot)
        return chatbot, msg

@@ -861,7 +880,9 @@
        else:
            return gr.update()

-    def auto_save(self, chatbot):
+    def auto_save(self, chatbot=None):
+        if chatbot is None:
+            chatbot = self.chatbot
        save_file(self.history_file_path, self, chatbot)

    def export_markdown(self, filename, chatbot):
@@ -924,7 +945,7 @@
            self.temperature = saved_json.get("temperature", self.temperature)
            self.top_p = saved_json.get("top_p", self.top_p)
            self.n_choices = saved_json.get("n_choices", self.n_choices)
-            self.stop_sequence = saved_json.get("stop_sequence", self.stop_sequence)
+            self.stop_sequence = list(saved_json.get("stop_sequence", self.stop_sequence))
            self.token_upper_limit = saved_json.get(
                "token_upper_limit", self.token_upper_limit
            )
@@ -940,6 +961,7 @@
            self.logit_bias = saved_json.get("logit_bias", self.logit_bias)
            self.user_identifier = saved_json.get("user_identifier", self.user_name)
            self.metadata = saved_json.get("metadata", self.metadata)
+            self.chatbot = saved_json["chatbot"]
            return (
                os.path.basename(self.history_file_path)[:-5],
                saved_json["system"],
@@ -948,7 +970,7 @@
                self.temperature,
                self.top_p,
                self.n_choices,
-                self.stop_sequence,
+                ",".join(self.stop_sequence),
                self.token_upper_limit,
                self.max_generation_token,
                self.presence_penalty,
@@ -959,6 +981,7 @@
        except:
            # 没有对话历史或者对话历史解析失败
            logging.info(f"没有找到对话历史记录 {self.history_file_path}")
+            self.reset()
            return (
                os.path.basename(self.history_file_path),
                "",
@@ -967,7 +990,7 @@
                self.temperature,
                self.top_p,
                self.n_choices,
-                self.stop_sequence,
+                ",".join(self.stop_sequence),
                self.token_upper_limit,
                self.max_generation_token,
                self.presence_penalty,
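The base_model.py changes share one theme: parameter changes must survive a restart. Every setter now ends with auto_save(); auto_save() itself can be called without arguments because the class caches the last chatbot list it saw; set_logit_bias stores the raw "word:bias" string so it round-trips through the saved JSON, with tokenization deferred to the new encoded_logit_bias(); and stop_sequence is stored as a list but handed back to the UI as a comma-joined string. A compact, runnable sketch of the resulting behavior, trimmed and simplified (only names that appear in the diff are real; save_file is elided):

import tiktoken

class ModelSketch:
    def __init__(self):
        self.chatbot = []            # cached UI history; lets auto_save()
                                     # run without an explicit argument
        self.temperature = 1.0
        self.stop_sequence = []
        self.logit_bias = None       # raw "word:bias word:bias ..." string
        self.history_file_path = "history.json"

    def auto_save(self, chatbot=None):
        if chatbot is None:
            chatbot = self.chatbot   # fall back to the cached copy
        # save_file(self.history_file_path, self, chatbot)  # elided here

    def set_temperature(self, new_temperature):
        self.temperature = new_temperature
        self.auto_save()             # persist every parameter change

    def set_stop_sequence(self, new_stop_sequence: str):
        # The UI supplies a comma-separated string; store it as a list.
        # auto_load() hands it back as ",".join(self.stop_sequence).
        self.stop_sequence = new_stop_sequence.split(",")
        self.auto_save()

    def set_logit_bias(self, logit_bias):
        # Store the raw string as typed; encoding is deferred.
        self.logit_bias = logit_bias
        self.auto_save()

    def encoded_logit_bias(self):
        # Turn e.g. "hello:5 world:-5" into the {token_id: bias} map
        # the OpenAI API expects.
        if self.logit_bias is None:
            return {}
        bias_map = {}
        encoding = tiktoken.get_encoding("cl100k_base")
        for line in self.logit_bias.split():
            word, bias_amount = line.split(":")
            if word:
                for token in encoding.encode(word):
                    bias_map[token] = float(bias_amount)
        return bias_map

m = ModelSketch()
m.set_logit_bias("hello:5 world:-5")
print(m.encoded_logit_bias())        # {token_id: bias, ...}

Storing the raw logit-bias string rather than the pre-encoded map is what lets the textbox repopulate verbatim after auto_load(), at the small cost of re-encoding on each request.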