Update app_dialogue.py
app_dialogue.py  CHANGED  +16 -10
@@ -293,17 +293,23 @@ def format_user_prompt_with_im_history_and_system_conditioning(
     resulting_list = copy.deepcopy(SYSTEM_PROMPT)
 
     # Format history
-    for turn in history:
-        user_utterance, assistant_utterance = turn
-        splitted_user_utterance = split_str_on_im_markdown(user_utterance)
+    if history:
+        for turn in history:
+            user_utterance, assistant_utterance = turn
+            splitted_user_utterance = split_str_on_im_markdown(user_utterance)
+
+            optional_space = ""
+            if not is_image(splitted_user_utterance[0]):
+                optional_space = " "
+            resulting_list.append(f"\nUser:{optional_space}")
+            resulting_list.extend(splitted_user_utterance)
+            resulting_list.append(f"<end_of_utterance>\nAssistant: {assistant_utterance}")
+    else:
+        optional_space = " "
 
-        optional_space = ""
-        if not is_image(splitted_user_utterance[0]):
-            optional_space = " "
         resulting_list.append(f"\nUser:{optional_space}")
-        resulting_list.extend(splitted_user_utterance)
-        resulting_list.append(f"<end_of_utterance>\nAssistant: {assistant_utterance}")
-
+        #resulting_list.extend(splitted_user_utterance)
+        resulting_list.append(f"<end_of_utterance>\nAssistant: {assistant_utterance}")
     # Format current input
     current_user_prompt_str = remove_spaces_around_token(current_user_prompt_str)
     if current_image is None:
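
The reworked "# Format history" block adds an explicit empty-history branch so a brand-new conversation still gets a trailing User/Assistant turn in the prompt. A minimal, self-contained sketch of that logic follows; SYSTEM_PROMPT, is_image and split_str_on_im_markdown are placeholder stand-ins for helpers defined elsewhere in app_dialogue.py, and defaulting assistant_utterance to an empty string is an assumption made only for this illustration.

# Sketch of the patched history-formatting branch (not the Space's actual code).
import copy

SYSTEM_PROMPT = ["<system conditioning>"]  # placeholder system prompt

def is_image(chunk: str) -> bool:
    # Placeholder: the real helper detects image markdown/URLs.
    return chunk.startswith("http")

def split_str_on_im_markdown(utterance: str) -> list:
    # Placeholder: the real helper splits text around image markdown.
    return [utterance]

def format_history(history, assistant_utterance=""):
    resulting_list = copy.deepcopy(SYSTEM_PROMPT)
    if history:
        # Existing path: one "User ... <end_of_utterance> Assistant ..." block per turn.
        for user_utterance, assistant_utterance in history:
            splitted = split_str_on_im_markdown(user_utterance)
            optional_space = "" if is_image(splitted[0]) else " "
            resulting_list.append(f"\nUser:{optional_space}")
            resulting_list.extend(splitted)
            resulting_list.append(f"<end_of_utterance>\nAssistant: {assistant_utterance}")
    else:
        # New branch from the patch: emit a single empty User/Assistant turn.
        resulting_list.append("\nUser: ")
        resulting_list.append(f"<end_of_utterance>\nAssistant: {assistant_utterance}")
    return resulting_list

print(format_history([]))
print(format_history([("hello", "hi there")]))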
@@ -554,7 +560,7 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
         #stream = client.generate_stream(prompt=query, **generation_args)
         #resp = client.text_generation(query, **generation_args, stream=False,details=True, return_full_text=False)
         stream = client.text_generation(query, **generation_args, stream=False)
-
+        print (stream)
         #stream = ""
         #for response in resp:
         # stream += response.token.text
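
The second hunk keeps the streaming path commented out, issues a blocking generation call instead, and prints the returned text for debugging. Below is a hedged sketch of the two call styles, assuming client is a huggingface_hub InferenceClient; the model id and generation_args values are illustrative assumptions, not the Space's actual configuration.

# Sketch only: non-streaming vs. streaming text generation with huggingface_hub.
from huggingface_hub import InferenceClient

client = InferenceClient(model="HuggingFaceM4/idefics-80b-instruct")  # assumed endpoint
generation_args = {"max_new_tokens": 256, "temperature": 0.7, "repetition_penalty": 1.2}  # assumed values
query = "User: Describe the image.<end_of_utterance>\nAssistant:"

# With stream=False the full completion comes back as a single string, which is
# what the patched line stores in `stream` and prints.
text = client.text_generation(query, **generation_args, stream=False)
print(text)

# With stream=True the call yields tokens incrementally, matching the commented-out loop.
for token in client.text_generation(query, **generation_args, stream=True):
    print(token, end="")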