Tuchuanhuhuhu committed
Commit ae6a83b · 1 parent: e7fcf86
llama: add instruction support
modules/models.py +7 -3
modules/models.py CHANGED

--- a/modules/models.py
+++ b/modules/models.py
@@ -207,7 +207,7 @@ class OpenAIClient(BaseLLMModel):
                 continue
         if error_msg:
             raise Exception(error_msg)
-
+
 
 class ChatGLM_Client(BaseLLMModel):
     def __init__(self, model_name) -> None:
@@ -293,12 +293,13 @@ class LLaMA_Client(BaseLLMModel):
         from lmflow.pipeline.auto_pipeline import AutoPipeline
         from lmflow.models.auto_model import AutoModel
         from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments
-
+
         self.max_generation_token = 1000
         self.end_string = "\n\n"
         # We don't need input data
         data_args = DatasetArguments(dataset_path=None)
         self.dataset = Dataset(data_args)
+        self.system_prompt = ""
 
         global LLAMA_MODEL, LLAMA_INFERENCER
         if LLAMA_MODEL is None or LLAMA_INFERENCER is None:
@@ -343,9 +344,12 @@ class LLaMA_Client(BaseLLMModel):
 
     def _get_llama_style_input(self):
         history = []
+        instruction = ""
+        if self.system_prompt:
+            instruction = (f"Instruction: {self.system_prompt}\n")
         for x in self.history:
             if x["role"] == "user":
-                history.append(f"Input: {x['content']}")
+                history.append(f"{instruction}Input: {x['content']}")
             else:
                 history.append(f"Output: {x['content']}")
         context = "\n\n".join(history)
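For reference, below is a minimal standalone sketch of the prompt assembly after this commit. The Instruction:/Input:/Output: labels, the system_prompt field, and the "\n\n" separator come straight from the diff; the function name, sample values, and the assumption that nothing follows the final turn are illustrative only, since the rest of _get_llama_style_input is not shown in this hunk.

def get_llama_style_input(system_prompt, history):
    # Mirrors the logic added in this commit; standalone for illustration.
    parts = []
    instruction = ""
    if system_prompt:
        # The instruction prefix is prepended to every user turn below.
        instruction = f"Instruction: {system_prompt}\n"
    for x in history:
        if x["role"] == "user":
            parts.append(f"{instruction}Input: {x['content']}")
        else:
            parts.append(f"Output: {x['content']}")
    # Turns are separated by blank lines, matching end_string = "\n\n".
    return "\n\n".join(parts)

# Example with hypothetical values:
prompt = get_llama_style_input(
    "Answer concisely.",
    [
        {"role": "user", "content": "What does LMFlow do?"},
        {"role": "assistant", "content": "It finetunes and serves LLMs."},
        {"role": "user", "content": "Which models does it support?"},
    ],
)
print(prompt)
# Instruction: Answer concisely.
# Input: What does LMFlow do?
#
# Output: It finetunes and serves LLMs.
#
# Instruction: Answer concisely.
# Input: Which models does it support?

Note the design choice visible in the diff: the instruction is re-prepended to every user turn rather than emitted once at the top of the context.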