Commit 106a2ee
Parent(s): 783dbaa
Update app.py

app.py (CHANGED)
@@ -84,20 +84,21 @@ class LLM_Langchain():
 
     def generate_response(self, input_text, context):
 
-        template = f"<|system|>\nYou are a intelligent chatbot.</s>\n<|user|>\n{input_text}.\n<|assistant|>"
+        template = f"<|system|>\nYou are a intelligent chatbot and expertise in {context}.</s>\n<|user|>\n{input_text}.\n<|assistant|>"
         llm = HuggingFaceHub(
             repo_id = self.model_name,
             model_kwargs = self.model_kwargs
         )
-        llm_chain = LLMChain(
-            prompt=template,
-            llm=llm,
-        )
-
-        result = llm_chain.run({
-            "question": input_text,
-            "context": context
-        })
+        # llm_chain = LLMChain(
+        #     prompt=template,
+        #     llm=llm,
+        # )
+
+        # result = llm_chain.run({
+        #     "question": input_text,
+        #     "context": context
+        # })
+        result = llm(template)
         # return llm(input_text)
         return result
 
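For reference, a minimal runnable sketch of the method as it stands after this commit. This assumes the pre-0.1 LangChain API, where HuggingFaceHub is importable from langchain.llms and is callable directly on a prompt string, and it assumes a constructor setting self.model_name and self.model_kwargs (the hunk only shows them being read); the repo_id and kwargs below are placeholders, not taken from the repo, and the Hub call needs a HUGGINGFACEHUB_API_TOKEN environment variable.

# Sketch only: pre-0.1 LangChain assumed; repo_id and model_kwargs
# are illustrative placeholders, not values from this repository.
from langchain.llms import HuggingFaceHub

class LLM_Langchain:
    def __init__(self):
        # Assumed constructor; the diff only shows these attributes being read.
        self.model_name = "HuggingFaceH4/zephyr-7b-beta"  # placeholder repo_id
        self.model_kwargs = {"temperature": 0.5, "max_new_tokens": 256}

    def generate_response(self, input_text, context):
        # The f-string renders the prompt up front, so {context} and
        # {input_text} are already substituted before the model is called.
        template = (
            f"<|system|>\nYou are a intelligent chatbot and expertise in "
            f"{context}.</s>\n<|user|>\n{input_text}.\n<|assistant|>"
        )
        llm = HuggingFaceHub(
            repo_id=self.model_name,
            model_kwargs=self.model_kwargs,
        )
        # Calling the LLM object directly sends the rendered prompt to the
        # Hugging Face Hub inference API and returns the completion string.
        result = llm(template)
        return result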

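As for why the chain was disabled rather than kept: in that same LangChain generation, LLMChain validates its prompt field as a PromptTemplate object, so passing the already-rendered f-string (prompt=template) would be rejected, and the named variables no longer exist once the f-string has interpolated them. A hedged sketch of what the chain route would have needed instead, under the same pre-0.1 LangChain assumption and with the same placeholder repo_id:

# Sketch only: the chain path needs an unrendered PromptTemplate with
# {question}/{context} variables, not a pre-rendered f-string.
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["question", "context"],
    template=(
        "<|system|>\nYou are a intelligent chatbot and expertise in "
        "{context}.</s>\n<|user|>\n{question}.\n<|assistant|>"
    ),
)
llm = HuggingFaceHub(
    repo_id="HuggingFaceH4/zephyr-7b-beta",  # placeholder repo_id
    model_kwargs={"temperature": 0.5, "max_new_tokens": 256},
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
# run() accepts a dict when the chain has multiple input variables.
result = llm_chain.run({"question": "What is LangChain?", "context": "LLM tooling"})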