# openAssist.py — 46 lines (29 loc) · 1.39 KB
# Streamlit powers the chat UI defined elsewhere in this app.
import streamlit as st
# NOTE(review): this import path is from legacy LangChain (<0.1); newer
# releases moved these names — confirm the pinned langchain version.
from langchain import PromptTemplate, HuggingFaceHub, LLMChain
from dotenv import load_dotenv
# Load environment variables (presumably HUGGINGFACEHUB_API_TOKEN — verify .env).
load_dotenv()
def response(sentence,
             repo_id="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
             max_new_tokens=1200):
    """Generate a model reply to *sentence* via the Hugging Face Hub.

    Parameters
    ----------
    sentence : str
        The user's message to send to the model.
    repo_id : str, optional
        Hugging Face model repository to query. Defaults to the
        OpenAssistant pythia-12b SFT checkpoint (previous hard-coded value).
    max_new_tokens : int, optional
        Generation length cap passed through to the model (default 1200,
        matching the previous hard-coded value).

    Returns
    -------
    str
        The text produced by the model.
    """
    # OpenAssistant checkpoints expect their special chat-delimiter tokens
    # wrapped around the user prompt.
    template = """<|prompter|>{question}<|endoftext|><|assistant|>"""
    prompt = PromptTemplate(template=template, input_variables=["question"])
    # Requires a Hugging Face API token in the environment (loaded via .env).
    llm = HuggingFaceHub(
        repo_id=repo_id,
        model_kwargs={"max_new_tokens": max_new_tokens},
    )
    llm_chain = LLMChain(llm=llm, prompt=prompt)
    # Use a distinct local name so the function's own name isn't shadowed.
    answer = llm_chain.run(sentence)
    return answer
# def selfTry(sentence):
# template = """<|prompter|>you are my personal virtual assistant named 'ZARA'. {question}<|endoftext|><|assistant|>"""
# prompt = PromptTemplate(template=template, input_variables=["question"])
# llm=HuggingFaceHub(repo_id="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", model_kwargs={"max_new_tokens":1200})
# # llm=HuggingFaceHub(repo_id="jondurbin/airoboros-l2-70b-gpt4-1.4.1", model_kwargs={"max_new_tokens":1200})
# llm_chain=LLMChain(
# llm=llm,
# prompt=prompt
# )
# response = llm_chain.run(sentence)
# # print(response)
# return response