build-your-own-rag-chatbot

Fork · 0 stars · 68 lines · 2.2 KB
1
import streamlit as st

# NOTE(review): OpenAIEmbeddings and AstraDB are imported but unused in this
# chunk — presumably wired in by a later step of the RAG tutorial; confirm
# before removing.
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import AstraDB
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnableMap
7

8
# Cache prompt for future runs
@st.cache_data()
def load_prompt():
    """Build and cache the chat prompt template.

    Returns a ``ChatPromptTemplate`` consisting of a single system message
    with one input variable, ``question``.
    """
    # Fix: the original template spelled "assistent" — the misspelling was
    # sent verbatim to the LLM on every request.
    template = """You're a helpful AI assistant tasked to answer the user's questions.
You're friendly and you answer extensively with multiple sentences. You prefer to use bulletpoints to summarize.

QUESTION:
{question}

YOUR ANSWER:"""
    return ChatPromptTemplate.from_messages([("system", template)])
prompt = load_prompt()
20

21
# Cache OpenAI Chat Model for future runs
@st.cache_resource()
def load_chat_model():
    """Construct the OpenAI chat model used to answer questions.

    Cached with ``st.cache_resource`` so the client is created once per
    Streamlit session rather than on every rerun.
    """
    model_config = {
        'temperature': 0.3,
        'model': 'gpt-3.5-turbo',
        'streaming': True,
        'verbose': True,
    }
    return ChatOpenAI(**model_config)
chat_model = load_chat_model()
31

32
# Initialize the chat history in session state on the first run only;
# subsequent reruns keep the accumulated messages.
if 'messages' not in st.session_state:
    st.session_state.messages = []

# Page header
st.title("Your personal Efficiency Booster")
st.markdown("""Generative AI is considered to bring the next Industrial Revolution.  
Why? Studies show a **37% efficiency boost** in day to day work activities!""")

# Replay the full conversation (Streamlit reruns the script on every
# interaction, so the history must be redrawn each time).
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.markdown(msg['content'])
44

45
# Draw the chat input box
46
# Draw the chat input box and handle a newly submitted question
if question := st.chat_input("What's up?"):

    # Store the user's question in session state so it is redrawn on rerun
    st.session_state.messages.append({"role": "human", "content": question})

    # Draw the user's question
    with st.chat_message('human'):
        st.markdown(question)

    # Generate the answer by calling OpenAI's Chat Model.
    # The prompt template consumes the input dict directly, so the identity
    # RunnableMap({'question': lambda x: x['question']}) the original placed
    # in front of it was redundant and has been dropped.
    chain = prompt | chat_model
    response = chain.invoke({'question': question})
    answer = response.content

    # Store the bot's answer in session state so it is redrawn on rerun
    st.session_state.messages.append({"role": "ai", "content": answer})

    # Draw the bot's answer under the same role name it is stored with
    # ("ai"), so the immediate render matches the history redraw — the
    # original drew it as 'assistant' here but replayed it as 'ai'.
    with st.chat_message('ai'):
        st.markdown(answer)
69

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.