build-your-own-rag-chatbot

import streamlit as st
import os
from langchain_openai import OpenAIEmbeddings
from langchain_openai import ChatOpenAI
from langchain_community.vectorstores import AstraDB
from langchain.schema.runnable import RunnableMap
from langchain.prompts import ChatPromptTemplate
from langchain.callbacks.base import BaseCallbackHandler

# Streaming callback handler for responses
class StreamHandler(BaseCallbackHandler):
    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs):
        self.text += token
        self.container.markdown(self.text + "▌")

# Cache prompt for future runs
@st.cache_data()
def load_prompt():
    template = """You're a helpful AI assistant tasked to answer the user's questions.
You're friendly and you answer extensively with multiple sentences. You prefer to use bullet points to summarize.

CONTEXT:
{context}

QUESTION:
{question}

YOUR ANSWER:"""
    return ChatPromptTemplate.from_messages([("system", template)])
prompt = load_prompt()

# Cache OpenAI Chat Model for future runs
@st.cache_resource()
def load_chat_model():
    return ChatOpenAI(
        temperature=0.3,
        model='gpt-3.5-turbo',
        streaming=True,
        verbose=True
    )
chat_model = load_chat_model()

# Cache the Astra DB Vector Store for future runs
@st.cache_resource(show_spinner='Connecting to Astra')
def load_retriever():
    # Connect to the Vector Store
    vector_store = AstraDB(
        embedding=OpenAIEmbeddings(),
        collection_name="my_store",
        api_endpoint=st.secrets['ASTRA_API_ENDPOINT'],
        token=st.secrets['ASTRA_TOKEN']
    )

    # Get the retriever for the Chat Model
    retriever = vector_store.as_retriever(
        search_kwargs={"k": 5}
    )
    return retriever
retriever = load_retriever()

# Start with empty messages, stored in session state
if 'messages' not in st.session_state:
    st.session_state.messages = []

# Draw a title and some markdown
st.title("Your personal Efficiency Booster")
st.markdown("""Generative AI is considered to bring about the next Industrial Revolution.  
Why? Studies show a **37% efficiency boost** in day-to-day work activities!""")

# Draw all messages, both user and bot, so far (every time the app reruns)
for message in st.session_state.messages:
    st.chat_message(message['role']).markdown(message['content'])

# Draw the chat input box
if question := st.chat_input("What's up?"):

    # Store the user's question in a session object for redrawing next time
    st.session_state.messages.append({"role": "human", "content": question})

    # Draw the user's question
    with st.chat_message('human'):
        st.markdown(question)

    # UI placeholder to start filling with the agent's response
    with st.chat_message('assistant'):
        response_placeholder = st.empty()

    # Generate the answer by calling OpenAI's Chat Model
    inputs = RunnableMap({
        'context': lambda x: retriever.get_relevant_documents(x['question']),
        'question': lambda x: x['question']
    })
    chain = inputs | prompt | chat_model
    response = chain.invoke({'question': question}, config={'callbacks': [StreamHandler(response_placeholder)]})
    answer = response.content

    # Store the bot's answer in a session object for redrawing next time
    st.session_state.messages.append({"role": "ai", "content": answer})

    # Write the final answer without the cursor
    response_placeholder.markdown(answer)
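
The retriever above assumes the "my_store" collection in Astra DB has already been populated with embedded documents. The snippet below is a minimal ingestion sketch, not part of the app itself: the file name "knowledge.txt", the loader, and the chunk sizes are illustrative assumptions; only the collection name, the embedding model, and the Astra credentials are taken from the code above. Both the app and this sketch also expect an OpenAI API key to be available (typically via the OPENAI_API_KEY environment variable).

import streamlit as st
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import AstraDB
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Connect to the same collection the chatbot retrieves from
vector_store = AstraDB(
    embedding=OpenAIEmbeddings(),
    collection_name="my_store",
    api_endpoint=st.secrets['ASTRA_API_ENDPOINT'],
    token=st.secrets['ASTRA_TOKEN']
)

# "knowledge.txt" is a placeholder for whatever document you want the bot to answer from
documents = TextLoader("knowledge.txt").load()

# Split into overlapping chunks so the retriever can return focused passages
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(documents)
vector_store.add_documents(chunks)

If you run the sketch outside of Streamlit, replace st.secrets with environment variables or another secrets source; after one ingestion pass, the chatbot's retriever returns the five most relevant chunks per question.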
