# embedchain — Unacademy UPSC AI Streamlit demo (105 lines · 3.3 KB)
import queue
import threading

import streamlit as st

from embedchain import App
from embedchain.config import BaseLlmConfig
from embedchain.helpers.callbacks import (StreamingStdOutCallbackHandlerYield,
                                          generate)
9
10
@st.cache_resource
def unacademy_ai():
    """Build the Embedchain app backing this Streamlit session.

    ``st.cache_resource`` memoizes the result, so a single App instance
    is shared across script reruns instead of being rebuilt on every
    user interaction.
    """
    return App()
15
16
17app = unacademy_ai()
18
19assistant_avatar_url = "https://cdn-images-1.medium.com/v2/resize:fit:1200/1*LdFNhpOe7uIn-bHK9VUinA.jpeg"
20
21st.markdown(f"# <img src='{assistant_avatar_url}' width={35} /> Unacademy UPSC AI", unsafe_allow_html=True)
22
23styled_caption = """
24<p style="font-size: 17px; color: #aaa;">
25🚀 An <a href="https://github.com/embedchain/embedchain">Embedchain</a> app powered with Unacademy\'s UPSC data!
26</p>
27"""
28st.markdown(styled_caption, unsafe_allow_html=True)
29
30with st.expander(":grey[Want to create your own Unacademy UPSC AI?]"):
31st.write(
32"""
33```bash
34pip install embedchain
35```
36
37```python
38from embedchain import App
39unacademy_ai_app = App()
40unacademy_ai_app.add(
41"https://unacademy.com/content/upsc/study-material/plan-policy/atma-nirbhar-bharat-3-0/",
42data_type="web_page"
43)
44unacademy_ai_app.chat("What is Atma Nirbhar 3.0?")
45```
46
47For more information, checkout the [Embedchain docs](https://docs.embedchain.ai/get-started/quickstart).
48"""
49)
50
# Seed the conversation with a one-time assistant greeting on first load.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """Hi, I'm Unacademy UPSC AI bot, who can answer any questions related to UPSC preparation.
Let me help you prepare better for UPSC.\n
Sample questions:
- What are the subjects in UPSC CSE?
- What is the CSE scholarship price amount?
- What are different indian calendar forms?
""",
        }
    ]
64
# Replay the stored conversation; only assistant turns get the custom avatar.
for message in st.session_state.messages:
    role = message["role"]
    avatar = assistant_avatar_url if role == "assistant" else None
    with st.chat_message(role, avatar=avatar):
        st.markdown(message["content"])
69
if prompt := st.chat_input("Ask me anything!"):
    # Echo the user's message and persist it in the session history.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant", avatar=assistant_avatar_url):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""

        # Token queue bridging the LLM streaming callback (producer) and
        # this UI render loop (consumer).
        q = queue.Queue()

        def app_response(result):
            """Run the chat call with a streaming callback; stash answer/citations in *result*."""
            llm_config = app.llm.config.as_dict()
            llm_config["callbacks"] = [StreamingStdOutCallbackHandlerYield(q=q)]
            config = BaseLlmConfig(**llm_config)
            answer, citations = app.chat(prompt, config=config, citations=True)
            result["answer"] = answer
            result["citations"] = citations

        results = {}
        # BUG FIX: app_response was defined but never invoked, so nothing ever
        # fed the queue — generate(q) blocked forever and results["answer"]
        # raised KeyError. Run it on a worker thread so tokens stream into the
        # queue while the main thread renders them.
        thread = threading.Thread(target=app_response, args=(results,))
        thread.start()

        # Stream partial tokens into the placeholder as they arrive.
        for answer_chunk in generate(q):
            full_response += answer_chunk
            msg_placeholder.markdown(full_response)

        # Make sure the worker finished populating results before reading it.
        thread.join()
        answer, citations = results["answer"], results["citations"]

        # Append deduplicated source URLs (second element of each citation)
        # below the answer.
        if citations:
            full_response += "\n\n**Sources**:\n"
            sources = list(set(map(lambda x: x[1], citations)))
            for i, source in enumerate(sources):
                full_response += f"{i+1}. {source}\n"

        msg_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
106