import os
import asyncio
from typing import Any

import uvicorn
from fastapi import FastAPI, Body
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain.schema import LLMResult

app = FastAPI()

# initialize the agent at module level (we need to do this here so we can
# attach a fresh callback handler to the LLM for every request)
llm = ChatOpenAI(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    temperature=0.0,
    model_name="gpt-3.5-turbo",
    streaming=True,  # ! important: emit tokens as they are generated
    callbacks=[]  # ! important: replaced with a per-request handler below
)
memory = ConversationBufferWindowMemory(
    memory_key="chat_history",
    k=5,
    return_messages=True,
    output_key="output"
)
agent = initialize_agent(
    agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
    tools=[],
    llm=llm,
    verbose=True,
    max_iterations=3,
    early_stopping_method="generate",
    memory=memory,
    return_intermediate_steps=False
)

class AsyncCallbackHandler(AsyncIteratorCallbackHandler):
    """Filters the token stream so only the final answer reaches the client."""
    content: str = ""
    final_answer: bool = False

    def __init__(self) -> None:
        super().__init__()

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.content += token
        # once we have passed the "Final Answer" marker, put tokens in the queue
        if self.final_answer:
            if '"action_input": "' in self.content:
                if token not in ['"', "}"]:
                    self.queue.put_nowait(token)
        elif "Final Answer" in self.content:
            self.final_answer = True
            self.content = ""

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        if self.final_answer:
            self.content = ""
            self.final_answer = False
            self.done.set()  # signals aiter() that the stream is finished
        else:
            self.content = ""

async def run_call(query: str, stream_it: AsyncCallbackHandler):
    # assign the callback handler for this request
    agent.agent.llm_chain.llm.callbacks = [stream_it]
    # now query
    try:
        await agent.acall(inputs={"input": query})
    finally:
        stream_it.done.set()  # unblock the stream even if the call errors out

# request input format
class Query(BaseModel):
    text: str

async def create_gen(query: str, stream_it: AsyncCallbackHandler):
    task = asyncio.create_task(run_call(query, stream_it))
    async for token in stream_it.aiter():
        yield token
    await task

@app.post("/chat")
async def chat(
    query: Query = Body(...),
):
    stream_it = AsyncCallbackHandler()
    gen = create_gen(query.text, stream_it)
    return StreamingResponse(gen, media_type="text/event-stream")

@app.get("/health")
async def health():
    """Check the API is running"""
    return {"status": "🤙"}

if __name__ == "__main__":
    uvicorn.run(
        "app:app",
        host="localhost",
        port=8000,
        reload=True
    )
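Below is a minimal client sketch for consuming the stream, assuming the file above is saved as app.py and started with python app.py (the URL and prompt text here are illustrative). Note that although the endpoint declares the text/event-stream media type, create_gen yields raw tokens without SSE framing, so reading raw chunks incrementally is enough:

import requests

# stream tokens from the /chat endpoint as they arrive
with requests.post(
    "http://localhost:8000/chat",
    json={"text": "tell me a long story"},
    stream=True,  # read the body incrementally instead of buffering it all
) as response:
    response.raise_for_status()
    response.encoding = "utf-8"  # token bytes are UTF-8; requests would otherwise guess
    for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)

Any HTTP client that exposes incremental reads works the same way (for example httpx.stream or aiohttp's content iterator); the point is that tokens are printed as create_gen yields them, not after the agent finishes.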
