llama-index
136 lines · 4.5 KB
1"""ReAct agent.
2
3Simple wrapper around AgentRunner + ReActAgentWorker.
4
5For the legacy implementation see:
6```python
7from llama_index.legacy.agent.legacy.react.base import ReActAgent
8```
9
10"""
11
12from typing import (13Any,14List,15Optional,16Sequence,17Type,18)
19
20from llama_index.legacy.agent.react.formatter import ReActChatFormatter21from llama_index.legacy.agent.react.output_parser import ReActOutputParser22from llama_index.legacy.agent.react.step import ReActAgentWorker23from llama_index.legacy.agent.runner.base import AgentRunner24from llama_index.legacy.callbacks import (25CallbackManager,26)
27from llama_index.legacy.core.llms.types import ChatMessage28from llama_index.legacy.llms.llm import LLM29from llama_index.legacy.llms.openai import OpenAI30from llama_index.legacy.memory.chat_memory_buffer import ChatMemoryBuffer31from llama_index.legacy.memory.types import BaseMemory32from llama_index.legacy.objects.base import ObjectRetriever33from llama_index.legacy.prompts.mixin import PromptMixinType34from llama_index.legacy.tools import BaseTool35
# Default OpenAI model used by ``ReActAgent.from_tools`` when no ``llm`` is
# supplied by the caller.
DEFAULT_MODEL_NAME = "gpt-3.5-turbo-0613"
38
class ReActAgent(AgentRunner):
    """ReAct agent.

    Thin convenience subclass of ``AgentRunner`` that wires in a
    ``ReActAgentWorker`` as its step engine.

    For the legacy implementation see:
    ```python
    from llama_index.legacy.agent.legacy.react.base import ReActAgent
    ```

    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        llm: LLM,
        memory: BaseMemory,
        max_iterations: int = 10,
        react_chat_formatter: Optional[ReActChatFormatter] = None,
        output_parser: Optional[ReActOutputParser] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        context: Optional[str] = None,
    ) -> None:
        """Init params."""
        # Fall back to the LLM's callback manager when none is supplied
        # (truthiness check kept deliberately, matching upstream behavior).
        callback_manager = callback_manager or llm.callback_manager

        # ``context`` and an explicit formatter are mutually exclusive; a
        # context string is just sugar for building a formatter from it.
        if context:
            if react_chat_formatter:
                raise ValueError("Cannot provide both context and react_chat_formatter")
            react_chat_formatter = ReActChatFormatter.from_context(context)

        worker = ReActAgentWorker.from_tools(
            tools=tools,
            tool_retriever=tool_retriever,
            llm=llm,
            max_iterations=max_iterations,
            react_chat_formatter=react_chat_formatter,
            output_parser=output_parser,
            callback_manager=callback_manager,
            verbose=verbose,
        )
        super().__init__(
            worker,
            memory=memory,
            llm=llm,
            callback_manager=callback_manager,
        )

    @classmethod
    def from_tools(
        cls,
        tools: Optional[List[BaseTool]] = None,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        llm: Optional[LLM] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        memory: Optional[BaseMemory] = None,
        memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
        max_iterations: int = 10,
        react_chat_formatter: Optional[ReActChatFormatter] = None,
        output_parser: Optional[ReActOutputParser] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        context: Optional[str] = None,
        **kwargs: Any,
    ) -> "ReActAgent":
        """Convenience constructor from an (optional) set of BaseTools.

        NOTE: kwargs should have been exhausted by this point. In other words
        the various upstream components such as BaseSynthesizer (response
        synthesizer) or BaseRetriever should have picked up off their
        respective kwargs in their constructions.

        Returns:
            ReActAgent
        """
        # Default to an OpenAI LLM when the caller does not pass one.
        resolved_llm = llm or OpenAI(model=DEFAULT_MODEL_NAME)
        if callback_manager is not None:
            resolved_llm.callback_manager = callback_manager

        # Build a memory buffer seeded from chat history unless one is given.
        resolved_memory = memory or memory_cls.from_defaults(
            chat_history=chat_history or [], llm=resolved_llm
        )

        return cls(
            tools=tools or [],
            tool_retriever=tool_retriever,
            llm=resolved_llm,
            memory=resolved_memory,
            max_iterations=max_iterations,
            react_chat_formatter=react_chat_formatter,
            output_parser=output_parser,
            callback_manager=callback_manager,
            verbose=verbose,
            context=context,
        )

    def _get_prompt_modules(self) -> PromptMixinType:
        """Get prompt modules."""
        return {"agent_worker": self.agent_worker}