# llama-index example: NetworkQueryEngine client script
"""Network Query Engine.

Make sure the app in `contributor.py` is running before trying to run this
script. Run `python contributor.py`.
"""

import asyncio

from llama_index.llms.openai import OpenAI
from llama_index.networks.contributor import ContributorClient
from llama_index.networks.query_engine import NetworkQueryEngine

# Client that forwards queries to the locally running contributor service;
# connection settings are loaded from the env file.
client = ContributorClient.from_config_file(env_file=".env.contributor.client")

# build NetworkRAG: route queries through the contributor via an OpenAI LLM
llm = OpenAI()
network_query_engine = NetworkQueryEngine.from_args(contributors=[client], llm=llm)

if __name__ == "__main__":
    # Synchronous query path.
    sync_res = network_query_engine.query("Who is paul")
    print(sync_res)
    print("\n")

    # Asynchronous query path, driven to completion with asyncio.run.
    async_res = asyncio.run(network_query_engine.aquery("Who is paul"))
    print(async_res)
25