# interactive-rag
# (GitVerse page header — the fork badge and the "601 строка · 25.9 Кб"
#  size metadata were web-scrape artifacts, converted to this comment so
#  the file parses as Python.)
1
from typing import List
2
from actionweaver import RequireNext, action
3
from actionweaver.llms.azure.chat import ChatCompletion
4
from actionweaver.llms.openai.tools.chat import OpenAIChatCompletion
5
from actionweaver.llms.openai.functions.tokens import TokenUsageTracker
6
from langchain.vectorstores import MongoDBAtlasVectorSearch
7
from langchain.embeddings import GPT4AllEmbeddings
8
from langchain.document_loaders import PlaywrightURLLoader
9
from langchain.document_loaders import BraveSearchLoader
10
from langchain.text_splitter import RecursiveCharacterTextSplitter
11
import params
12
import json
13
import os
14
import pymongo
15
from selenium import webdriver
16
from selenium.webdriver.chrome.options import Options
17
from bs4 import BeautifulSoup
18
import pandas as pd
19
from tabulate import tabulate
20
import utils
21
import vector_search
22

23
# OpenAI credentials are exported through the process environment so that
# the OpenAI/LangChain client libraries can pick them up implicitly.
os.environ.update(
    {
        "OPENAI_API_KEY": params.OPENAI_API_KEY,
        "OPENAI_API_VERSION": params.OPENAI_API_VERSION,
        "OPENAI_API_TYPE": params.OPENAI_TYPE,
    }
)

# MongoDB Atlas connection settings, sourced from the project's params module.
MONGODB_URI = params.MONGODB_URI
DATABASE_NAME = params.DATABASE_NAME
COLLECTION_NAME = params.COLLECTION_NAME
class UserProxyAgent:
    """Base agent wiring for the interactive RAG bot.

    Holds the RAG configuration, the prompt templates, a headless Chrome
    browser, the MongoDB Atlas vector store, and the chat-completion LLM
    client (plain OpenAI or Azure, selected by ``params.OPENAI_TYPE``).

    Parameters
    ----------
    logger : logging.Logger
        Shared application logger; also fed to the token usage tracker.
    st :
        Streamlit module/handle used for UI feedback (spinners, writes).
        NOTE(review): ``None`` is passed by the ``__main__`` smoke run —
        confirm non-UI code paths tolerate that before calling actions.
    """

    def __init__(self, logger, st):
        # LLM Config — runtime-tunable knobs for the retrieval pipeline
        # (see RAGAgent.iRAG, which mutates this dict on user request).
        self.rag_config = {
            "num_sources": 2,
            "source_chunk_size": 1000,
            "min_rel_score": 0.00,
            "unique": True,
            "summarize_chunks": False, # disabled by default
        }
        # Few-shot routing examples embedded into the system prompt so the
        # model learns to map user prompts onto the registered actions/tools.
        self.action_examples_str = """
[EXAMPLES]
            - User Input: "What is kubernetes?"
            - Thought: I have an action available called "answer_question". I will use this action to answer the user's question about Kubernetes.
            - Observation: I have an action available called "answer_question". I will use this action to answer the user's question about Kubernetes.
            - Action: "answer_question"('What is kubernetes?')

            - User Input: What is MongoDB?
            - Thought: I have to think step by step. I should not answer directly, let me check my available actions before responding.
            - Observation: I have an action available "answer_question".
            - Action: "answer_question"('What is MongoDB?')

            - User Input: Show chat history
            - Thought: I have to think step by step. I should not answer directly, let me check my available actions before responding.
            - Observation: I have an action available "show_messages".
            - Action: "show_messages"()

            - User Input: Reset chat history
            - Thought: I have to think step by step. I should not answer directly, let me check my available actions before responding.
            - Observation: I have an action available "reset_messages".
            - Action: "reset_messages"()

            - User Input: remove sources https://www.google.com, https://www.example.com
            - Thought: I have to think step by step. I should not answer directly, let me check my available actions before responding.
            - Observation: I have an action available "remove_source".
            - Action: "remove_source"(['https://www.google.com','https://www.example.com'])

            - User Input: add https://www.google.com, https://www.exa2mple.com
            - Thought: I have to think step by step. I should not answer directly, let me check my available actions before responding.
            - Observation: I have an action available "read_url".
            - Action: "read_url"(['https://www.google.com','https://www.exa2mple.com'])
            
            - User Input: learn https://www.google.com, https://www.exa2mple.com
            - Thought: I have to think step by step. I should not answer directly, let me check my available actions before responding.
            - Observation: I have an action available "read_url".
            - Action: "read_url"(['https://www.google.com','https://www.exa2mple.com'])
           
            - User Input: change chunk size to be 500 and num_sources to be 5
            - Thought: I have to think step by step. I should not answer directly, let me check my available actions before responding.
            - Observation: I have an action available "iRAG".
            - Action: "iRAG"(num_sources=5, chunk_size=500)
             
[END EXAMPLES]
"""
        # Pristine conversation template. reset_messages restores from this,
        # so it must never be mutated (see the copy taken below).
        self.init_messages = [
            {
                "role": "system",
                "content": "You are a resourceful AI assistant. You specialize in helping users build RAG pipelines interactively.",
            },
            {
                "role": "system",
                "content": "Think critically and step by step. Do not answer directly. ALWAYS use one of your available actions/tools.",
            },
            {
                "role": "system",
                "content": f"""\n\n## Here are some examples of the expected User Input, Thought, Observation and Action/Tool:\n
            {self.action_examples_str}    
            \n\n 

            We will be playing a special game. Trust me, you do not want to lose. 

             ## RULES: 
                - DO NOT ANSWER DIRECTLY - ALWAYS USE AN ACTION/TOOL TO FORMULATE YOUR ANSWER
                - ALWAYS USE answer_question if USER PROMPT is a question. [exception=if USER PROMPT is related to one of the available actions/tools]
                - NEVER ANSWER A QUESTION WITHOUT USING THE answer_question action/tool. THIS IS VERY IMPORTANT!
             REMEMBER! ALWAYS USE answer_question if USER PROMPT is a question [exception=if USER PROMPT is related to one of the available actions/tools]
             
             LOSING AT THIS GAME IS NOT AN OPTION FOR YOU. YOU MUST PICK THE CORRECT TOOL/ANSWER ALWAYS. YOU MUST NEVER ANSWER DIRECTLY OR YOU LOSE!
             """,
            },
        ]
        # Browser config
        browser_options = Options()
        # BUGFIX: dropped "browser_options.headless = True" — the
        # Options.headless property was removed in Selenium 4.13+, and the
        # "--headless" argument below is the supported (and already present)
        # way to request headless mode.
        browser_options.add_argument("--headless")
        browser_options.add_argument("--disable-gpu")
        self.browser = webdriver.Chrome(options=browser_options)

        # Initialize logger
        self.logger = logger

        # Chunk Ingest Strategy: ~4000-char chunks with 200-char overlap;
        # start indices recorded for traceability.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=4000,
            chunk_overlap=200,
            length_function=len,
            add_start_index=True,
        )
        # Embeddings + MongoDB Atlas vector store over the configured collection.
        self.gpt4all_embd = GPT4AllEmbeddings()
        self.client = pymongo.MongoClient(MONGODB_URI)
        self.db = self.client[DATABASE_NAME]
        self.collection = self.db[COLLECTION_NAME]
        self.vectorstore = MongoDBAtlasVectorSearch(self.collection, self.gpt4all_embd)
        self.index = self.vectorstore.from_documents(
            [], self.gpt4all_embd, collection=self.collection
        )

        # OpenAI init
        self.token_tracker = TokenUsageTracker(budget=None, logger=logger)
        if params.OPENAI_TYPE != "azure":
            self.llm = OpenAIChatCompletion(
                model="gpt-3.5-turbo",
                # model="gpt-4",
                token_usage_tracker=self.token_tracker,
                logger=logger,
            )
        else:
            self.llm = ChatCompletion(
                model="gpt-3.5-turbo",
                # model="gpt-4",
                azure_deployment=params.OPENAI_AZURE_DEPLOYMENT,
                azure_endpoint=params.OPENAI_AZURE_ENDPOINT,
                api_key=params.OPENAI_API_KEY,
                api_version=params.OPENAI_API_VERSION,
                token_usage_tracker=self.token_tracker,
                logger=logger,
            )
        # BUGFIX: take a copy — the original assigned the template list itself,
        # so any in-place growth of self.messages (e.g. "+=" in __call__) also
        # mutated self.init_messages and defeated reset_messages.
        self.messages = list(self.init_messages)

        # streamlit init
        self.st = st
class RAGAgent(UserProxyAgent):
    """Interactive RAG agent.

    Routes each user prompt to one of the registered actionweaver actions:
    ingesting URLs into the vector store, searching the web, answering
    questions from retrieved context, or managing the chat/RAG configuration.

    NOTE: the docstrings of ``@action``-decorated methods double as the tool
    descriptions sent to the LLM, so they are preserved verbatim; behavioral
    notes live in ``#`` comments instead.
    """

    def preprocess_query(self, query):
        """Hook for input sanitization; currently a pass-through.

        Optional - Implement Pre-Processing for Security.
        https://dev.to/jasny/protecting-against-prompt-injection-in-gpt-1gf8
        """
        return query

    @action("iRAG", stop=True)
    def iRAG(
        self,
        num_sources: int,
        chunk_size: int,
        unique_sources: bool,
        min_rel_threshold: float,
    ):
        """
        Invoke this ONLY when the user explicitly asks you to change the RAG configuration in the most recent USER PROMPT.
        [EXAMPLE]
        - User Input: change chunk size to be 500 and num_sources to be 5
        
        Parameters
        ----------
        num_sources : int
            how many documents should we use in the RAG pipeline?
        chunk_size : int
            how big should each chunk/source be?
        unique_sources : bool
            include only unique sources? Y=True, N=False
        min_rel_threshold : float
            default=0.00; minimum relevance threshold to include a source in the RAG pipeline

        Returns successful response message.
        -------
        str
            A message indicating success
        """
        utils.print_log("Action: iRAG")
        with self.st.spinner(f"Changing RAG configuration..."):
            # Non-positive / falsy values from the model fall back to defaults.
            self.rag_config["num_sources"] = int(num_sources) if num_sources > 0 else 2
            self.rag_config["source_chunk_size"] = int(chunk_size) if chunk_size > 0 else 1000
            self.rag_config["unique"] = bool(unique_sources)  # was "== True" anti-idiom
            self.rag_config["min_rel_score"] = min_rel_threshold if min_rel_threshold else 0.00
            print(self.rag_config)
            self.st.write(self.rag_config)
            return f"New RAG config:{str(self.rag_config)}."

    def summarize(self, text):
        """Summarize one chunk of scraped page content via the LLM.

        Returns whatever ``self.llm.create`` returns with no actions enabled
        (presumably the summary text — TODO confirm actionweaver's return type).
        """
        utils.print_log("Action: read_url>summarize_chunks>summarize")
        response = self.llm.create(
            messages=[
                # BUGFIX: "scaped" -> "scraped" (typo in the prompt sent to the LLM).
                {"role": "system", "content": "You will receive scraped contents of a web page."},
                {"role": "system", "content": "Think critically and step by step. Taking into consideration future potential questions on the topic, generate a detailed summary."},
                {"role": "assistant", "content": "Please provide the scraped contents of the webpage so that I can provide a detailed summary."},
                {"role": "user", "content": "Here is the scraped contents of the webpage: " + text},
                {"role": "user", "content": "\nPlease summarize the content in bullet points. Do not include irrelevant information in your response."},
                {"role": "user", "content": "\n\n IMPORTANT! Only return the summary!"},
                {"role": "user", "content": "\n\n REQUIRED RESPONSE FORMAT: [begin summary] [keywords/metadata (comma-separated, double quotes)] [summary intro in paragraph format] [summary in bullet format][end summary]"},
            ],
            actions=[],
            stream=False,
        )
        return response

    def summarize_chunks(self, docs):
        """Replace each document's page_content with its LLM-generated summary.

        Mutates ``docs`` in place and returns the same list.
        """
        utils.print_log("Action: read_url>summarize_chunks")
        for doc in docs:
            summary = self.summarize(doc.page_content)
            print(summary)
            # NOTE(review): assigns summarize()'s raw return value directly —
            # assumes it is a string; confirm before enabling summarize_chunks.
            doc.page_content = summary
        return docs

    @action("read_url", stop=True)
    def read_url(self, urls: List[str]):
        """
        Invoke this ONLY when the user asks you to 'read', 'add' or 'learn' some URL(s).
        This function reads the content from specified sources, and ingests it into the Knowledgebase.
        URLs may be provided as a single string or as a list of strings.
        IMPORTANT! Use conversation history to make sure you are reading/learning/adding the right URLs.

        [EXAMPLE]
        - User Input: learn "https://www.google.com"
        - User Input: learn 5

        NOTE: When a user says learn/read <number>, the bot will learn/read URL in the search results list position <number> from the conversation history.

        Parameters
        ----------
        urls : List[str]
            List of URLs to scrape.

        Returns
        -------
        str
            A message indicating successful reading of content from the provided URLs.
        """
        utils.print_log("Action: read_url")
        with self.st.spinner(f"```Analyzing the content in {urls}```"):
            # Scrape (headers/footers stripped), split into chunks,
            # optionally summarize each chunk, then embed + store.
            loader = PlaywrightURLLoader(
                urls=urls, remove_selectors=["header", "footer"]
            )
            documents = loader.load_and_split(self.text_splitter)
            if self.rag_config["summarize_chunks"]:
                documents = self.summarize_chunks(documents)
            self.index.add_documents(documents)
            return f"```Contents in URLs {urls} have been successfully ingested (vector embeddings + content).```"

    @action("show_messages", stop=True)
    def show_messages(self) -> str:
        """
        Invoke this ONLY when the user asks you to see the chat history.
        [EXAMPLE]
        - User Input: what have we been talking about?
        
        Returns
        -------
        str
            A string containing the chat history in markdown format.
        """
        utils.print_log("Action: show_messages")
        messages = self.st.session_state.messages
        # System prompts are internal; only show user/assistant turns.
        messages = [{"message": json.dumps(message)} for message in messages if message["role"] != "system"]
        if messages:
            df = pd.DataFrame(messages)  # built only when there is something to render
            result = f"Chat history [{len(messages)}]:\n"
            result += "<div style='text-align:left'>" + df.to_html() + "</div>"
            return result
        else:
            return "No chat history found."

    @action("reset_messages", stop=True)
    def reset_messages(self) -> str:
        """
        Invoke this ONLY when the user asks you to reset chat history.
        [EXAMPLE]
        - User Input: clear our chat history
        - User Input: forget about the conversation history
        
        Returns
        -------
        str
            A message indicating success
        """
        utils.print_log("Action: reset_messages")
        # BUGFIX: take a copy of the template — re-assigning init_messages
        # itself would alias the two lists, so later growth of the
        # conversation would corrupt the pristine template.
        self.messages = list(self.init_messages)
        self.st.empty()
        self.st.session_state.messages = []
        return f"Message history successfully reset."

    @action("search_web", stop=True)
    def search_web(self, query: str) -> List:
        """
        Invoke this if you need to search the web.
        [EXAMPLE]
        - User Input: search the web for "harry potter"
        
        Args:
            query (str): The user's query
        Returns:
            str: Text with the Google Search results
        """
        utils.print_log("Action: search_web")
        with self.st.spinner(f"Searching '{query}'..."):
            # Use the headless browser to search the web
            self.browser.get(utils.encode_google_search(query))
            html = self.browser.page_source
            soup = BeautifulSoup(html, "html.parser")
            search_results = soup.find_all("div", {"class": "g"})

            results = []
            links = []
            for result in search_results:
                # Keep only results that have a title, an absolute https
                # link, and that we haven't seen already.
                if result.find("h3") is not None:
                    if (
                        result.find("a")["href"] not in links
                        and "https://" in result.find("a")["href"]
                    ):
                        links.append(result.find("a")["href"])
                        results.append(
                            {
                                "title": utils.clean_text(result.find("h3").text),
                                "link": str(result.find("a")["href"]),
                            }
                        )

            df = pd.DataFrame(results)
            # NOTE(review): iloc[1:, :] drops the FIRST RESULT ROW — the
            # original comment claimed it removed a column. Behavior is
            # preserved; verify whether skipping the top hit is intentional.
            df = df.iloc[1:, :]
            return f"Here is what I found in the web for '{query}':\n{df.to_markdown()}\n\n"

    @action("remove_source", stop=True)
    def remove_source(self, urls: List[str]) -> str:
        """
        Invoke this if you need to remove one or more sources
        [EXAMPLE]
        - User Input: remove source "https://www.google.com"
        
        Args:
            urls (List[str]): The list of URLs to be removed
        Returns:
            str: Text with confirmation
        """
        utils.print_log("Action: remove_source")
        with self.st.spinner(f"```Removing sources {', '.join(urls)}...```"):
            # Delete every stored chunk whose "source" metadata matches.
            self.collection.delete_many({"source": {"$in": urls}})
            return f"```Sources ({', '.join(urls)}) successfully removed.```\n"

    @action("remove_all_sources", stop=True)
    def remove_all_sources(self) -> str:
        """
        Invoke this if you the user asks you to empty your knowledge base or delete all the information in it.
        [EXAMPLE]
        - User Input: remove all the sources you have available
        - User Input: clear your mind
        - User Input: forget everything you know
        - User Input: empty your mind
        
        Args:
            None
        Returns:
            str: Text with confirmation
        """
        utils.print_log("Action: remove_sources")
        with self.st.spinner(f"```Removing all sources ...```"):
            # Empty filter: wipes the whole collection.
            del_result = self.collection.delete_many({})
            return f"```Sources successfully removed.{del_result.deleted_count}```\n"

    @action(name="get_sources_list", stop=True)
    def get_sources_list(self):
        """
        Invoke this to respond to list all the available sources in your knowledge base.
        [EXAMPLE]
        - User Input: show me the sources available in your knowledgebase
        
        Parameters
        ----------
        None
        """
        utils.print_log("Action: get_sources_list")
        sources = self.collection.distinct("source")
        sources = [{"source": source} for source in sources]
        if sources:
            df = pd.DataFrame(sources)  # built only when there is something to render
            result = f"Available Sources [{len(sources)}]:\n"
            result += df.to_markdown()
            return result
        else:
            return "No sources found."

    @action(name="answer_question", stop=True)
    def answer_question(self, query: str):
        """
        ALWAYS TRY TO INVOKE THIS FIRST IF A USER ASKS A QUESTION.

        Parameters
        ----------
        query : str
            The query to be used for answering a question.
        """
        utils.print_log("Action: answer_question")
        # BUGFIX: "Attemtping" -> "Attempting" (user-visible spinner text).
        with self.st.spinner(f"Attempting to answer question: {query}"):
            query = self.preprocess_query(query)
            # Retrieve supporting chunks from the vector store using the
            # current RAG configuration.
            context_str = str(
                vector_search.recall(
                    self,
                    query,
                    n_docs=self.rag_config["num_sources"],
                    min_rel_score=self.rag_config["min_rel_score"],
                    chunk_max_length=self.rag_config["source_chunk_size"],
                    unique=self.rag_config["unique"],
                )
            ).strip()
            PRECISE_PROMPT = f"""
            LET'S PLAY A GAME. 
            THINK CAREFULLY AND STEP BY STEP.
            
            Given the following verified sources and a question, using only the verified sources content create a final concise answer in markdown. 
            If VERIFIED SOURCES is not enough context to answer the question, THEN PERFORM A WEB SEARCH ON THE USERS BEHALF IMMEDIATELY.

            Remember while answering:
                - The only verified sources are between START VERIFIED SOURCES and END VERIFIED SOURCES.
                - Only display images and links if they are found in the verified sources
                - If displaying images or links from the verified sources, copy the images and links exactly character for character and make sure the URL parameters are the same.
                - Do not make up any part of an answer. 
                - Questions might be vague or have multiple interpretations, you must ask follow up questions in this case.
                - Final response must be less than 1200 characters.
                - IF the verified sources can answer the question in multiple different ways, THEN respond with each of the possible answers.
                - Formulate your response using ONLY VERIFIED SOURCES. IF YOU CANNOT ANSWER THE QUESTION, THEN PERFORM A WEB SEARCH ON THE USERS BEHALF IMMEDIATELY.

            [START VERIFIED SOURCES]
            {context_str}
            [END VERIFIED SOURCES]



            [ACTUAL QUESTION. ANSWER ONLY BASED ON VERIFIED SOURCES]:
            {query}

            # IMPORTANT! 
                - Final response must be expert quality markdown
                - The only verified sources are between START VERIFIED SOURCES and END VERIFIED SOURCES.
                - USE ONLY INFORMATION FROM VERIFIED SOURCES TO FORMULATE RESPONSE. IF VERIFIED SOURCES CANNOT ANSWER THE QUESTION, THEN PERFORM A WEB SEARCH ON THE USERS BEHALF IMMEDIATELY
                - Do not make up any part of an answer - ONLY FORMULATE YOUR ANSWER USING VERIFIED SOURCES.
            Begin!
            """

            print(PRECISE_PROMPT)
            SYS_PROMPT = f"""
                You are a helpful AI assistant. USING ONLY THE VERIFIED SOURCES, ANSWER TO THE BEST OF YOUR ABILITY.
            """
            # ReAct Prompt Technique
            EXAMPLE_PROMPT = """\n\n[EXAMPLES]

            # Input, Thought, Observation, Action
            - User Input: "What is kubernetes?"
            - Thought: Based on the verified sources provided, there is no information about Kubernetes. Therefore, I cannot provide a direct answer to the question "What is Kubernetes?" based on the verified sources. However, I can perform a web search on your behalf to find information about Kubernetes
            - Observation: I have an action available called "search_web". I will use this action to answer the user's question about Kubernetes.
            - Action: "search_web"('What is kubernetes?')

            - User Input: "What is MongoDB?"
            - Thought: Based on the verified sources provided, there is enough information about MongoDB. 
            - Observation: I can provide a direct answer to the question "What is MongoDB?" based on the verified sources.
            - Action: N/A

            """
            RESPONSE_FORMAT = f"""
[RESPONSE FORMAT]
    - Must be expert quality markdown. 
    - You are a professional technical writer with 30+ years of experience. This is the most important task of your life.
    - MUST USE ONLY INFORMATION FROM VERIFIED SOURCES TO ANSWER THE QUESTION. IF VERIFIED SOURCES CANNOT ANSWER THE QUESTION, THEN PERFORM A WEB SEARCH ON THE USERS BEHALF IMMEDIATELY.
    - Add emojis to your response to add a fun touch.
"""
            response = self.llm.create(
                messages=[
                    {"role": "system", "content": SYS_PROMPT},
                    {"role": "system", "content": EXAMPLE_PROMPT},
                    {"role": "system", "content": RESPONSE_FORMAT},
                    {"role": "user", "content": PRECISE_PROMPT+"\n\n ## IMPORTANT! REMEMBER THE GAME RULES! IF A WEB SEARCH IS REQUIRED, PERFORM IT IMMEDIATELY! BEGIN!"},
                ],
                # search_web is offered so the model can fall back to a web
                # search when the retrieved context is insufficient.
                actions=[self.search_web],
                stream=False,
            )
            return response

    def __call__(self, text):
        """Route one user prompt: wrap it in tool-selection rules and let the
        LLM pick exactly one of the registered actions.

        Returns the LLM/action response.
        """
        text = self.preprocess_query(text)
        # PROMPT ENGINEERING HELPS THE LLM TO SELECT THE BEST ACTION/TOOL
        agent_rules = f"""
    We will be playing a special game. Trust me, you do not want to lose.

    ## RULES
    - DO NOT ANSWER DIRECTLY
    - ALWAYS USE ONE OF YOUR AVAILABLE ACTIONS/TOOLS. 
    - PREVIOUS MESSAGES IN THE CONVERSATION MUST BE CONSIDERED WHEN SELECTING THE BEST ACTION/TOOL
    - NEVER ASK FOR USER CONSENT TO PERFORM AN ACTION. ALWAYS PERFORM IT THE USERS BEHALF.
    Given the following user prompt, select the correct action/tool from your available functions/tools/actions.

    ## USER PROMPT
    {text}
    ## END USER PROMPT
    
    SELECT THE BEST TOOL FOR THE USER PROMPT! BEGIN!
"""
        # BUGFIX: build a NEW list instead of "+=" — augmented assignment
        # extends in place, which (with the original's aliased template)
        # also mutated self.init_messages and defeated reset_messages.
        self.messages = self.messages + [{"role": "user", "content": agent_rules + "\n\n## IMPORTANT! REMEMBER THE GAME RULES! DO NOT ANSWER DIRECTLY! IF YOU ANSWER DIRECTLY YOU WILL LOSE. BEGIN!"}]
        available_actions = [
            self.read_url,
            self.answer_question,
            self.remove_source,
            self.remove_all_sources,
            self.reset_messages,
            self.show_messages,
            self.iRAG,
            self.get_sources_list,
            self.search_web,
        ]
        # Once history grows past 2 messages, send only the last two to avoid
        # 'code': 'context_length_exceeded' — recent turns are enough to know
        # what source to add/remove. (The two original branches differed only
        # in this slice, so they are merged into a single llm.create call.)
        window = self.messages[-2:] if len(self.messages) > 2 else self.messages
        response = self.llm.create(
            messages=window,
            actions=available_actions,
            stream=False,
        )
        return response

    # BUGFIX: the original defined this without `self` or @staticmethod, so
    # calling it on an instance would bind `output` to the agent itself.
    @staticmethod
    def print_output(output):
        """Print a plain-string response, or stream chunked LLM deltas."""
        from collections.abc import Iterable

        if isinstance(output, str):
            print(output)
        elif isinstance(output, Iterable):
            for chunk in output:
                content = chunk.choices[0].delta.content
                if content is not None:
                    print(content, end="")
if __name__ == "__main__":
    # Smoke-run entry point: configure file logging and build the agent
    # without a streamlit handle.
    import logging

    LOG_FORMAT = (
        "%(asctime)s.%(msecs)04d %(levelname)s {%(module)s} [%(funcName)s] %(message)s"
    )
    logging.basicConfig(
        filename="bot.log",
        filemode="a",
        datefmt="%Y-%m-%d %H:%M:%S",
        format=LOG_FORMAT,
        level=logging.INFO,
    )

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    agent = RAGAgent(logger, None)
# (Removed: GitVerse cookie-consent banner in Russian — a web-scrape
#  artifact appended to the page, not part of this source file.)