# local-llm-with-rag
import argparse
import sys

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.llms import Ollama
from langchain_community.vectorstores import Chroma

from document_loader import load_documents
from models import check_if_model_is_available

# Module-wide splitter: ~1000-character chunks with a 100-character overlap
# so that sentences spanning a chunk boundary are not lost to retrieval.
TEXT_SPLITTER = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
# Prompt handed to the RetrievalQA chain. The chain fills {context} with the
# retrieved document chunks and {question} with the user's query.
PROMPT_TEMPLATE = """
### Instruction:
You're helpful assistant, who answers questions based upon provided research in a distinct and clear way.

## Research:
{context}

## Question:
{question}
"""

PROMPT = PromptTemplate(
    template=PROMPT_TEMPLATE, input_variables=["context", "question"]
)
def load_documents_into_database(model_name: str, documents_path: str) -> Chroma:
    """Load, chunk, and embed documents from a directory into Chroma.

    Args:
        model_name: Name of the Ollama embedding model to use.
        documents_path: Path to the directory containing the documents.

    Returns:
        Chroma: An in-memory Chroma vector store holding the chunked,
        embedded documents.

    Raises:
        FileNotFoundError: Presumably raised by ``load_documents`` when the
            directory is missing (the caller in ``main`` catches it) —
            confirm against ``document_loader``.
    """
    print("Loading documents")
    raw_documents = load_documents(documents_path)
    documents = TEXT_SPLITTER.split_documents(raw_documents)

    print("Creating embeddings and loading documents into Chroma")
    # No persist_directory is given, so the store lives only for this run.
    return Chroma.from_documents(
        documents,
        OllamaEmbeddings(model=model_name),
    )
def main(llm_model_name: str, embedding_model_name: str, documents_path: str) -> None:
    """Run an interactive RAG question-answering loop over local documents.

    Args:
        llm_model_name: Name of the Ollama chat model.
        embedding_model_name: Name of the Ollama embedding model.
        documents_path: Directory of documents to index and query.
    """
    # Check that the models are available; if not, attempt to pull them.
    # Exit with a non-zero status on failure (bug fix: bare sys.exit()
    # reported success to the shell).
    try:
        check_if_model_is_available(llm_model_name)
        check_if_model_is_available(embedding_model_name)
    except Exception as e:
        print(e)
        sys.exit(1)

    # Create the vector database from the documents directory.
    try:
        db = load_documents_into_database(embedding_model_name, documents_path)
    except FileNotFoundError as e:
        print(e)
        sys.exit(1)

    # Stream generated tokens straight to stdout while answering.
    llm = Ollama(
        model=llm_model_name,
        callbacks=[StreamingStdOutCallbackHandler()],
    )

    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=db.as_retriever(search_kwargs={"k": 8}),
        chain_type_kwargs={"prompt": PROMPT},
    )

    while True:
        try:
            user_input = input(
                "\n\nPlease enter your question (or type 'exit' to end): "
            )
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C or closed stdin ends the session cleanly.
            break
        if user_input.lower() == "exit":
            break

        # The chain performs its own retrieval via the retriever above;
        # the original's extra db.similarity_search(user_input) call was
        # unused dead work and has been removed.
        qa_chain.invoke({"query": user_input})
def parse_arguments() -> argparse.Namespace:
    """Parse command-line arguments for the RAG runner.

    Returns:
        argparse.Namespace: Parsed options with attributes ``model``
        (LLM name, default ``mistral``), ``embedding_model`` (default
        ``nomic-embed-text``) and ``path`` (documents directory, default
        ``Research``).
    """
    parser = argparse.ArgumentParser(description="Run local LLM with RAG with Ollama.")
    parser.add_argument(
        "-m",
        "--model",
        default="mistral",
        help="The name of the LLM model to use.",
    )
    parser.add_argument(
        "-e",
        "--embedding_model",
        default="nomic-embed-text",
        help="The name of the embedding model to use.",
    )
    parser.add_argument(
        "-p",
        "--path",
        default="Research",
        help="The path to the directory containing documents to load.",
    )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_arguments()
    main(args.model, args.embedding_model, args.path)
# (Removed: cookie-consent banner text captured by the web-page scrape;
# it was not part of the source file.)