llama-index

Форк
0
171 строка · 4.4 Кб
1
"""Init file of LlamaIndex."""

from pathlib import Path

# Read the package version from the VERSION file that ships next to this
# module, so __version__ has a single source of truth.
# encoding="utf-8" makes the read independent of the host locale;
# .parent is the idiomatic spelling of .parents[0].
with open(Path(__file__).absolute().parent / "VERSION", encoding="utf-8") as _f:
    __version__ = _f.read().strip()
7

8

9
import logging
10
from logging import NullHandler
11
from typing import Callable, Optional
12

13
# import global eval handler
14
from llama_index.legacy.callbacks.global_handlers import set_global_handler
15

16
# response
17
from llama_index.legacy.core.response.schema import Response
18
from llama_index.legacy.data_structs.struct_type import IndexStructType
19

20
# embeddings
21
from llama_index.legacy.embeddings import OpenAIEmbedding
22

23
# indices
24
# loading
25
from llama_index.legacy.indices import (
26
    ComposableGraph,
27
    DocumentSummaryIndex,
28
    GPTDocumentSummaryIndex,
29
    GPTKeywordTableIndex,
30
    GPTKnowledgeGraphIndex,
31
    GPTListIndex,
32
    GPTRAKEKeywordTableIndex,
33
    GPTSimpleKeywordTableIndex,
34
    GPTTreeIndex,
35
    GPTVectorStoreIndex,
36
    KeywordTableIndex,
37
    KnowledgeGraphIndex,
38
    ListIndex,
39
    RAKEKeywordTableIndex,
40
    SimpleKeywordTableIndex,
41
    SummaryIndex,
42
    TreeIndex,
43
    VectorStoreIndex,
44
    load_graph_from_storage,
45
    load_index_from_storage,
46
    load_indices_from_storage,
47
)
48

49
# structured
50
from llama_index.legacy.indices.common.struct_store.base import (
51
    SQLDocumentContextBuilder,
52
)
53

54
# prompt helper
55
from llama_index.legacy.indices.prompt_helper import PromptHelper
56
from llama_index.legacy.llm_predictor import LLMPredictor
57

58
# token predictor
59
from llama_index.legacy.llm_predictor.mock import MockLLMPredictor
60

61
# prompts
62
from llama_index.legacy.prompts import (
63
    BasePromptTemplate,
64
    ChatPromptTemplate,
65
    # backwards compatibility
66
    Prompt,
67
    PromptTemplate,
68
    SelectorPromptTemplate,
69
)
70
from llama_index.legacy.readers import (
71
    SimpleDirectoryReader,
72
    download_loader,
73
)
74

75
# Response Synthesizer
76
from llama_index.legacy.response_synthesizers.factory import get_response_synthesizer
77
from llama_index.legacy.schema import Document, QueryBundle
78
from llama_index.legacy.service_context import (
79
    ServiceContext,
80
    set_global_service_context,
81
)
82

83
# storage
84
from llama_index.legacy.storage.storage_context import StorageContext
85
from llama_index.legacy.token_counter.mock_embed_model import MockEmbedding
86

87
# sql wrapper
88
from llama_index.legacy.utilities.sql_wrapper import SQLDatabase
89

90
# global tokenizer
91
from llama_index.legacy.utils import get_tokenizer, set_global_tokenizer
92

93
# Library-logging best practice: attach a no-op handler so importing this
# package never triggers "No handler found" warnings in applications that
# have not configured logging themselves.
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
_pkg_logger = logging.getLogger(__name__)
_pkg_logger.addHandler(NullHandler())
96

97
# Public API of this package. NOTE(fix): the previous list also contained
# SummaryPrompt, TreeInsertPrompt, TreeSelectPrompt, TreeSelectMultiplePrompt,
# RefinePrompt, QuestionAnswerPrompt, KeywordExtractPrompt,
# QueryKeywordExtractPrompt, VellumPredictor and VellumPromptRegistry —
# none of those names are ever imported or defined in this module, so
# `from llama_index.legacy import *` raised AttributeError. They have been
# removed; every remaining entry is bound at module level.
__all__ = [
    "StorageContext",
    "ServiceContext",
    "ComposableGraph",
    # indices
    "SummaryIndex",
    "VectorStoreIndex",
    "SimpleKeywordTableIndex",
    "KeywordTableIndex",
    "RAKEKeywordTableIndex",
    "TreeIndex",
    "DocumentSummaryIndex",
    "KnowledgeGraphIndex",
    # indices - legacy names
    "GPTKeywordTableIndex",
    "GPTKnowledgeGraphIndex",
    "GPTSimpleKeywordTableIndex",
    "GPTRAKEKeywordTableIndex",
    "GPTListIndex",
    "ListIndex",
    "GPTTreeIndex",
    "GPTVectorStoreIndex",
    "GPTDocumentSummaryIndex",
    # prompts
    "Prompt",
    "PromptTemplate",
    "BasePromptTemplate",
    "ChatPromptTemplate",
    "SelectorPromptTemplate",
    # embeddings / responses / documents
    "OpenAIEmbedding",
    "Response",
    "Document",
    "SimpleDirectoryReader",
    "LLMPredictor",
    "MockLLMPredictor",
    "MockEmbedding",
    # SQL helpers
    "SQLDatabase",
    "SQLDocumentContextBuilder",
    "SQLContextBuilder",
    "PromptHelper",
    "IndexStructType",
    # loaders / storage
    "download_loader",
    "load_graph_from_storage",
    "load_index_from_storage",
    "load_indices_from_storage",
    "QueryBundle",
    "get_response_synthesizer",
    # global configuration hooks
    "set_global_service_context",
    "set_global_handler",
    "set_global_tokenizer",
    "get_tokenizer",
]
158

159
# eval global toggle
160
from llama_index.legacy.callbacks.base_handler import BaseCallbackHandler
161

162
global_handler: Optional[BaseCallbackHandler] = None
163

164
# NOTE: keep for backwards compatibility
165
SQLContextBuilder = SQLDocumentContextBuilder
166

167
# global service context for ServiceContext.from_defaults()
168
global_service_context: Optional[ServiceContext] = None
169

170
# global tokenizer
171
global_tokenizer: Optional[Callable[[str], list]] = None
172

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.