llama-index

Форк
0
1
import logging
2
from abc import ABC, abstractmethod
3
from typing import Any, Dict, List, Optional, Type
4

5
import numpy as np
6

7
from llama_index.legacy.bridge.pydantic import Field
8
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
9
from llama_index.legacy.vector_stores.types import (
10
    VectorStore,
11
    VectorStoreQuery,
12
    VectorStoreQueryResult,
13
)
14
from llama_index.legacy.vector_stores.utils import (
15
    legacy_metadata_dict_to_node,
16
    metadata_dict_to_node,
17
    node_to_metadata_dict,
18
)
19

20
logger = logging.getLogger(__name__)
21

22

23
class DocArrayVectorStore(VectorStore, ABC):
    """DocArray Vector Store Base Class.

    This is an abstract base class for creating a DocArray vector store.
    The subclasses should implement _init_index and _find_docs_to_be_removed methods.
    """

    # for mypy. will get initialized by the subclass.
    _index: Any
    _schema: Any
    _ref_docs: Dict[str, List[str]]

    stores_text: bool = True
    flat_metadata: bool = False

    def _update_ref_docs(self, docs) -> None:  # type: ignore[no-untyped-def]
        """Update ref-doc bookkeeping after documents are indexed.

        Intentional no-op by default; subclasses that maintain ``_ref_docs``
        override this to record which docs belong to which ref doc.
        """

    @abstractmethod
    def _init_index(self, **kwargs: Any):  # type: ignore[no-untyped-def]
        """Initializes the index.

        This method should be overridden by the subclasses.
        """

    @abstractmethod
    def _find_docs_to_be_removed(self, doc_id: str) -> List[str]:
        """Finds the documents to be removed from the vector store.

        Args:
            doc_id (str): Document ID that should be removed.

        Returns:
            List[str]: List of document IDs to be removed.

        This is an abstract method and needs to be implemented in any concrete subclass.
        """

    @property
    def client(self) -> Any:
        """Get client."""
        return None

    def num_docs(self) -> int:
        """Retrieves the number of documents in the index.

        Returns:
            int: The number of documents in the index.
        """
        return self._index.num_docs()

    @staticmethod
    def _get_schema(**embeddings_params: Any) -> Type:
        """Fetches the schema for DocArray indices.

        Args:
            **embeddings_params: Variable length argument list for the embedding.

        Returns:
            DocArraySchema: Schema for a DocArray index.
        """
        # Imported lazily so docarray is only required when a store is built.
        from docarray import BaseDoc
        from docarray.typing import ID, NdArray

        class DocArraySchema(BaseDoc):
            id: Optional[ID] = None
            text: Optional[str] = None
            metadata: Optional[dict] = None
            embedding: NdArray = Field(**embeddings_params)

        return DocArraySchema

    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """Adds nodes to the vector store.

        Args:
            nodes (List[BaseNode]): List of nodes with embeddings.

        Returns:
            List[str]: List of document IDs added to the vector store.
        """
        from docarray import DocList

        # check to see if empty document list was passed
        if len(nodes) == 0:
            return []

        docs = DocList[self._schema](  # type: ignore[name-defined]
            self._schema(
                id=node.node_id,
                metadata=node_to_metadata_dict(node, flat_metadata=self.flat_metadata),
                text=node.get_content(metadata_mode=MetadataMode.NONE),
                embedding=node.get_embedding(),
            )
            for node in nodes
        )
        self._index.index(docs)
        logger.info(f"Successfully added {len(docs)} documents to the index")
        # Subclasses that track ref docs get a chance to update their mapping.
        if self._ref_docs is not None:
            self._update_ref_docs(docs)
        return [doc.id for doc in docs]

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """Deletes a document from the vector store.

        Args:
            ref_doc_id (str): Document ID to be deleted.
            **delete_kwargs (Any): Additional arguments to pass to the delete method.
        """
        docs_to_be_removed = self._find_docs_to_be_removed(ref_doc_id)
        if not docs_to_be_removed:
            logger.warning(f"Document with doc_id {ref_doc_id} not found")
            return

        del self._index[docs_to_be_removed]
        logger.info(f"Deleted {len(docs_to_be_removed)} documents from the index")

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """Queries the vector store and retrieves the results.

        Args:
            query (VectorStoreQuery): Query for the vector store.

        Returns:
            VectorStoreQueryResult: Result of the query from vector store.
        """
        if query.filters:
            # only for ExactMatchFilters
            # NOTE: avoid shadowing the builtin `filter` in the comprehension.
            filter_query = {
                "metadata__" + metadata_filter.key: {"$eq": metadata_filter.value}
                for metadata_filter in query.filters.legacy_filters()
            }
            # Build into a separate name instead of rebinding `query`, which is
            # still read (query_embedding / similarity_top_k) while building.
            built_query = (
                self._index.build_query()  # get empty query object
                .find(
                    query=self._schema(embedding=np.array(query.query_embedding)),
                    search_field="embedding",
                    limit=query.similarity_top_k,
                )  # add vector similarity search
                .filter(filter_query=filter_query)  # add filter search
                .build()  # build the query
            )

            # execute the combined query and return the results
            docs, scores = self._index.execute_query(built_query)
        else:
            docs, scores = self._index.find(
                query=self._schema(embedding=np.array(query.query_embedding)),
                search_field="embedding",
                limit=query.similarity_top_k,
            )
        nodes, ids = [], []
        for doc in docs:
            try:
                node = metadata_dict_to_node(doc.metadata)
                node.text = doc.text
            except Exception:
                # TODO: legacy metadata support
                metadata, node_info, relationships = legacy_metadata_dict_to_node(
                    doc.metadata
                )
                node = TextNode(
                    id_=doc.id,
                    text=doc.text,
                    metadata=metadata,
                    start_char_idx=node_info.get("start", None),
                    end_char_idx=node_info.get("end", None),
                    relationships=relationships,
                )

            nodes.append(node)
            ids.append(doc.id)
        logger.info(f"Found {len(nodes)} results for the query")

        return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=scores)
203

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.