llama-index

Форк
0
211 строк · 6.7 Кб
1
"""DashVector Vector Store."""
2

3
import logging
4
from typing import Any, List, Optional, cast
5

6
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
7
from llama_index.legacy.vector_stores.types import (
8
    MetadataFilters,
9
    VectorStore,
10
    VectorStoreQuery,
11
    VectorStoreQueryMode,
12
    VectorStoreQueryResult,
13
)
14
from llama_index.legacy.vector_stores.utils import (
15
    DEFAULT_DOC_ID_KEY,
16
    DEFAULT_TEXT_KEY,
17
    legacy_metadata_dict_to_node,
18
    metadata_dict_to_node,
19
    node_to_metadata_dict,
20
)
21

22
DEFAULT_BATCH_SIZE = 100
23
logger = logging.getLogger(__name__)
24

25

26
def _to_dashvector_filter(
    standard_filters: Optional[MetadataFilters] = None,
) -> Optional[str]:
    """Convert standard metadata filters to a DashVector filter expression.

    Each (key, value) filter becomes an equality clause (string values are
    single-quoted, everything else is stringified as-is); clauses are joined
    with ``and``.

    Args:
        standard_filters: the filters to convert, or ``None`` for no filtering.

    Returns:
        A DashVector filter string, or ``None`` when ``standard_filters`` is
        ``None``. An empty filter list yields an empty string.
    """
    if standard_filters is None:
        return None

    clauses = []
    # NOTE: renamed from `filter`, which shadowed the builtin.
    for metadata_filter in standard_filters.legacy_filters():
        if isinstance(metadata_filter.value, str):
            value = f"'{metadata_filter.value}'"
        else:
            value = f"{metadata_filter.value}"
        clauses.append(f"{metadata_filter.key} = {value}")
    return " and ".join(clauses)
41

42

43
class DashVectorStore(VectorStore):
    """Dash Vector Store.

    In this vector store, embeddings and docs are stored within a
    DashVector collection.

    During query time, the index uses DashVector to query for the top
    k most similar nodes.

    Args:
        collection (Optional[dashvector.Collection]): DashVector collection instance
        support_sparse_vector (bool): whether support sparse vector for collection.
        encoder (Optional[dashtext.SparseVectorEncoder]): encoder for generating sparse vector from document
    """

    stores_text: bool = True
    flat_metadata: bool = True

    def __init__(
        self,
        collection: Optional[Any] = None,
        support_sparse_vector: bool = False,
        encoder: Optional[Any] = None,
    ) -> None:
        """Initialize params.

        Raises:
            ImportError: if `dashvector` (or `dashtext` when sparse vectors
                are enabled) is not installed.
        """
        try:
            import dashvector
        except ImportError:
            raise ImportError(
                "`dashvector` package not found, please run `pip install dashvector`"
            )

        # BUG FIX: these must be set unconditionally. `add()` and `query()`
        # read `self._support_sparse_vector` on every call, so the original
        # code (which only assigned it inside the `if support_sparse_vector:`
        # branch) raised AttributeError whenever the store was built with the
        # default `support_sparse_vector=False`.
        self._support_sparse_vector = support_sparse_vector
        self._encoder: Optional[Any] = None

        if support_sparse_vector:
            try:
                import dashtext
            except ImportError:
                raise ImportError(
                    "`dashtext` package not found, please run `pip install dashtext`"
                )

            # Fall back to the library's default BM25-style encoder when the
            # caller does not supply one.
            if encoder is None:
                encoder = dashtext.SparseVectorEncoder.default()

            self._encoder = cast(dashtext.SparseVectorEncoder, encoder)

        if collection is not None:
            self._collection = cast(dashvector.Collection, collection)

    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """Add nodes to vector store.

        Nodes are upserted in batches of ``DEFAULT_BATCH_SIZE``. When sparse
        vectors are enabled, each document's sparse vector is computed from
        its embed-mode content.

        Args:
            nodes (List[BaseNode]): list of nodes with embeddings

        Returns:
            The ids of the added nodes.

        Raises:
            Exception: if any batch upsert fails.
        """
        from dashvector import Doc

        for batch_start in range(0, len(nodes), DEFAULT_BATCH_SIZE):
            batch_end = min(batch_start + DEFAULT_BATCH_SIZE, len(nodes))
            docs = [
                Doc(
                    id=node.node_id,
                    vector=node.embedding,
                    sparse_vector=(
                        self._encoder.encode_documents(
                            node.get_content(metadata_mode=MetadataMode.EMBED)
                        )
                        if self._support_sparse_vector
                        else None
                    ),
                    # Keep text inside the stored metadata so queries can
                    # rebuild full nodes from `doc.fields`.
                    fields=node_to_metadata_dict(
                        node, remove_text=False, flat_metadata=self.flat_metadata
                    ),
                )
                for node in nodes[batch_start:batch_end]
            ]

            resp = self._collection.upsert(docs)
            if not resp:
                raise Exception(f"Failed to upsert docs, error: {resp}")

        return [node.node_id for node in nodes]

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id.

        Looks up every stored doc whose doc-id field matches ``ref_doc_id``,
        then deletes them by their DashVector ids.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.

        Raises:
            Exception: if the lookup query fails.
        """
        # NOTE: renamed from `filter`, which shadowed the builtin.
        doc_filter = f"{DEFAULT_DOC_ID_KEY}='{ref_doc_id}'"
        resp = self._collection.query(filter=doc_filter)
        if not resp:
            raise Exception(f"Failed to query doc by {doc_filter}")

        self._collection.delete(ids=[doc.id for doc in resp])

    def query(
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """Query vector store.

        Supports dense, sparse, and hybrid modes; sparse/hybrid modes are
        only honored when the store was built with sparse-vector support.

        Args:
            query (VectorStoreQuery): the query spec (embedding, mode, top-k,
                optional metadata filters, optional hybrid alpha).

        Returns:
            The top-k nodes with their similarity scores and ids.

        Raises:
            Exception: if the DashVector query fails.
        """
        query_embedding = (
            [float(e) for e in query.query_embedding] if query.query_embedding else []
        )

        sparse_vector = None
        topk = query.similarity_top_k
        if (
            query.mode in (VectorStoreQueryMode.SPARSE, VectorStoreQueryMode.HYBRID)
            and self._support_sparse_vector
        ):
            sparse_vector = self._encoder.encode_queries(query.query_str)
            topk = query.hybrid_top_k or query.similarity_top_k

            # When an alpha is given, blend the dense and sparse components
            # (alpha weights the dense side) before querying.
            if query.alpha is not None:
                from dashtext import combine_dense_and_sparse

                query_embedding, sparse_vector = combine_dense_and_sparse(
                    query_embedding, sparse_vector, query.alpha
                )

        metadata_filter = _to_dashvector_filter(query.filters)
        rsp = self._collection.query(
            vector=query_embedding,
            sparse_vector=sparse_vector,
            topk=topk,
            filter=metadata_filter,
            include_vector=True,
        )
        if not rsp:
            raise Exception(f"Failed to query docs, error: {rsp}")

        top_k_ids = []
        top_k_nodes = []
        top_k_scores = []
        for doc in rsp:
            try:
                node = metadata_dict_to_node(doc.fields)
            except Exception:
                # NOTE: deprecated legacy logic for backward compatibility
                logger.debug("Failed to parse Node metadata, fallback to legacy logic.")
                metadata, node_info, relationships = legacy_metadata_dict_to_node(
                    doc.fields
                )

                text = doc.fields[DEFAULT_TEXT_KEY]
                node = TextNode(
                    id_=doc.id,
                    text=text,
                    metadata=metadata,
                    start_char_idx=node_info.get("start", None),
                    end_char_idx=node_info.get("end", None),
                    relationships=relationships,
                )
            top_k_ids.append(doc.id)
            top_k_nodes.append(node)
            top_k_scores.append(doc.score)

        return VectorStoreQueryResult(
            nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
        )
212

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.