llama-index

Форк
0
847 строк · 30.3 Кб
1
"""
2
Qdrant vector store index.
3

4
An index that is built on top of an existing Qdrant collection.
5

6
"""
7

8
import logging
9
from typing import Any, List, Optional, Tuple, cast
10

11
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
12
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
13
from llama_index.legacy.utils import iter_batch
14
from llama_index.legacy.vector_stores.qdrant_utils import (
15
    HybridFusionCallable,
16
    SparseEncoderCallable,
17
    default_sparse_encoder,
18
    relative_score_fusion,
19
)
20
from llama_index.legacy.vector_stores.types import (
21
    BasePydanticVectorStore,
22
    VectorStoreQuery,
23
    VectorStoreQueryMode,
24
    VectorStoreQueryResult,
25
)
26
from llama_index.legacy.vector_stores.utils import (
27
    legacy_metadata_dict_to_node,
28
    metadata_dict_to_node,
29
    node_to_metadata_dict,
30
)
31

32
logger = logging.getLogger(__name__)
33
import_err_msg = (
34
    "`qdrant-client` package not found, please run `pip install qdrant-client`"
35
)
36

37

38
class QdrantVectorStore(BasePydanticVectorStore):
    """
    Qdrant Vector Store.

    In this vector store, embeddings and docs are stored within a
    Qdrant collection.

    During query time, the index uses Qdrant to query for the top
    k most similar nodes.

    Args:
        collection_name: (str): name of the Qdrant collection
        client (Optional[Any]): QdrantClient instance from `qdrant-client` package
        aclient (Optional[Any]): AsyncQdrantClient instance from `qdrant-client` package
        url (Optional[str]): url of the Qdrant instance
        api_key (Optional[str]): API key for authenticating with Qdrant
        batch_size (int): number of points to upload in a single request to Qdrant. Defaults to 64
        parallel (int): number of parallel processes to use during upload. Defaults to 1
        max_retries (int): maximum number of retries in case of a failure. Defaults to 3
        client_kwargs (Optional[dict]): additional kwargs for QdrantClient and AsyncQdrantClient
        enable_hybrid (bool): whether to enable hybrid search using dense and sparse vectors
        sparse_doc_fn (Optional[SparseEncoderCallable]): function to encode sparse vectors
        sparse_query_fn (Optional[SparseEncoderCallable]): function to encode sparse queries
        hybrid_fusion_fn (Optional[HybridFusionCallable]): function to fuse hybrid search results
    """

    stores_text: bool = True
    flat_metadata: bool = False

    collection_name: str
    path: Optional[str]
    url: Optional[str]
    api_key: Optional[str]
    batch_size: int
    parallel: int
    max_retries: int
    client_kwargs: dict = Field(default_factory=dict)
    enable_hybrid: bool

    # Private runtime state (not part of the pydantic model fields).
    _client: Any = PrivateAttr()
    _aclient: Any = PrivateAttr()
    _collection_initialized: bool = PrivateAttr()
    _sparse_doc_fn: Optional[SparseEncoderCallable] = PrivateAttr()
    _sparse_query_fn: Optional[SparseEncoderCallable] = PrivateAttr()
    _hybrid_fusion_fn: Optional[HybridFusionCallable] = PrivateAttr()

    def __init__(
        self,
        collection_name: str,
        client: Optional[Any] = None,
        aclient: Optional[Any] = None,
        url: Optional[str] = None,
        api_key: Optional[str] = None,
        batch_size: int = 64,
        parallel: int = 1,
        max_retries: int = 3,
        client_kwargs: Optional[dict] = None,
        enable_hybrid: bool = False,
        sparse_doc_fn: Optional[SparseEncoderCallable] = None,
        sparse_query_fn: Optional[SparseEncoderCallable] = None,
        hybrid_fusion_fn: Optional[HybridFusionCallable] = None,
        **kwargs: Any,
    ) -> None:
        """Init params."""
        try:
            import qdrant_client
        except ImportError:
            raise ImportError(import_err_msg)

        # Either an existing client (sync or async) must be passed in, or
        # enough connection info to construct one.
        if (
            client is None
            and aclient is None
            and (url is None or api_key is None or collection_name is None)
        ):
            raise ValueError(
                "Must provide either a QdrantClient instance or a url and api_key."
            )

        if client is None and aclient is None:
            # Build both sync and async clients from the connection params so
            # that either API surface can be used.
            client_kwargs = client_kwargs or {}
            self._client = qdrant_client.QdrantClient(
                url=url, api_key=api_key, **client_kwargs
            )
            self._aclient = qdrant_client.AsyncQdrantClient(
                url=url, api_key=api_key, **client_kwargs
            )
        else:
            if client is not None and aclient is not None:
                logger.warning(
                    "Both client and aclient are provided. If using `:memory:` "
                    "mode, the data between clients is not synced."
                )

            self._client = client
            self._aclient = aclient

        if self._client is not None:
            self._collection_initialized = self._collection_exists(collection_name)
        else:
            #  need to do lazy init for async clients
            self._collection_initialized = False

        # setup hybrid search if enabled
        if enable_hybrid:
            self._sparse_doc_fn = sparse_doc_fn or default_sparse_encoder(
                "naver/efficient-splade-VI-BT-large-doc"
            )
            self._sparse_query_fn = sparse_query_fn or default_sparse_encoder(
                "naver/efficient-splade-VI-BT-large-query"
            )
            self._hybrid_fusion_fn = hybrid_fusion_fn or cast(
                HybridFusionCallable, relative_score_fusion
            )
        else:
            # BUG FIX: previously these private attrs were left unassigned when
            # hybrid was disabled, so any access raised AttributeError instead
            # of behaving like "no encoder configured". Initialize to None.
            self._sparse_doc_fn = None
            self._sparse_query_fn = None
            self._hybrid_fusion_fn = None

        super().__init__(
            collection_name=collection_name,
            url=url,
            api_key=api_key,
            batch_size=batch_size,
            parallel=parallel,
            max_retries=max_retries,
            client_kwargs=client_kwargs or {},
            enable_hybrid=enable_hybrid,
        )
162

163
    @classmethod
    def class_name(cls) -> str:
        """Return the registry name used to identify this vector store class."""
        return "QdrantVectorStore"
166

167
    def _build_points(self, nodes: List[BaseNode]) -> Tuple[List[Any], List[str]]:
        """Convert nodes into Qdrant ``PointStruct`` objects.

        Nodes are processed in batches of ``self.batch_size``. When hybrid
        search is enabled, each point carries named vectors
        (``"text-dense"``/``"text-sparse"``); otherwise the raw dense
        embedding is used as the point vector.

        Args:
            nodes: nodes with embeddings to convert.

        Returns:
            Tuple of (points, node ids), in input order.
        """
        from qdrant_client.http import models as rest

        ids = []
        points = []
        for node_batch in iter_batch(nodes, self.batch_size):
            node_ids = []
            vectors: List[Any] = []
            sparse_vectors: List[List[float]] = []
            sparse_indices: List[List[int]] = []
            payloads = []

            if self.enable_hybrid and self._sparse_doc_fn is not None:
                # Encode the whole batch at once; the returned index/value
                # lists are aligned positionally with node_batch.
                sparse_indices, sparse_vectors = self._sparse_doc_fn(
                    [
                        node.get_content(metadata_mode=MetadataMode.EMBED)
                        for node in node_batch
                    ],
                )

            for i, node in enumerate(node_batch):
                assert isinstance(node, BaseNode)
                node_ids.append(node.node_id)

                if self.enable_hybrid:
                    # Only attach a sparse vector when the encoder produced a
                    # consistent (indices, values) pair for this batch.
                    if (
                        len(sparse_vectors) > 0
                        and len(sparse_indices) > 0
                        and len(sparse_vectors) == len(sparse_indices)
                    ):
                        vectors.append(
                            {
                                "text-sparse": rest.SparseVector(
                                    indices=sparse_indices[i],
                                    values=sparse_vectors[i],
                                ),
                                "text-dense": node.get_embedding(),
                            }
                        )
                    else:
                        # Sparse encoding unavailable: fall back to dense only.
                        vectors.append(
                            {
                                "text-dense": node.get_embedding(),
                            }
                        )
                else:
                    vectors.append(node.get_embedding())

                # Payload keeps the node text (remove_text=False) so it can be
                # reconstructed by parse_to_query_result.
                metadata = node_to_metadata_dict(
                    node, remove_text=False, flat_metadata=self.flat_metadata
                )

                payloads.append(metadata)

            points.extend(
                [
                    rest.PointStruct(id=node_id, payload=payload, vector=vector)
                    for node_id, payload, vector in zip(node_ids, payloads, vectors)
                ]
            )

            ids.extend(node_ids)

        return points, ids
231

232
    def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
        """
        Add nodes to index.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings

        """
        # Lazily create the collection on first insert, sized to the
        # dimensionality of the incoming embeddings.
        if nodes and not self._collection_initialized:
            vector_size = len(nodes[0].get_embedding())
            self._create_collection(
                collection_name=self.collection_name,
                vector_size=vector_size,
            )

        points, ids = self._build_points(nodes)

        upload_kwargs = {
            "collection_name": self.collection_name,
            "points": points,
            "batch_size": self.batch_size,
            "parallel": self.parallel,
            "max_retries": self.max_retries,
            "wait": True,
        }
        self._client.upload_points(**upload_kwargs)

        return ids
258

259
    async def async_add(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
        """
        Asynchronous method to add nodes to Qdrant index.

        Args:
            nodes: List[BaseNode]: List of nodes with embeddings.

        Returns:
            List of node IDs that were added to the index.

        Raises:
            ValueError: If trying to use async methods without aclient
        """
        # BUG FIX: the docstring promises a ValueError, but the original code
        # crashed with an opaque AttributeError on `self._aclient.<...>` when
        # no async client was configured. Fail fast with a clear message.
        if self._aclient is None:
            raise ValueError(
                "Async client is not initialized. Please provide `aclient` "
                "(or a url and api_key) in the constructor to use async methods."
            )

        collection_initialized = await self._acollection_exists(self.collection_name)

        # Lazily create the collection on first insert, sized to the
        # dimensionality of the incoming embeddings.
        if len(nodes) > 0 and not collection_initialized:
            await self._acreate_collection(
                collection_name=self.collection_name,
                vector_size=len(nodes[0].get_embedding()),
            )

        points, ids = self._build_points(nodes)

        await self._aclient.upload_points(
            collection_name=self.collection_name,
            points=points,
            batch_size=self.batch_size,
            parallel=self.parallel,
            max_retries=self.max_retries,
            wait=True,
        )

        return ids
292

293
    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.

        """
        from qdrant_client.http import models as rest

        # Select every point whose payload "doc_id" matches the ref doc.
        doc_id_condition = rest.FieldCondition(
            key="doc_id", match=rest.MatchValue(value=ref_doc_id)
        )
        selector = rest.Filter(must=[doc_id_condition])

        self._client.delete(
            collection_name=self.collection_name,
            points_selector=selector,
        )
313

314
    async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Asynchronous method to delete nodes using with ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.

        """
        from qdrant_client.http import models as rest

        # Select every point whose payload "doc_id" matches the ref doc.
        doc_id_condition = rest.FieldCondition(
            key="doc_id", match=rest.MatchValue(value=ref_doc_id)
        )
        selector = rest.Filter(must=[doc_id_condition])

        await self._aclient.delete(
            collection_name=self.collection_name,
            points_selector=selector,
        )
334

335
    @property
    def client(self) -> Any:
        """Return the underlying synchronous Qdrant client instance."""
        return self._client
339

340
    def _create_collection(self, collection_name: str, vector_size: int) -> None:
        """Create a Qdrant collection (idempotent: 'already exists' is ignored)."""
        from qdrant_client.http import models as rest
        from qdrant_client.http.exceptions import UnexpectedResponse

        # Dense config is shared between hybrid and plain modes.
        dense_params = rest.VectorParams(
            size=vector_size,
            distance=rest.Distance.COSINE,
        )

        if self.enable_hybrid:
            # Hybrid collections use named vectors plus a sparse index.
            create_kwargs: dict = {
                "vectors_config": {"text-dense": dense_params},
                "sparse_vectors_config": {
                    "text-sparse": rest.SparseVectorParams(
                        index=rest.SparseIndexParams()
                    )
                },
            }
        else:
            create_kwargs = {"vectors_config": dense_params}

        try:
            self._client.create_collection(
                collection_name=collection_name, **create_kwargs
            )
        except (ValueError, UnexpectedResponse) as exc:
            # Racing creations are fine; anything else is a real error.
            if "already exists" not in str(exc):
                raise exc  # noqa: TRY201
            logger.warning(
                "Collection %s already exists, skipping collection creation.",
                collection_name,
            )
        self._collection_initialized = True
377

378
    async def _acreate_collection(self, collection_name: str, vector_size: int) -> None:
        """Asynchronously create a Qdrant collection (idempotent: 'already exists' is ignored)."""
        from qdrant_client.http import models as rest
        from qdrant_client.http.exceptions import UnexpectedResponse

        # Dense config is shared between hybrid and plain modes.
        dense_params = rest.VectorParams(
            size=vector_size,
            distance=rest.Distance.COSINE,
        )

        if self.enable_hybrid:
            # Hybrid collections use named vectors plus a sparse index.
            create_kwargs: dict = {
                "vectors_config": {"text-dense": dense_params},
                "sparse_vectors_config": {
                    "text-sparse": rest.SparseVectorParams(
                        index=rest.SparseIndexParams()
                    )
                },
            }
        else:
            create_kwargs = {"vectors_config": dense_params}

        try:
            await self._aclient.create_collection(
                collection_name=collection_name, **create_kwargs
            )
        except (ValueError, UnexpectedResponse) as exc:
            # Racing creations are fine; anything else is a real error.
            if "already exists" not in str(exc):
                raise exc  # noqa: TRY201
            logger.warning(
                "Collection %s already exists, skipping collection creation.",
                collection_name,
            )
        self._collection_initialized = True
415

416
    def _collection_exists(self, collection_name: str) -> bool:
        """Return True if the named collection exists on the Qdrant server."""
        from grpc import RpcError
        from qdrant_client.http.exceptions import UnexpectedResponse

        try:
            # Probe via get_collection; any lookup failure means "absent".
            self._client.get_collection(collection_name)
        except (RpcError, UnexpectedResponse, ValueError):
            return False
        else:
            return True
426

427
    async def _acollection_exists(self, collection_name: str) -> bool:
        """Asynchronously return True if the named collection exists on the Qdrant server."""
        from grpc import RpcError
        from qdrant_client.http.exceptions import UnexpectedResponse

        try:
            # Probe via get_collection; any lookup failure means "absent".
            await self._aclient.get_collection(collection_name)
        except (RpcError, UnexpectedResponse, ValueError):
            return False
        else:
            return True
437

438
    def query(
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """
        Query index for top k most similar nodes.

        Dispatches on ``query.mode``: HYBRID fuses a dense and a sparse
        search; SPARSE runs sparse only; otherwise dense search is used
        (batched when hybrid is enabled, since vectors are then named).

        Args:
            query (VectorStoreQuery): query

        Keyword Args:
            qdrant_filters: optional pre-built Qdrant ``Filter`` that
                overrides the filter derived from ``query``.

        Raises:
            ValueError: if HYBRID mode is requested but hybrid is disabled.
        """
        from qdrant_client import models as rest
        from qdrant_client.http.models import Filter

        query_embedding = cast(List[float], query.query_embedding)
        #  NOTE: users can pass in qdrant_filters (nested/complicated filters) to override the default MetadataFilters
        qdrant_filters = kwargs.get("qdrant_filters")
        if qdrant_filters is not None:
            query_filter = qdrant_filters
        else:
            query_filter = cast(Filter, self._build_query_filter(query))

        if query.mode == VectorStoreQueryMode.HYBRID and not self.enable_hybrid:
            raise ValueError(
                "Hybrid search is not enabled. Please build the query with "
                "`enable_hybrid=True` in the constructor."
            )
        elif (
            query.mode == VectorStoreQueryMode.HYBRID
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # HYBRID: run one dense and one sparse request in a single batch,
            # then fuse the two result lists.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k

            sparse_response = self._client.search_batch(
                collection_name=self.collection_name,
                requests=[
                    rest.SearchRequest(
                        vector=rest.NamedVector(
                            name="text-dense",
                            vector=query_embedding,
                        ),
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                    rest.SearchRequest(
                        vector=rest.NamedSparseVector(
                            name="text-sparse",
                            vector=rest.SparseVector(
                                indices=sparse_indices[0],
                                values=sparse_embedding[0],
                            ),
                        ),
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                ],
            )

            # sanity check
            assert len(sparse_response) == 2
            assert self._hybrid_fusion_fn is not None

            # flatten the response
            return self._hybrid_fusion_fn(
                self.parse_to_query_result(sparse_response[0]),
                self.parse_to_query_result(sparse_response[1]),
                # NOTE: only for hybrid search (0 for sparse search, 1 for dense search)
                alpha=query.alpha or 0.5,
                # NOTE: use hybrid_top_k if provided, otherwise use similarity_top_k
                top_k=query.hybrid_top_k or query.similarity_top_k,
            )
        elif (
            query.mode == VectorStoreQueryMode.SPARSE
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # SPARSE: single named-sparse-vector request.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k

            sparse_response = self._client.search_batch(
                collection_name=self.collection_name,
                requests=[
                    rest.SearchRequest(
                        vector=rest.NamedSparseVector(
                            name="text-sparse",
                            vector=rest.SparseVector(
                                indices=sparse_indices[0],
                                values=sparse_embedding[0],
                            ),
                        ),
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                ],
            )
            return self.parse_to_query_result(sparse_response[0])

        elif self.enable_hybrid:
            # Hybrid collection but dense query: must address the named
            # "text-dense" vector, hence search_batch with a NamedVector.
            response = self._client.search_batch(
                collection_name=self.collection_name,
                requests=[
                    rest.SearchRequest(
                        vector=rest.NamedVector(
                            name="text-dense",
                            vector=query_embedding,
                        ),
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                ],
            )

            return self.parse_to_query_result(response[0])
        else:
            # Plain collection: the point vector is unnamed, so a simple
            # dense search suffices.
            response = self._client.search(
                collection_name=self.collection_name,
                query_vector=query_embedding,
                limit=query.similarity_top_k,
                query_filter=query_filter,
            )
            return self.parse_to_query_result(response)
572

573
    async def aquery(
        self, query: VectorStoreQuery, **kwargs: Any
    ) -> VectorStoreQueryResult:
        """
        Asynchronous method to query index for top k most similar nodes.

        Mirrors ``query`` but issues all requests through the async client.

        Args:
            query (VectorStoreQuery): query

        Keyword Args:
            qdrant_filters: optional pre-built Qdrant ``Filter`` that
                overrides the filter derived from ``query``.

        Raises:
            ValueError: if HYBRID mode is requested but hybrid is disabled.
        """
        from qdrant_client import models as rest
        from qdrant_client.http.models import Filter

        query_embedding = cast(List[float], query.query_embedding)

        #  NOTE: users can pass in qdrant_filters (nested/complicated filters) to override the default MetadataFilters
        qdrant_filters = kwargs.get("qdrant_filters")
        if qdrant_filters is not None:
            query_filter = qdrant_filters
        else:
            # build metadata filters
            query_filter = cast(Filter, self._build_query_filter(query))

        if query.mode == VectorStoreQueryMode.HYBRID and not self.enable_hybrid:
            raise ValueError(
                "Hybrid search is not enabled. Please build the query with "
                "`enable_hybrid=True` in the constructor."
            )
        elif (
            query.mode == VectorStoreQueryMode.HYBRID
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # HYBRID: run one dense and one sparse request in a single batch,
            # then fuse the two result lists.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k

            sparse_response = await self._aclient.search_batch(
                collection_name=self.collection_name,
                requests=[
                    rest.SearchRequest(
                        vector=rest.NamedVector(
                            name="text-dense",
                            vector=query_embedding,
                        ),
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                    rest.SearchRequest(
                        vector=rest.NamedSparseVector(
                            name="text-sparse",
                            vector=rest.SparseVector(
                                indices=sparse_indices[0],
                                values=sparse_embedding[0],
                            ),
                        ),
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                ],
            )

            # sanity check
            assert len(sparse_response) == 2
            assert self._hybrid_fusion_fn is not None

            # flatten the response
            return self._hybrid_fusion_fn(
                self.parse_to_query_result(sparse_response[0]),
                self.parse_to_query_result(sparse_response[1]),
                alpha=query.alpha or 0.5,
                # NOTE: use hybrid_top_k if provided, otherwise use similarity_top_k
                top_k=query.hybrid_top_k or query.similarity_top_k,
            )
        elif (
            query.mode == VectorStoreQueryMode.SPARSE
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # SPARSE: single named-sparse-vector request.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k

            sparse_response = await self._aclient.search_batch(
                collection_name=self.collection_name,
                requests=[
                    rest.SearchRequest(
                        vector=rest.NamedSparseVector(
                            name="text-sparse",
                            vector=rest.SparseVector(
                                indices=sparse_indices[0],
                                values=sparse_embedding[0],
                            ),
                        ),
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                ],
            )
            return self.parse_to_query_result(sparse_response[0])
        elif self.enable_hybrid:
            # Hybrid collection but dense query: must address the named
            # "text-dense" vector, hence search_batch with a NamedVector.
            response = await self._aclient.search_batch(
                collection_name=self.collection_name,
                requests=[
                    rest.SearchRequest(
                        vector=rest.NamedVector(
                            name="text-dense",
                            vector=query_embedding,
                        ),
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                    ),
                ],
            )

            return self.parse_to_query_result(response[0])
        else:
            # Plain collection: the point vector is unnamed, so a simple
            # dense search suffices.
            response = await self._aclient.search(
                collection_name=self.collection_name,
                query_vector=query_embedding,
                limit=query.similarity_top_k,
                query_filter=query_filter,
            )

            return self.parse_to_query_result(response)
706

707
    def parse_to_query_result(self, response: List[Any]) -> VectorStoreQueryResult:
        """
        Convert a raw Qdrant search response into a VectorStoreQueryResult.

        Args:
            response: List[Any]: List of results returned from the vector store.
        """
        from qdrant_client.http.models import Payload

        nodes: List[BaseNode] = []
        similarities: List[float] = []
        ids: List[str] = []

        for point in response:
            point_payload = cast(Payload, point.payload)
            try:
                parsed_node = metadata_dict_to_node(point_payload)
            except Exception:
                # NOTE: deprecated legacy payload layout, kept so collections
                # written by older versions remain readable.
                logger.debug("Failed to parse Node metadata, fallback to legacy logic.")
                metadata, node_info, relationships = legacy_metadata_dict_to_node(
                    point_payload
                )
                parsed_node = TextNode(
                    id_=str(point.id),
                    text=point_payload.get("text"),
                    metadata=metadata,
                    start_char_idx=node_info.get("start", None),
                    end_char_idx=node_info.get("end", None),
                    relationships=relationships,
                )
            nodes.append(parsed_node)
            ids.append(str(point.id))
            similarities.append(point.score)

        return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
744

745
    def _build_query_filter(self, query: VectorStoreQuery) -> Optional[Any]:
        """Translate a ``VectorStoreQuery`` into a Qdrant ``Filter``.

        Returns ``None`` only when the query carries no constraints at all
        (no doc ids, no node ids, no metadata filters, no query string).
        """
        # BUG FIX: the previous early return checked only `doc_ids` and
        # `query_str`, so a query that carried only metadata `filters` (or
        # only `node_ids`) was executed completely unfiltered.
        if (
            not query.doc_ids
            and not query.node_ids
            and not query.query_str
            and query.filters is None
        ):
            return None

        from qdrant_client.http.models import (
            FieldCondition,
            Filter,
            MatchAny,
            MatchExcept,
            MatchText,
            MatchValue,
            Range,
        )

        must_conditions = []

        if query.doc_ids:
            must_conditions.append(
                FieldCondition(
                    key="doc_id",
                    match=MatchAny(any=query.doc_ids),
                )
            )

        if query.node_ids:
            must_conditions.append(
                FieldCondition(
                    key="id",
                    match=MatchAny(any=query.node_ids),
                )
            )

        # Qdrant does not use the query.query_str property for the filtering. Full-text
        # filtering cannot handle longer queries and can effectively filter out all the
        # nodes. See: https://github.com/jerryjliu/llama_index/pull/1181

        if query.filters is None:
            return Filter(must=must_conditions)

        for subfilter in query.filters.filters:
            # Exact match is the default when no operator is given.
            if not subfilter.operator or subfilter.operator == "==":
                if isinstance(subfilter.value, float):
                    # Floats cannot use MatchValue reliably; emit a degenerate
                    # [value, value] range instead.
                    must_conditions.append(
                        FieldCondition(
                            key=subfilter.key,
                            range=Range(
                                gte=subfilter.value,
                                lte=subfilter.value,
                            ),
                        )
                    )
                else:
                    must_conditions.append(
                        FieldCondition(
                            key=subfilter.key,
                            match=MatchValue(value=subfilter.value),
                        )
                    )
            elif subfilter.operator == "<":
                must_conditions.append(
                    FieldCondition(
                        key=subfilter.key,
                        range=Range(lt=subfilter.value),
                    )
                )
            elif subfilter.operator == ">":
                must_conditions.append(
                    FieldCondition(
                        key=subfilter.key,
                        range=Range(gt=subfilter.value),
                    )
                )
            elif subfilter.operator == ">=":
                must_conditions.append(
                    FieldCondition(
                        key=subfilter.key,
                        range=Range(gte=subfilter.value),
                    )
                )
            elif subfilter.operator == "<=":
                must_conditions.append(
                    FieldCondition(
                        key=subfilter.key,
                        range=Range(lte=subfilter.value),
                    )
                )
            elif subfilter.operator == "text_match":
                must_conditions.append(
                    FieldCondition(
                        key=subfilter.key,
                        match=MatchText(text=subfilter.value),
                    )
                )
            elif subfilter.operator == "!=":
                # `except` is a reserved word, so it must be passed via **kwargs.
                must_conditions.append(
                    FieldCondition(
                        key=subfilter.key,
                        match=MatchExcept(**{"except": [subfilter.value]}),
                    )
                )
            # NOTE: unrecognized operators are silently skipped (preserves
            # the original behavior).

        return Filter(must=must_conditions)
848

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.