Langchain-Chatchat

Форк
0
122 строки · 4.6 Кб
1
import os
2
import sys
3

4
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
5
from typing import Any, List, Optional
6
from sentence_transformers import CrossEncoder
7
from typing import Optional, Sequence
8
from langchain_core.documents import Document
9
from langchain.callbacks.manager import Callbacks
10
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
11
from llama_index.bridge.pydantic import Field, PrivateAttr
12

13

14
class LangchainReranker(BaseDocumentCompressor):
    """Document compressor that reranks documents with a local
    `sentence_transformers.CrossEncoder` model (e.g. a bge-reranker
    checkpoint), keeping only the `top_n` highest-scoring documents.
    """
    # Path or HF hub id of the cross-encoder checkpoint.
    model_name_or_path: str = Field()
    # Loaded CrossEncoder instance; private, excluded from the pydantic schema.
    _model: Any = PrivateAttr()
    # Number of top-scoring documents to keep.
    top_n: int = Field()
    # Torch device the model runs on, e.g. "cuda" or "cpu".
    device: str = Field()
    # Maximum tokenized sequence length for the cross-encoder.
    max_length: int = Field()
    # Batch size used by CrossEncoder.predict.
    batch_size: int = Field()
    # DataLoader worker count used by CrossEncoder.predict.
    num_workers: int = Field()

    def __init__(self,
                 model_name_or_path: str,
                 top_n: int = 3,
                 device: str = "cuda",
                 max_length: int = 1024,
                 batch_size: int = 32,
                 num_workers: int = 0,
                 ):
        """Load the cross-encoder and initialize the compressor fields.

        Args:
            model_name_or_path: Path or hub id of the reranker model.
            top_n: How many documents to keep after reranking.
            device: Torch device string.
            max_length: Max sequence length for the model tokenizer.
            batch_size: Prediction batch size.
            num_workers: DataLoader workers for prediction.
        """
        # BUG FIX: the original passed a hard-coded max_length=1024 here,
        # silently ignoring the `max_length` argument (e.g. the
        # RERANKER_MAX_LENGTH config value). Pass the argument through.
        self._model = CrossEncoder(model_name=model_name_or_path,
                                   max_length=max_length,
                                   device=device)
        super().__init__(
            top_n=top_n,
            model_name_or_path=model_name_or_path,
            device=device,
            max_length=max_length,
            batch_size=batch_size,
            num_workers=num_workers,
        )

    def compress_documents(
            self,
            documents: Sequence[Document],
            query: str,
            callbacks: Optional[Callbacks] = None,
    ) -> Sequence[Document]:
        """Rerank `documents` against `query` with the cross-encoder.

        Args:
            documents: A sequence of documents to rerank.
            query: The query to score each document against.
            callbacks: Unused; accepted for BaseDocumentCompressor
                interface compatibility.

        Returns:
            Up to `top_n` documents, highest score first, each with a
            "relevance_score" float added to its metadata.
        """
        if not documents:  # avoid running the model on an empty batch
            return []
        doc_list = list(documents)
        sentence_pairs = [[query, doc.page_content] for doc in doc_list]
        results = self._model.predict(sentences=sentence_pairs,
                                      batch_size=self.batch_size,
                                      num_workers=self.num_workers,
                                      convert_to_tensor=True)
        top_k = min(self.top_n, len(results))
        values, indices = results.topk(top_k)
        final_results = []
        for value, index in zip(values, indices):
            doc = doc_list[int(index)]
            # Store a plain float (not a 0-dim tensor) so the metadata
            # stays comparable and JSON-serializable downstream.
            doc.metadata["relevance_score"] = float(value)
            final_results.append(doc)
        return final_results
101

102

103
if __name__ == "__main__":
    # Ad-hoc smoke test: when reranking is enabled in the config, resolve
    # the model path and instantiate the reranker once.
    from configs import (LLM_MODELS, VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD,
                         TEMPERATURE, USE_RERANKER, RERANKER_MODEL,
                         RERANKER_MAX_LENGTH, MODEL_PATH)
    from server.utils import embedding_device

    if USE_RERANKER:
        # Fall back to the public bge-reranker-large checkpoint when the
        # configured model has no explicit path entry.
        reranker_model_path = MODEL_PATH["reranker"].get(
            RERANKER_MODEL, "BAAI/bge-reranker-large")
        print("-----------------model path------------------")
        print(reranker_model_path)
        reranker_model = LangchainReranker(
            top_n=3,
            device=embedding_device(),
            max_length=RERANKER_MAX_LENGTH,
            model_name_or_path=reranker_model_path,
        )
123

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.