llama-index

"""Relevancy evaluation."""

from __future__ import annotations

import asyncio
import re
from typing import Any, Callable, Optional, Sequence, Tuple

from llama_index.legacy import ServiceContext
from llama_index.legacy.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.legacy.indices import SummaryIndex
from llama_index.legacy.prompts import BasePromptTemplate, PromptTemplate
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.schema import Document

DEFAULT_EVAL_TEMPLATE = PromptTemplate(
    "Your task is to evaluate if the retrieved context from the document sources is relevant to the query.\n"
    "The evaluation should be performed in a step-by-step manner by answering the following questions:\n"
    "1. Does the retrieved context match the subject matter of the user's query?\n"
    "2. Can the retrieved context be used exclusively to provide a full answer to the user's query?\n"
    "Each question above is worth 2 points, where partial marks are allowed and encouraged. Provide detailed feedback on the response "
    "according to the criteria questions previously mentioned. "
    "After your feedback provide a final result by strictly following this format: "
    "'[RESULT] followed by the float number representing the total score assigned to the response'\n\n"
    "Query: \n {query_str}\n"
    "Context: \n {context_str}\n"
    "Feedback:"
)

_DEFAULT_SCORE_THRESHOLD = 4.0
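# Maximum raw score under the default eval template (2 questions x 2 points each);
# ``aevaluate`` divides the parsed score by this threshold to normalize it into [0, 1].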

DEFAULT_REFINE_TEMPLATE = PromptTemplate(
    "We want to understand if the following query and response is "
    "in line with the context information: \n {query_str}\n"
    "We have provided an existing evaluation score: \n {existing_answer}\n"
    "We have the opportunity to refine the existing evaluation "
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{context_msg}\n"
    "------------\n"
    f"If the existing evaluation was already {_DEFAULT_SCORE_THRESHOLD}, still answer {_DEFAULT_SCORE_THRESHOLD}. "
    f"If the information is present in the new context, answer {_DEFAULT_SCORE_THRESHOLD}. "
    "Otherwise answer {existing_answer}.\n"
)


def _default_parser_function(output_str: str) -> Tuple[Optional[float], Optional[str]]:
    # Pattern to match the feedback and the score:
    # any text ending with '[RESULT]' followed by a number.
    pattern = r"([\s\S]+)(?:\[RESULT\]\s*)([\d.]+)"

    # Search for the first occurrence of the pattern.
    result = re.search(pattern, output_str)

    # Check if a match is found
    if result:
        # Extract the feedback and the score from the match groups.
        feedback, score = result.groups()
        score = float(score) if score is not None else score
        return score, feedback.strip()
    else:
        return None, None
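
# Example: an LLM output such as
#   "The context covers the query's subject matter in detail. [RESULT] 3.5"
# is parsed by ``_default_parser_function`` into
#   (3.5, "The context covers the query's subject matter in detail.").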


class ContextRelevancyEvaluator(BaseEvaluator):
    """Context relevancy evaluator.

    Evaluates the relevancy of retrieved contexts to a query.
    This evaluator considers the query string and retrieved contexts.

    Args:
        service_context(Optional[ServiceContext]):
            The service context to use for evaluation.
        raise_error(Optional[bool]):
            Whether to raise an error if the response is invalid.
            Defaults to False.
        eval_template(Optional[Union[str, BasePromptTemplate]]):
            The template to use for evaluation.
        refine_template(Optional[Union[str, BasePromptTemplate]]):
            The template to use for refinement.
        score_threshold(float):
            The maximum raw score produced by the eval template; the parsed
            score is divided by this value to normalize it. Defaults to 4.0.
        parser_function(Callable):
            Function that parses the LLM output into a (score, feedback) tuple.
    """

    def __init__(
        self,
        service_context: ServiceContext | None = None,
        raise_error: bool = False,
        eval_template: str | BasePromptTemplate | None = None,
        refine_template: str | BasePromptTemplate | None = None,
        score_threshold: float = _DEFAULT_SCORE_THRESHOLD,
        parser_function: Callable[
            [str], Tuple[Optional[float], Optional[str]]
        ] = _default_parser_function,
    ) -> None:
        """Init params."""
        self._service_context = service_context or ServiceContext.from_defaults()
        self._raise_error = raise_error

        self._eval_template: BasePromptTemplate
        if isinstance(eval_template, str):
            self._eval_template = PromptTemplate(eval_template)
        else:
            self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE

        self._refine_template: BasePromptTemplate
        if isinstance(refine_template, str):
            self._refine_template = PromptTemplate(refine_template)
        else:
            self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE

        self.parser_function = parser_function
        self.score_threshold = score_threshold

    def _get_prompts(self) -> PromptDictType:
        """Get prompts."""
        return {
            "eval_template": self._eval_template,
            "refine_template": self._refine_template,
        }

    def _update_prompts(self, prompts: PromptDictType) -> None:
        """Update prompts."""
        if "eval_template" in prompts:
            self._eval_template = prompts["eval_template"]
        if "refine_template" in prompts:
            self._refine_template = prompts["refine_template"]

    async def aevaluate(
        self,
        query: str | None = None,
        response: str | None = None,
        contexts: Sequence[str] | None = None,
        sleep_time_in_seconds: int = 0,
        **kwargs: Any,
    ) -> EvaluationResult:
        """Evaluate whether the retrieved contexts are relevant to the query."""
        del kwargs  # Unused
        del response  # Unused

        if query is None or contexts is None:
            raise ValueError("Both query and contexts must be provided")
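
        # Wrap each context in a Document and build a SummaryIndex over them; the
        # query engine below runs the eval/refine templates over these contexts.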
        docs = [Document(text=context) for context in contexts]
        index = SummaryIndex.from_documents(docs, service_context=self._service_context)

        await asyncio.sleep(sleep_time_in_seconds)

        query_engine = index.as_query_engine(
            text_qa_template=self._eval_template,
            refine_template=self._refine_template,
        )
        response_obj = await query_engine.aquery(query)
        raw_response_txt = str(response_obj)

        score, reasoning = self.parser_function(raw_response_txt)

        invalid_result, invalid_reason = False, None
        if score is None and reasoning is None:
            if self._raise_error:
                raise ValueError("The response is invalid")
            invalid_result = True
            invalid_reason = "Unable to parse the output string."
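
        # Normalize the raw score (out of ``score_threshold``, 4.0 by default) into [0, 1].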
        if score:
            score /= self.score_threshold

        return EvaluationResult(
            query=query,
            contexts=contexts,
            score=score,
            feedback=raw_response_txt,
            invalid_result=invalid_result,
            invalid_reason=invalid_reason,
        )
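

# Example usage (illustrative sketch, not part of the library module): running the
# evaluator end to end assumes an LLM is available to ServiceContext.from_defaults(),
# e.g. a configured OpenAI API key; the query and context below are made-up values.
if __name__ == "__main__":
    evaluator = ContextRelevancyEvaluator(raise_error=False)
    result = asyncio.run(
        evaluator.aevaluate(
            query="What is the capital of France?",
            contexts=["Paris is the capital and most populous city of France."],
        )
    )
    # ``result.score`` is normalized to [0, 1]; ``result.feedback`` is the raw LLM output.
    print(result.score)
    print(result.feedback)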