llama-index

"""Relevancy evaluation."""

from __future__ import annotations

import asyncio
from typing import Any, Sequence

from llama_index.legacy import ServiceContext
from llama_index.legacy.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.legacy.indices import SummaryIndex
from llama_index.legacy.prompts import BasePromptTemplate, PromptTemplate
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.schema import Document

DEFAULT_EVAL_TEMPLATE = PromptTemplate(
    "Your task is to evaluate if the response for the query \
    is in line with the context information provided.\n"
    "You have two options to answer. Either YES/ NO.\n"
    "Answer - YES, if the response for the query \
    is in line with context information otherwise NO.\n"
    "Query and Response: \n {query_str}\n"
    "Context: \n {context_str}\n"
    "Answer: "
)

DEFAULT_REFINE_TEMPLATE = PromptTemplate(
    "We want to understand if the following query and response is"
    "in line with the context information: \n {query_str}\n"
    "We have provided an existing YES/NO answer: \n {existing_answer}\n"
    "We have the opportunity to refine the existing answer "
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{context_msg}\n"
    "------------\n"
    "If the existing answer was already YES, still answer YES. "
    "If the information is present in the new context, answer YES. "
    "Otherwise answer NO.\n"
)
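# Illustrative sketch (not part of the upstream file): ``PromptTemplate.format``
# renders a template locally, which makes it easy to inspect exactly what the
# judge LLM receives. The query/context strings below are invented.
#
#   rendered = DEFAULT_EVAL_TEMPLATE.format(
#       query_str="Question: Who wrote Hamlet?\nResponse: William Shakespeare.",
#       context_str="Hamlet is a tragedy written by William Shakespeare.",
#   )
#   print(rendered)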


class RelevancyEvaluator(BaseEvaluator):
    """Relevancy evaluator.

    Evaluates the relevancy of retrieved contexts and response to a query.
    This evaluator considers the query string, retrieved contexts, and response string.

    Args:
        service_context(Optional[ServiceContext]):
            The service context to use for evaluation.
        raise_error(Optional[bool]):
            Whether to raise an error if the response is invalid.
            Defaults to False.
        eval_template(Optional[Union[str, BasePromptTemplate]]):
            The template to use for evaluation.
        refine_template(Optional[Union[str, BasePromptTemplate]]):
            The template to use for refinement.
    """
58

59
    def __init__(
60
        self,
61
        service_context: ServiceContext | None = None,
62
        raise_error: bool = False,
63
        eval_template: str | BasePromptTemplate | None = None,
64
        refine_template: str | BasePromptTemplate | None = None,
65
    ) -> None:
66
        """Init params."""
67
        self._service_context = service_context or ServiceContext.from_defaults()
68
        self._raise_error = raise_error
69

70
        self._eval_template: BasePromptTemplate
71
        if isinstance(eval_template, str):
72
            self._eval_template = PromptTemplate(eval_template)
73
        else:
74
            self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
75

76
        self._refine_template: BasePromptTemplate
77
        if isinstance(refine_template, str):
78
            self._refine_template = PromptTemplate(refine_template)
79
        else:
80
            self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE
81

82
    def _get_prompts(self) -> PromptDictType:
83
        """Get prompts."""
84
        return {
85
            "eval_template": self._eval_template,
86
            "refine_template": self._refine_template,
87
        }
88

89
    def _update_prompts(self, prompts: PromptDictType) -> None:
90
        """Update prompts."""
91
        if "eval_template" in prompts:
92
            self._eval_template = prompts["eval_template"]
93
        if "refine_template" in prompts:
94
            self._refine_template = prompts["refine_template"]
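    # Illustrative sketch (not upstream code): the two hooks above plug into the
    # prompt mixin, so templates can be swapped on an instance through the public
    # ``update_prompts`` method inherited from BaseEvaluator. The replacement
    # wording below is invented for illustration only.
    #
    #   evaluator = RelevancyEvaluator()
    #   evaluator.update_prompts(
    #       {
    #           "eval_template": PromptTemplate(
    #               "Reply YES or NO: does the response match the context?\n"
    #               "Query and Response: {query_str}\nContext: {context_str}\nAnswer: "
    #           )
    #       }
    #   )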

    async def aevaluate(
        self,
        query: str | None = None,
        response: str | None = None,
        contexts: Sequence[str] | None = None,
        sleep_time_in_seconds: int = 0,
        **kwargs: Any,
    ) -> EvaluationResult:
        """Evaluate whether the contexts and response are relevant to the query."""
        del kwargs  # Unused

        if query is None or contexts is None or response is None:
            raise ValueError("query, contexts, and response must be provided")

        # Wrap each context string in a Document and build a throwaway SummaryIndex
        # over them, so the judge LLM sees every provided context.
        docs = [Document(text=context) for context in contexts]
        index = SummaryIndex.from_documents(docs, service_context=self._service_context)

        query_response = f"Question: {query}\nResponse: {response}"

        # Optional delay, useful for staying under rate limits during batch runs.
        await asyncio.sleep(sleep_time_in_seconds)

        # Query the index with the combined question/response; the eval and refine
        # templates turn this into a YES/NO judgment from the LLM.
        query_engine = index.as_query_engine(
            text_qa_template=self._eval_template,
            refine_template=self._refine_template,
        )
        response_obj = await query_engine.aquery(query_response)

        raw_response_txt = str(response_obj)

        # Any "yes" in the raw LLM output counts as passing.
        if "yes" in raw_response_txt.lower():
            passing = True
        else:
            if self._raise_error:
                raise ValueError("The response is invalid")
            passing = False

        return EvaluationResult(
            query=query,
            response=response,
            passing=passing,
            score=1.0 if passing else 0.0,
            feedback=raw_response_txt,
            contexts=contexts,
        )


# Legacy alias kept for backwards compatibility.
QueryResponseEvaluator = RelevancyEvaluator
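
# Minimal usage sketch (not part of the upstream file). BaseEvaluator exposes a
# synchronous ``evaluate`` wrapper around ``aevaluate``; the default ServiceContext
# assumes an LLM (typically an OpenAI API key) is configured, and the strings
# below are invented purely for illustration.
if __name__ == "__main__":
    evaluator = RelevancyEvaluator()
    result = evaluator.evaluate(
        query="What is the capital of France?",
        response="The capital of France is Paris.",
        contexts=["Paris is the capital and most populous city of France."],
    )
    print(result.passing, result.score)
    print(result.feedback)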