1"""Relevancy evaluation."""
2
3from __future__ import annotations
4
5import asyncio
6from typing import Any, Sequence
7
8from llama_index.legacy import ServiceContext
9from llama_index.legacy.evaluation.base import BaseEvaluator, EvaluationResult
10from llama_index.legacy.indices import SummaryIndex
11from llama_index.legacy.prompts import BasePromptTemplate, PromptTemplate
12from llama_index.legacy.prompts.mixin import PromptDictType
13from llama_index.legacy.schema import Document
14
DEFAULT_EVAL_TEMPLATE = PromptTemplate(
    "Your task is to evaluate if the response for the query \
    is in line with the context information provided.\n"
    "You have two options to answer. Either YES/ NO.\n"
    "Answer - YES, if the response for the query \
    is in line with context information otherwise NO.\n"
    "Query and Response: \n {query_str}\n"
    "Context: \n {context_str}\n"
    "Answer: "
)

DEFAULT_REFINE_TEMPLATE = PromptTemplate(
    "We want to understand if the following query and response is "
    "in line with the context information: \n {query_str}\n"
    "We have provided an existing YES/NO answer: \n {existing_answer}\n"
    "We have the opportunity to refine the existing answer "
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{context_msg}\n"
    "------------\n"
    "If the existing answer was already YES, still answer YES. "
    "If the information is present in the new context, answer YES. "
    "Otherwise answer NO.\n"
)

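# Illustrative only (not part of the upstream module): callers can override the
# default templates above by passing either a raw string or a BasePromptTemplate
# to RelevancyEvaluator(eval_template=..., refine_template=...); a raw string is
# wrapped in PromptTemplate by __init__ below. The wording of this example
# template is made up.
_EXAMPLE_CUSTOM_EVAL_TEMPLATE = PromptTemplate(
    "Answer YES if the response is supported by the context, otherwise answer NO.\n"
    "Query and Response: \n {query_str}\n"
    "Context: \n {context_str}\n"
    "Answer: "
)
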
class RelevancyEvaluator(BaseEvaluator):
    """Relevancy evaluator.

    Evaluates the relevancy of retrieved contexts and response to a query.
    This evaluator considers the query string, retrieved contexts, and response string.

    Args:
        service_context(Optional[ServiceContext]):
            The service context to use for evaluation.
        raise_error(Optional[bool]):
            Whether to raise an error if the response is invalid.
            Defaults to False.
        eval_template(Optional[Union[str, BasePromptTemplate]]):
            The template to use for evaluation.
        refine_template(Optional[Union[str, BasePromptTemplate]]):
            The template to use for refinement.
    """

    def __init__(
        self,
        service_context: ServiceContext | None = None,
        raise_error: bool = False,
        eval_template: str | BasePromptTemplate | None = None,
        refine_template: str | BasePromptTemplate | None = None,
    ) -> None:
        """Init params."""
        self._service_context = service_context or ServiceContext.from_defaults()
        self._raise_error = raise_error

        self._eval_template: BasePromptTemplate
        if isinstance(eval_template, str):
            self._eval_template = PromptTemplate(eval_template)
        else:
            self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE

        self._refine_template: BasePromptTemplate
        if isinstance(refine_template, str):
            self._refine_template = PromptTemplate(refine_template)
        else:
            self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE

    def _get_prompts(self) -> PromptDictType:
        """Get prompts."""
        return {
            "eval_template": self._eval_template,
            "refine_template": self._refine_template,
        }

    def _update_prompts(self, prompts: PromptDictType) -> None:
        """Update prompts."""
        if "eval_template" in prompts:
            self._eval_template = prompts["eval_template"]
        if "refine_template" in prompts:
            self._refine_template = prompts["refine_template"]

    async def aevaluate(
        self,
        query: str | None = None,
        response: str | None = None,
        contexts: Sequence[str] | None = None,
        sleep_time_in_seconds: int = 0,
        **kwargs: Any,
    ) -> EvaluationResult:
        """Evaluate whether the contexts and response are relevant to the query."""
        del kwargs  # Unused

        if query is None or contexts is None or response is None:
            raise ValueError("query, contexts, and response must be provided")

        docs = [Document(text=context) for context in contexts]
        index = SummaryIndex.from_documents(docs, service_context=self._service_context)

        query_response = f"Question: {query}\nResponse: {response}"

        await asyncio.sleep(sleep_time_in_seconds)

        query_engine = index.as_query_engine(
            text_qa_template=self._eval_template,
            refine_template=self._refine_template,
        )
        response_obj = await query_engine.aquery(query_response)

        raw_response_txt = str(response_obj)

        if "yes" in raw_response_txt.lower():
            passing = True
        else:
            if self._raise_error:
                raise ValueError("The response is invalid")
            passing = False

        return EvaluationResult(
            query=query,
            response=response,
            passing=passing,
            score=1.0 if passing else 0.0,
            feedback=raw_response_txt,
            contexts=contexts,
        )


QueryResponseEvaluator = RelevancyEvaluator

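
# Usage sketch (not part of the upstream module): drives the evaluator end to end.
# It assumes the default ServiceContext can construct an LLM (e.g. an OpenAI API key
# is set in the environment); the query, response, and context strings are made up.
if __name__ == "__main__":

    async def _demo() -> None:
        evaluator = RelevancyEvaluator(raise_error=False)
        result = await evaluator.aevaluate(
            query="Who wrote 'Pride and Prejudice'?",
            response="'Pride and Prejudice' was written by Jane Austen.",
            contexts=["Jane Austen published 'Pride and Prejudice' in 1813."],
        )
        # `passing` is True when the judge LLM answered YES; `feedback` keeps the raw text.
        print(result.passing, result.score, result.feedback)

    asyncio.run(_demo())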