from __future__ import annotations

from typing import TYPE_CHECKING, Any, Dict, List, Tuple
from uuid import uuid4

from llama_index.legacy.llm_predictor.vellum.types import (
    VellumCompiledPrompt,
    VellumRegisteredPrompt,
)
from llama_index.legacy.llm_predictor.vellum.utils import convert_to_kebab_case
from llama_index.legacy.prompts import BasePromptTemplate
from llama_index.legacy.prompts.base import PromptTemplate

if TYPE_CHECKING:
    import vellum


class VellumPromptRegistry:
    """Registers and retrieves prompts with Vellum.

    LlamaIndex Prompts can be registered within Vellum, at which point Vellum becomes
    the source of truth for the prompt. From there, Vellum can be used for prompt/model
    experimentation, request monitoring, and more.
    """

    def __init__(self, vellum_api_key: str) -> None:
        import_err_msg = (
            "`vellum` package not found, please run `pip install vellum-ai`"
        )
        try:
            from vellum.client import Vellum
        except ImportError:
            raise ImportError(import_err_msg)

        self._vellum_client = Vellum(api_key=vellum_api_key)

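    # Usage sketch (illustrative, not part of this module): construct the
    # registry with a Vellum API key, then hand it LlamaIndex prompts via
    # `from_prompt` below.
    #
    #   registry = VellumPromptRegistry(vellum_api_key="<YOUR_VELLUM_API_KEY>")
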
    def from_prompt(self, initial_prompt: BasePromptTemplate) -> VellumRegisteredPrompt:
        """Accepts a LlamaIndex prompt and retrieves a corresponding registered prompt
        from Vellum.

        If the LlamaIndex prompt hasn't yet been registered, it'll be registered
        automatically, after which point Vellum becomes the source-of-truth for the
        prompt's definition.

        In this way, the LlamaIndex prompt is treated as the initial value for the newly
        registered prompt in Vellum.

        You can reference a previously registered prompt by providing either
        `vellum_deployment_id` or `vellum_deployment_name` as key/value pairs within
        `BasePromptTemplate.metadata`.
        """
        from vellum.core import ApiError

        deployment_id = initial_prompt.metadata.get("vellum_deployment_id")
        deployment_name = initial_prompt.metadata.get(
            "vellum_deployment_name"
        ) or self._generate_default_name(initial_prompt)

        registered_prompt: VellumRegisteredPrompt
        try:
            deployment = self._vellum_client.deployments.retrieve(
                deployment_id or deployment_name
            )
        except ApiError as e:
            if e.status_code == 404:
                registered_prompt = self._register_prompt(initial_prompt)
            else:
                raise
        else:
            registered_prompt = self._get_registered_prompt(deployment)

        return registered_prompt

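    # Hedged example of the flow above (prompt text and metadata values are
    # illustrative): `vellum_deployment_name`/`vellum_deployment_id` metadata
    # points at an existing Vellum deployment; otherwise the prompt is
    # registered under a generated default name.
    #
    #   prompt = PromptTemplate(
    #       "Summarize the following text:\n{context_str}",
    #       metadata={"vellum_deployment_name": "my-summary-prompt"},
    #   )
    #   registered = registry.from_prompt(prompt)
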
    def get_compiled_prompt(
        self, registered_prompt: VellumRegisteredPrompt, input_values: Dict[str, Any]
    ) -> VellumCompiledPrompt:
        """Retrieves the fully-compiled prompt from Vellum, after all variable
        substitutions, templating, etc.
        """
        result = self._vellum_client.model_versions.model_version_compile_prompt(
            registered_prompt.model_version_id, input_values=input_values
        )
        return VellumCompiledPrompt(
            text=result.prompt.text, num_tokens=result.prompt.num_tokens
        )

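    # Continuing the sketch above: `input_values` maps the prompt's template
    # variables to concrete values, and Vellum returns the rendered prompt
    # text together with its token count.
    #
    #   compiled = registry.get_compiled_prompt(
    #       registered, input_values={"context_str": "..."}
    #   )
    #   print(compiled.text, compiled.num_tokens)
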
    def _get_registered_prompt(
        self, deployment: vellum.DeploymentRead
    ) -> VellumRegisteredPrompt:
        """Retrieves a prompt from Vellum, keying off of the deployment's id/name."""
        # Assume that the deployment backing a registered prompt will always have a
        # single model version. Note that this may not be true in the future once
        # deployment-level A/B testing is supported and someone configures an A/B test.
        model_version_id = deployment.active_model_version_ids[0]
        model_version = self._vellum_client.model_versions.retrieve(model_version_id)

        sandbox_snapshot_info = model_version.build_config.sandbox_snapshot
        sandbox_snapshot_id = (
            sandbox_snapshot_info.id if sandbox_snapshot_info else None
        )
        prompt_id = sandbox_snapshot_info.prompt_id if sandbox_snapshot_info else None
        sandbox_id = sandbox_snapshot_info.sandbox_id if sandbox_snapshot_info else None

        return VellumRegisteredPrompt(
            deployment_id=deployment.id,
            deployment_name=deployment.name,
            model_version_id=model_version.id,
            sandbox_id=sandbox_id,
            sandbox_snapshot_id=sandbox_snapshot_id,
            prompt_id=prompt_id,
        )

    def _register_prompt(self, prompt: BasePromptTemplate) -> VellumRegisteredPrompt:
        """Registers a prompt with Vellum.

        By registering a prompt, Vellum will:
        1) Create a Sandbox for the prompt so that you can experiment with the
           prompt, LLM provider, model, and parameters via Vellum's UI.
        2) Create a Deployment for the prompt so that you can monitor requests and
           update the prompt, LLM provider, model, and parameters via Vellum's UI
           without requiring code changes.
        """
        # Label represents a human-friendly name that'll be used for all created
        # entities within Vellum. If not provided, a default will be generated.
        label = prompt.metadata.get(
            "vellum_deployment_label"
        ) or self._generate_default_label(prompt)

        # Name represents a kebab-cased unique identifier that'll be used for all
        # created entities within Vellum. If not provided, a default will be generated.
        name = prompt.metadata.get(
            "vellum_deployment_name"
        ) or self._generate_default_name(prompt)

        # Note: For now, the initial provider, model, and parameters used to register
        # the prompt are hard-coded. You can then update any of these from within
        # Vellum's UI. As a future improvement, we could allow these to be specified
        # upfront.
        provider, model, params = self._get_default_llm_meta()
        prompt_info = self._construct_prompt_info(prompt, for_chat_model=True)

        resp = self._vellum_client.registered_prompts.register_prompt(
            label=label,
            name=name,
            prompt=prompt_info,
            provider=provider,
            model=model,
            parameters=params,
            meta={
                "source": "llamaindex",
                "prompt_type": prompt.metadata["prompt_type"],
            },
        )

        return VellumRegisteredPrompt(
            deployment_id=resp.deployment.id,
            deployment_name=resp.deployment.name,
            model_version_id=resp.model_version.id,
            sandbox_id=resp.sandbox.id,
            sandbox_snapshot_id=resp.sandbox_snapshot.id,
            prompt_id=resp.prompt.id,
        )

    def _generate_default_label(self, prompt: BasePromptTemplate) -> str:
        prompt_type = prompt.metadata["prompt_type"]
        return f"LlamaIndex Demo: {prompt_type}"

    def _generate_default_name(self, prompt: BasePromptTemplate) -> str:
        default_label = self._generate_default_label(prompt)
        return convert_to_kebab_case(default_label)

    def _construct_prompt_info(
        self, prompt: BasePromptTemplate, for_chat_model: bool = True
    ) -> vellum.RegisterPromptPromptInfoRequest:
        """Converts a LlamaIndex prompt into Vellum's prompt representation."""
        import vellum

        # Narrow to PromptTemplate so that `.template` and `.template_vars`
        # are available below.
        assert isinstance(prompt, PromptTemplate)

        block: vellum.PromptTemplateBlockRequest
        jinja_block = vellum.PromptTemplateBlockRequest(
            id=str(uuid4()),
            block_type=vellum.BlockTypeEnum.JINJA,
            properties=vellum.PromptTemplateBlockPropertiesRequest(
                template=self._prepare_prompt_jinja_template(
                    prompt.template,
                    prompt.template_vars,
                ),
            ),
        )
        if for_chat_model:
            block = vellum.PromptTemplateBlockRequest(
                id=str(uuid4()),
                block_type=vellum.BlockTypeEnum.CHAT_MESSAGE,
                properties=vellum.PromptTemplateBlockPropertiesRequest(
                    chat_role=vellum.ChatMessageRole.SYSTEM,
                    blocks=[jinja_block],
                ),
            )
        else:
            block = jinja_block

        return vellum.RegisterPromptPromptInfoRequest(
            prompt_syntax_version=2,
            prompt_block_data=vellum.PromptTemplateBlockDataRequest(
                version=1,
                blocks=[block],
            ),
            input_variables=[{"key": input_var} for input_var in prompt.template_vars],
        )

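    # For reference, the request built above has roughly this shape when
    # `for_chat_model=True` (values illustrative, ids generated via uuid4):
    #
    #   RegisterPromptPromptInfoRequest(
    #       prompt_syntax_version=2,
    #       prompt_block_data=PromptTemplateBlockDataRequest(
    #           version=1,
    #           blocks=[<CHAT_MESSAGE block (SYSTEM role) wrapping one JINJA
    #                    block that holds the converted template>],
    #       ),
    #       input_variables=[{"key": "context_str"}, ...],
    #   )
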
    def _prepare_prompt_jinja_template(
        self, original_template: str, input_variables: List[str]
    ) -> str:
        """Converts a prompt template into a Jinja template."""
        prompt_template = original_template
        for input_variable in input_variables:
            prompt_template = prompt_template.replace(
                ("{" + input_variable + "}"), ("{{ " + input_variable + " }}")
            )

        return prompt_template

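    # Example of the conversion performed above (template and variable names
    # illustrative): single-brace placeholders become Jinja expressions.
    #
    #   self._prepare_prompt_jinja_template(
    #       "Context: {context_str}\nQuery: {query_str}",
    #       ["context_str", "query_str"],
    #   )
    #   # -> "Context: {{ context_str }}\nQuery: {{ query_str }}"
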
    def _get_default_llm_meta(
        self,
    ) -> Tuple[vellum.ProviderEnum, str, vellum.RegisterPromptModelParametersRequest]:
        import vellum

        return (
            vellum.ProviderEnum.OPENAI,
            "gpt-3.5-turbo",
            vellum.RegisterPromptModelParametersRequest(
                temperature=0.0,
                max_tokens=256,
                stop=[],
                top_p=1.0,
                top_k=0.0,
                frequency_penalty=0.0,
                presence_penalty=0.0,
                logit_bias=None,
            ),
        )