from typing import Any, Callable, Dict, Optional, Sequence

from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_CONTEXT_WINDOW
from llama_index.legacy.core.llms.types import (
    ChatMessage,
    ChatResponse,
    ChatResponseAsyncGen,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseAsyncGen,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.legacy.llms.base import (
    llm_chat_callback,
    llm_completion_callback,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode

EXAMPLE_URL = "https://clarifai.com/anthropic/completion/models/claude-v2"


class Clarifai(LLM):
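    """Clarifai LLM.

    Wraps a model hosted on Clarifai (via the `clarifai` Python SDK) in the
    llama-index `LLM` interface. A minimal usage sketch, assuming a valid
    Personal Access Token (the PAT below is a placeholder):

        llm = Clarifai(model_url=EXAMPLE_URL, pat="YOUR_CLARIFAI_PAT")
        print(llm.complete("Write a haiku about rivers.").text)
    """
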
    model_url: Optional[str] = Field(
        description=f"Full URL of the model. e.g. `{EXAMPLE_URL}`"
    )
    model_version_id: Optional[str] = Field(description="Model Version ID.")
    app_id: Optional[str] = Field(description="Clarifai application ID of the model.")
    user_id: Optional[str] = Field(description="Clarifai user ID of the model.")
    pat: Optional[str] = Field(
        description="Personal Access Token (PAT) used to validate requests."
    )
    model_name: Optional[str] = Field(description="Name (ID) of the model.")
    temperature: float = Field(description="The temperature to use for sampling.")
    max_tokens: int = Field(description="The maximum number of tokens to generate.")
    context_window: int = Field(
        default=DEFAULT_CONTEXT_WINDOW,
        description="Maximum number of context tokens for the model.",
    )
    additional_kwargs: Dict[str, Any] = Field(
        default_factory=dict, description="Additional kwargs for the Clarifai API."
    )

    _model: Any = PrivateAttr()
    _is_chat_model: bool = PrivateAttr()

    def __init__(
        self,
        model_name: Optional[str] = None,
        model_url: Optional[str] = None,
        model_version_id: Optional[str] = "",
        app_id: Optional[str] = None,
        user_id: Optional[str] = None,
        pat: Optional[str] = None,
        temperature: float = 0.1,
        max_tokens: int = 512,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
    ):
        try:
            import os

            from clarifai.client.model import Model
        except ImportError:
            raise ImportError("ClarifaiLLM requires `pip install clarifai`.")

        # Fall back to the CLARIFAI_PAT environment variable if no PAT is given.
        pat = pat or os.environ.get("CLARIFAI_PAT")
        if not pat:
            raise ValueError(
                "Set `CLARIFAI_PAT` as an environment variable or pass `pat` as a constructor argument."
            )

        if model_url is not None and model_name is not None:
            raise ValueError("You can only specify one of model_url or model_name.")
        if model_url is None and model_name is None:
            raise ValueError("You must specify one of model_url or model_name.")

        if model_name is not None:
            if app_id is None or user_id is None:
                raise ValueError(
                    f"Missing app ID or user ID of the model: {app_id=}, {user_id=}"
                )
            self._model = Model(
                user_id=user_id,
                app_id=app_id,
                model_id=model_name,
                model_version={"id": model_version_id},
                pat=pat,
            )

        if model_url is not None:
            self._model = Model(model_url, pat=pat)
            model_name = self._model.id

        # Heuristic: treat the model as chat-capable if "chat" appears in its
        # app ID or model ID.
        self._is_chat_model = "chat" in self._model.app_id or "chat" in self._model.id

        additional_kwargs = additional_kwargs or {}

        super().__init__(
            temperature=temperature,
            max_tokens=max_tokens,
            additional_kwargs=additional_kwargs,
            callback_manager=callback_manager,
            model_name=model_name,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
        )
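
    # Construction sketch: a model can also be addressed by IDs instead of a
    # full URL. The IDs below are the components of EXAMPLE_URL; the PAT is a
    # placeholder:
    #
    #     llm = Clarifai(
    #         model_name="claude-v2",
    #         user_id="anthropic",
    #         app_id="completion",
    #         pat="YOUR_CLARIFAI_PAT",
    #     )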

    @classmethod
    def class_name(cls) -> str:
        return "ClarifaiLLM"

    @property
    def metadata(self) -> LLMMetadata:
        """LLM metadata."""
        return LLMMetadata(
            context_window=self.context_window,
            num_output=self.max_tokens,
            model_name=self.model_name,
            is_chat_model=self._is_chat_model,
        )

    # TODO: When the Clarifai python SDK supports inference params, add here.
    @llm_chat_callback()
    def chat(
        self,
        messages: Sequence[ChatMessage],
        inference_params: Optional[Dict] = None,
        **kwargs: Any,
    ) -> ChatResponse:
        """Chat endpoint for LLM."""
        # Clarifai has no native chat endpoint here, so the chat history is
        # flattened into a single text prompt.
        prompt = "\n".join(str(m) for m in messages)
        try:
            response = (
                self._model.predict_by_bytes(
                    input_bytes=prompt.encode(encoding="utf-8"),
                    input_type="text",
                    inference_params=inference_params or {},
                )
                .outputs[0]
                .data.text.raw
            )
        except Exception as e:
            raise Exception(f"Prediction failed: {e}") from e
        return ChatResponse(message=ChatMessage(content=response))
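
    # Usage sketch for `chat` (assumes a chat-capable model; the PAT is a
    # placeholder):
    #
    #     llm = Clarifai(model_url=EXAMPLE_URL, pat="YOUR_CLARIFAI_PAT")
    #     resp = llm.chat([ChatMessage(role="user", content="Hello!")])
    #     print(resp.message.content)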

    @llm_completion_callback()
    def complete(
        self,
        prompt: str,
        formatted: bool = False,
        inference_params: Optional[Dict] = None,
        **kwargs: Any,
    ) -> CompletionResponse:
        """Completion endpoint for LLM."""
        try:
            response = (
                self._model.predict_by_bytes(
                    input_bytes=prompt.encode(encoding="utf-8"),
                    input_type="text",
                    inference_params=inference_params or {},
                )
                .outputs[0]
                .data.text.raw
            )
        except Exception as e:
            raise Exception(f"Prediction failed: {e}") from e
        return CompletionResponse(text=response)
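
    # Usage sketch for `complete` with per-request inference params. Whether a
    # given key (e.g. `temperature`) is honored depends on the hosted model,
    # so treat the keys below as assumptions:
    #
    #     llm = Clarifai(model_url=EXAMPLE_URL, pat="YOUR_CLARIFAI_PAT")
    #     resp = llm.complete(
    #         "Summarize the water cycle in one sentence.",
    #         inference_params={"temperature": 0.2, "max_tokens": 64},
    #     )
    #     print(resp.text)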

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        raise NotImplementedError(
            "Clarifai does not currently support streaming chat."
        )

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        raise NotImplementedError(
            "Clarifai does not currently support streaming completion."
        )

    @llm_chat_callback()
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        raise NotImplementedError("Currently not supported.")

    @llm_completion_callback()
    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        # Synchronous fallback: the underlying Clarifai SDK call is blocking.
        return self.complete(prompt, formatted=formatted, **kwargs)

    @llm_chat_callback()
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        raise NotImplementedError("Currently not supported.")

    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        raise NotImplementedError("Clarifai does not currently support this function.")