1"""
2Utility Tools for the Portkey Class.
3
4This file module contains a collection of utility functions designed to enhance
5the functionality and usability of the Portkey class
6"""
7
from typing import TYPE_CHECKING, List, Optional

from llama_index.legacy.core.llms.types import LLMMetadata
from llama_index.legacy.llms.anthropic import Anthropic
from llama_index.legacy.llms.anthropic_utils import CLAUDE_MODELS
from llama_index.legacy.llms.openai import OpenAI
from llama_index.legacy.llms.openai_utils import (
    AZURE_TURBO_MODELS,
    GPT3_5_MODELS,
    GPT3_MODELS,
    GPT4_MODELS,
    TURBO_MODELS,
)

if TYPE_CHECKING:
    from portkey import (
        LLMOptions,
        PortkeyResponse,
    )


IMPORT_ERROR_MESSAGE = (
    "Portkey is not installed. Please install it with `pip install portkey-ai`."
)


DISCONTINUED_MODELS = {
    "code-davinci-002": 8001,
    "code-davinci-001": 8001,
    "code-cushman-002": 2048,
    "code-cushman-001": 2048,
}

DEFAULT_MODEL = "gpt-3.5-turbo"

AVAILABLE_INTEGRATIONS = (OpenAI, Anthropic)

CLUADE_MODEL_FULLVERSION_MAP = {
    "claude-instant-1": "claude-instant-1.2",
    "claude-2": "claude-2.0",
}

ALL_AVAILABLE_MODELS = {
    **GPT4_MODELS,
    **TURBO_MODELS,
    **GPT3_5_MODELS,
    **GPT3_MODELS,
    **AZURE_TURBO_MODELS,
    **CLAUDE_MODELS,
}

CHAT_MODELS = {
    **GPT4_MODELS,
    **TURBO_MODELS,
    **AZURE_TURBO_MODELS,
}


def is_chat_model(model: str) -> bool:
    """Check if a given model is a chat-based language model.

    This function takes a model name or identifier as input and determines
    whether the model is designed for chat-based language generation,
    conversation, or interaction.

    Args:
        model (str): The name or identifier of the model to be checked.

    Returns:
        bool: True if the provided model is a chat-based language model,
        False otherwise.
    """
    return model in CHAT_MODELS

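# A minimal usage sketch for ``is_chat_model`` (the model names are
# illustrative and assumed to be present in the imported model maps):
#
#     is_chat_model("gpt-4")             # True  -- listed in GPT4_MODELS
#     is_chat_model("text-davinci-003")  # False -- a completion model
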
def modelname_to_contextsize(modelname: str) -> int:
    """Calculate the maximum number of tokens possible to generate for a model.

    Args:
        modelname: The model name for which we want to know the context size.

    Returns:
        The maximum context size.

    Example:
        .. code-block:: python

            max_tokens = modelname_to_contextsize("text-davinci-003")
    """
    # Handle fine-tuned models by reducing them to their base model name.
    if "ft-" in modelname:  # legacy fine-tuning
        modelname = modelname.split(":")[0]
    elif modelname.startswith("ft:"):
        modelname = modelname.split(":")[1]

    if modelname in DISCONTINUED_MODELS:
        raise ValueError(
            f"Model {modelname} has been discontinued. Please choose another model."
        )

    context_size = ALL_AVAILABLE_MODELS.get(modelname)

    if context_size is None:
        raise ValueError(
            f"Unknown model: {modelname}. Please provide a valid model name. "
            "Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
        )

    return context_size

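# Fine-tune handling sketch: names are reduced to their base model before
# the lookup (the fine-tune identifiers below are made-up placeholders):
#
#     modelname_to_contextsize("ft:gpt-3.5-turbo:my-org::abc123")
#     # -> context size of "gpt-3.5-turbo"
#     modelname_to_contextsize("ada:ft-my-org-2023-01-01")
#     # -> context size of "ada" (legacy fine-tune naming)
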
def generate_llm_metadata(llm: "LLMOptions") -> LLMMetadata:
    """
    Generate metadata for a Language Model (LLM) instance.

    This function takes an instance of a Language Model (LLM) and generates
    metadata based on the provided instance. The metadata includes information
    such as the context window, number of output tokens, chat model status,
    and model name.

    Parameters:
        llm (LLM): An instance of a Language Model (LLM) from which metadata
            will be generated.

    Returns:
        LLMMetadata: A data structure containing metadata attributes such as
        context window, number of output tokens, chat model status, and
        model name.

    Raises:
        ValueError: If the provided 'llm' is not an instance of
            portkey.LLMOptions.
    """
    try:
        from portkey import LLMOptions
    except ImportError as exc:
        raise ImportError(IMPORT_ERROR_MESSAGE) from exc
    if not isinstance(llm, LLMOptions):
        raise ValueError("llm must be an instance of portkey.LLMOptions")

    return LLMMetadata(
        context_window=modelname_to_contextsize(llm.model or ""),
        is_chat_model=is_chat_model(llm.model or ""),
        model_name=llm.model,
    )

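# A minimal usage sketch, assuming `portkey-ai` is installed (the option
# fields shown here are illustrative, not a guaranteed constructor signature):
#
#     from portkey import LLMOptions
#
#     opt = LLMOptions(provider="openai", model="gpt-4")
#     metadata = generate_llm_metadata(opt)
#     # metadata.context_window and metadata.is_chat_model are derived from
#     # the model name via the helpers above.
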
def get_llm(response: "PortkeyResponse", llms: List["LLMOptions"]) -> "LLMOptions":
    """Return the option from ``llms`` whose model matches the response's model."""
    # TODO: Update this logic over here.
    try:
        from portkey import LLMOptions
    except ImportError as exc:
        raise ImportError(IMPORT_ERROR_MESSAGE) from exc

    # Use None as the sentinel so a missing match is detected below.
    fallback_llm: Optional[LLMOptions] = None
    for llm in llms:
        if llm.model == response.model:
            fallback_llm = llm
            break

    if fallback_llm is None:
        raise ValueError("Failed to get the fallback LLM")
    return fallback_llm
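

# A minimal sketch of the fallback lookup, assuming `portkey-ai` is installed
# (the option values are illustrative):
#
#     llms = [
#         LLMOptions(provider="openai", model="gpt-4"),
#         LLMOptions(provider="anthropic", model="claude-2"),
#     ]
#     # response.model reports which configured option served the request,
#     # e.g. "claude-2" would select the second option above.
#     chosen = get_llm(response, llms)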