llama-index
58 lines · 1.9 KB
1import base64
2from typing import Any, Dict, Union
3
4from llama_index.legacy.llms import ChatMessage, MessageRole
5
6
def is_gemini_model(model: str) -> bool:
    """Return True when *model* names a Gemini-family model (case-sensitive prefix match)."""
    prefix = "gemini"
    return model[: len(prefix)] == prefix
9
10
def create_gemini_client(model: str) -> Any:
    """Build and return a Vertex AI ``GenerativeModel`` client for *model*.

    The vertexai import is deferred to call time so the module loads even
    when the Vertex AI SDK is not installed.
    """
    from vertexai.preview.generative_models import GenerativeModel

    client = GenerativeModel(model_name=model)
    return client
15
16
def convert_chat_message_to_gemini_content(
    message: ChatMessage, is_history: bool = True
) -> Any:
    """Convert a llama-index ``ChatMessage`` into Vertex AI Gemini content.

    Args:
        message: The message to convert. ``message.content`` may be ``None``
            (treated as empty text), a plain string, or a list whose items are
            strings or dicts of the form ``{"type": "text", "text": ...}`` or
            ``{"type": "image_url", "image_url": ...}``.
        is_history: When True, wrap the converted parts in a ``Content``
            object carrying a Gemini role ("user" for ``MessageRole.USER``,
            "model" otherwise); when False, return the bare list of ``Part``
            objects.

    Returns:
        A ``Content`` object when ``is_history`` is True, otherwise a list of
        ``Part`` objects.

    Raises:
        ValueError: If a part is neither str nor dict, has an unsupported
            ``type``, or references a ``gs://`` image path.
    """
    # Deferred import: vertexai is only needed when this function is called.
    from vertexai.preview.generative_models import Content, Image, Part

    # Data-URL prefix for inline JPEG images; the strip length below is
    # derived from this string instead of a hard-coded magic constant.
    base64_prefix = "data:image/jpeg;base64,"

    def _convert_gemini_part_to_prompt(part: Union[str, Dict]) -> Part:
        # Bare strings become text parts directly.
        if isinstance(part, str):
            return Part.from_text(part)

        # Check against the concrete type, not the typing.Dict alias.
        if not isinstance(part, dict):
            raise ValueError(
                f"Message's content is expected to be a dict, got {type(part)}!"
            )
        if part["type"] == "text":
            return Part.from_text(part["text"])
        elif part["type"] == "image_url":
            path = part["image_url"]
            if path.startswith("gs://"):
                raise ValueError("Only local image path is supported!")
            elif path.startswith(base64_prefix):
                # Strip the data-URL prefix before base64-decoding.
                image = Image.from_bytes(
                    base64.b64decode(path[len(base64_prefix):])
                )
            else:
                image = Image.load_from_file(path)
        else:
            raise ValueError("Only text and image_url types are supported!")
        # Only reachable from the image_url branch; the others return/raise.
        return Part.from_image(image)

    raw_content = message.content
    if raw_content is None:
        raw_content = ""
    if isinstance(raw_content, str):
        raw_content = [raw_content]
    parts = [_convert_gemini_part_to_prompt(part) for part in raw_content]
    if is_history:
        return Content(
            role="user" if message.role == MessageRole.USER else "model",
            parts=parts,
        )
    return parts
59