llama-index (legacy) — multi-modal LLM completion program. 116 lines · 3.9 KB.
from typing import Any, Dict, Optional, Sequence, Type, cast

from llama_index.legacy.bridge.pydantic import BaseModel
from llama_index.legacy.multi_modal_llms import MultiModalLLM, OpenAIMultiModal
from llama_index.legacy.output_parsers.pydantic import PydanticOutputParser
from llama_index.legacy.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.legacy.schema import ImageDocument
from llama_index.legacy.types import BasePydanticProgram
from llama_index.legacy.utils import print_text


class MultiModalLLMCompletionProgram(BasePydanticProgram[BaseModel]):
    """
    Multi Modal LLM Completion Program.

    Uses a generic Multi Modal LLM completion together with a Pydantic
    output parser to generate a structured output (an instance of the
    parser's ``output_cls``) from a formatted text prompt plus a set of
    image documents.
    """

    def __init__(
        self,
        output_parser: PydanticOutputParser,
        prompt: BasePromptTemplate,
        multi_modal_llm: MultiModalLLM,
        image_documents: Sequence[ImageDocument],
        verbose: bool = False,
    ) -> None:
        """Init params.

        Args:
            output_parser: Parser that converts the raw LLM completion text
                into the target Pydantic model.
            prompt: Prompt template used to build the completion request.
            multi_modal_llm: Multi-modal LLM used to run the completion.
            image_documents: Images sent alongside the formatted prompt.
            verbose: If True, print the raw LLM output before parsing.
        """
        self._output_parser = output_parser
        self._multi_modal_llm = multi_modal_llm
        self._prompt = prompt
        self._image_documents = image_documents
        self._verbose = verbose

        # Attach the parser to the prompt so that the parser's format
        # instructions are injected when the prompt is formatted.
        self._prompt.output_parser = output_parser

    @classmethod
    def from_defaults(
        cls,
        output_parser: PydanticOutputParser,
        prompt_template_str: Optional[str] = None,
        prompt: Optional[PromptTemplate] = None,
        multi_modal_llm: Optional[MultiModalLLM] = None,
        image_documents: Optional[Sequence[ImageDocument]] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> "MultiModalLLMCompletionProgram":
        """Build a program from defaults.

        Exactly one of ``prompt`` / ``prompt_template_str`` must be given.
        Falls back to an OpenAI GPT-4V model when no LLM is provided, and
        to an empty image list when no images are provided.

        Raises:
            ValueError: If neither, or both, of ``prompt`` and
                ``prompt_template_str`` are provided.
        """
        multi_modal_llm = multi_modal_llm or OpenAIMultiModal(
            temperature=0, model="gpt-4-vision-preview"
        )
        if prompt is None and prompt_template_str is None:
            raise ValueError("Must provide either prompt or prompt_template_str.")
        if prompt is not None and prompt_template_str is not None:
            # BUGFIX: this branch previously raised the same "either"
            # message as the branch above, which is misleading when both
            # arguments were supplied.
            raise ValueError(
                "Must provide only one of prompt or prompt_template_str."
            )
        if prompt_template_str is not None:
            prompt = PromptTemplate(prompt_template_str)
        return cls(
            output_parser,
            prompt=cast(PromptTemplate, prompt),
            multi_modal_llm=multi_modal_llm,
            image_documents=image_documents or [],
            verbose=verbose,
        )

    @property
    def output_cls(self) -> Type[BaseModel]:
        """Target Pydantic model class produced by this program."""
        return self._output_parser.output_cls

    @property
    def prompt(self) -> BasePromptTemplate:
        """Prompt template used to build the completion request."""
        return self._prompt

    @prompt.setter
    def prompt(self, prompt: BasePromptTemplate) -> None:
        self._prompt = prompt

    def _parse_output(self, raw_output: str) -> BaseModel:
        """Shared tail of ``__call__``/``acall``: optionally echo, then parse."""
        if self._verbose:
            print_text(f"> Raw output: {raw_output}\n", color="llama_blue")
        return self._output_parser.parse(raw_output)

    def __call__(
        self,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        *args: Any,
        **kwargs: Any,
    ) -> BaseModel:
        """Run the program synchronously.

        Args:
            llm_kwargs: Extra keyword arguments forwarded to the LLM's
                ``complete`` call.
            **kwargs: Template variables used to format the prompt.

        Returns:
            The parsed Pydantic object (instance of ``output_cls``).
        """
        llm_kwargs = llm_kwargs or {}
        formatted_prompt = self._prompt.format(llm=self._multi_modal_llm, **kwargs)

        response = self._multi_modal_llm.complete(
            formatted_prompt,
            image_documents=self._image_documents,
            **llm_kwargs,
        )

        return self._parse_output(response.text)

    async def acall(
        self,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        *args: Any,
        **kwargs: Any,
    ) -> BaseModel:
        """Async variant of ``__call__``; uses the LLM's ``acomplete``."""
        llm_kwargs = llm_kwargs or {}
        formatted_prompt = self._prompt.format(llm=self._multi_modal_llm, **kwargs)

        response = await self._multi_modal_llm.acomplete(
            formatted_prompt,
            image_documents=self._image_documents,
            **llm_kwargs,
        )

        return self._parse_output(response.text)
117

Use of cookies

We use cookies in accordance with the Privacy Policy and the Cookie Policy.

By clicking "Accept", you give JSC SberTech consent to process your personal data in order to improve our website and the GitVerse service, as well as to enhance their usability.

You can disable the use of cookies yourself in your browser settings.