# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Optional

from parameterized import parameterized
from utils_tests import (
    _ARCHITECTURES_TO_EXPECTED_INT4_INT8,
    _ARCHITECTURES_TO_EXPECTED_INT8,
    MODEL_NAMES,
    get_num_quantized_nodes,
)

from optimum.exporters.openvino.__main__ import main_export
from optimum.intel import (  # noqa
    OVModelForAudioClassification,
    OVModelForCausalLM,
    OVModelForFeatureExtraction,
    OVModelForImageClassification,
    OVModelForMaskedLM,
    OVModelForQuestionAnswering,
    OVModelForSeq2SeqLM,
    OVModelForSequenceClassification,
    OVModelForTokenClassification,
    OVStableDiffusionPipeline,
    OVStableDiffusionXLPipeline,
)
from optimum.intel.openvino.utils import _HEAD_TO_AUTOMODELS
from optimum.intel.utils.import_utils import is_openvino_tokenizers_available


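# These tests drive the `optimum-cli export openvino` command end to end and
# then reload each exported model through the matching OVModel* class. They can
# be run on their own with pytest; the path below assumes the repository's
# usual test layout:
#
#     python -m pytest tests/openvino/test_exporters_cli.py -v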
class OVCLIExportTestCase(unittest.TestCase):
    """
    Integration tests ensuring supported models are correctly exported.
    """

    SUPPORTED_ARCHITECTURES = (
        ("text-generation", "gpt2"),
        ("text-generation-with-past", "gpt2"),
        ("text2text-generation", "t5"),
        ("text2text-generation-with-past", "t5"),
        ("text-classification", "albert"),
        ("question-answering", "distilbert"),
        ("token-classification", "roberta"),
        ("image-classification", "vit"),
        ("audio-classification", "wav2vec2"),
        ("fill-mask", "bert"),
        ("feature-extraction", "blenderbot"),
        ("stable-diffusion", "stable-diffusion"),
        ("stable-diffusion-xl", "stable-diffusion-xl"),
        ("stable-diffusion-xl", "stable-diffusion-xl-refiner"),
    )
    EXPECTED_NUMBER_OF_TOKENIZER_MODELS = {
        "gpt2": 2,
        "t5": 0,  # failed internal sentencepiece check - no <s> token in the vocab
        "albert": 0,  # not supported yet
        "distilbert": 1,  # no detokenizer
        "roberta": 2,
        "vit": 0,  # no tokenizer for image model
        "wav2vec2": 0,  # no tokenizer
        "bert": 1,  # no detokenizer
        "blenderbot": 2,
        "stable-diffusion": 0,  # not supported
        "stable-diffusion-xl": 0,  # not supported
    }

    SUPPORTED_4BIT_ARCHITECTURES = (("text-generation-with-past", "opt125m"),)

    SUPPORTED_4BIT_OPTIONS = ["int4_sym_g128", "int4_asym_g128", "int4_sym_g64", "int4_asym_g64"]

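    # Each 4-bit option name encodes the weight-compression scheme: symmetric or
    # asymmetric int4 with a group size of 64 or 128 (e.g. "int4_sym_g128").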
    TEST_4BIT_CONFIGURATIONS = []
    for arch in SUPPORTED_4BIT_ARCHITECTURES:
        for option in SUPPORTED_4BIT_OPTIONS:
            TEST_4BIT_CONFIGURATIONS.append([arch[0], arch[1], option])
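    # The loops above expand into one test case per (task, model, option)
    # triple, e.g. ["text-generation-with-past", "opt125m", "int4_sym_g128"].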

    def _openvino_export(
        self, model_name: str, task: str, compression_option: Optional[str] = None, compression_ratio: Optional[float] = None
    ):
        with TemporaryDirectory() as tmpdir:
            main_export(
                model_name_or_path=model_name,
                output=tmpdir,
                task=task,
                compression_option=compression_option,
                compression_ratio=compression_ratio,
            )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_export(self, task: str, model_type: str):
        self._openvino_export(MODEL_NAMES[model_type], task)

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_exporters_cli(self, task: str, model_type: str):
        with TemporaryDirectory() as tmpdir:
            subprocess.run(
                f"optimum-cli export openvino --model {MODEL_NAMES[model_type]} --task {task} {tmpdir}",
                shell=True,
                check=True,
            )
            model_kwargs = {"use_cache": task.endswith("with-past")} if "generation" in task else {}
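            # `_HEAD_TO_AUTOMODELS` maps the bare task name to the name of an
            # OVModel* class as a string; `eval` resolves it against the classes
            # imported above (hence the `# noqa` on that import block).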
            eval(_HEAD_TO_AUTOMODELS[task.replace("-with-past", "")]).from_pretrained(tmpdir, **model_kwargs)

    @parameterized.expand(
        arch
        for arch in SUPPORTED_ARCHITECTURES
        if not arch[0].endswith("-with-past") and not arch[1].endswith("-refiner")
    )
    @unittest.skipIf(not is_openvino_tokenizers_available(), reason="OpenVINO Tokenizers not available")
    def test_exporters_cli_tokenizers(self, task: str, model_type: str):
        with TemporaryDirectory() as tmpdir:
            output = subprocess.check_output(
                f"optimum-cli export openvino --model {MODEL_NAMES[model_type]} --convert-tokenizer --task {task} {tmpdir}",
                shell=True,
                stderr=subprocess.STDOUT,
            ).decode()
            save_dir = Path(tmpdir)
            number_of_tokenizers = sum("tokenizer" in file for file in map(str, save_dir.rglob("*.xml")))
            self.assertEqual(
                self.EXPECTED_NUMBER_OF_TOKENIZER_MODELS[model_type],
                number_of_tokenizers,
                f"OVT: {is_openvino_tokenizers_available()}",
            )

            if number_of_tokenizers == 1:
                self.assertTrue("Detokenizer is not supported, convert tokenizer only." in output, output)
            elif number_of_tokenizers == 0 and task not in ("image-classification", "audio-classification"):
                self.assertTrue(("OpenVINO Tokenizer export for" in output and "is not supported." in output), output)

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_exporters_cli_fp16(self, task: str, model_type: str):
        with TemporaryDirectory() as tmpdir:
            subprocess.run(
                f"optimum-cli export openvino --model {MODEL_NAMES[model_type]} --task {task} --weight-format fp16 {tmpdir}",
                shell=True,
                check=True,
            )
            model_kwargs = {"use_cache": task.endswith("with-past")} if "generation" in task else {}
            eval(_HEAD_TO_AUTOMODELS[task.replace("-with-past", "")]).from_pretrained(tmpdir, **model_kwargs)

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_exporters_cli_int8(self, task: str, model_type: str):
        with TemporaryDirectory() as tmpdir:
            subprocess.run(
                f"optimum-cli export openvino --model {MODEL_NAMES[model_type]} --task {task} --weight-format int8 {tmpdir}",
                shell=True,
                check=True,
            )
            model_kwargs = {"use_cache": task.endswith("with-past")} if "generation" in task else {}
            model = eval(_HEAD_TO_AUTOMODELS[task.replace("-with-past", "")]).from_pretrained(tmpdir, **model_kwargs)

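            # Multi-component pipelines are checked per submodel: each encoder,
            # decoder, or diffusion component is exported as a separate OpenVINO
            # model with its own expected number of quantized nodes.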
            if task.startswith("text2text-generation"):
                models = [model.encoder, model.decoder]
                if task.endswith("with-past"):
                    models.append(model.decoder_with_past)
            elif task.startswith("stable-diffusion"):
                models = [model.unet, model.vae_encoder, model.vae_decoder]
                models.append(model.text_encoder if task == "stable-diffusion" else model.text_encoder_2)
            else:
                models = [model]

            expected_int8 = _ARCHITECTURES_TO_EXPECTED_INT8[model_type]
            for i, model in enumerate(models):
                _, num_int8, _ = get_num_quantized_nodes(model)
                self.assertEqual(expected_int8[i], num_int8)

    @parameterized.expand(TEST_4BIT_CONFIGURATIONS)
    def test_exporters_cli_int4(self, task: str, model_type: str, option: str):
        with TemporaryDirectory() as tmpdir:
            subprocess.run(
                f"optimum-cli export openvino --model {MODEL_NAMES[model_type]} --task {task} --weight-format {option} {tmpdir}",
                shell=True,
                check=True,
            )
            model_kwargs = {"use_cache": task.endswith("with-past")} if "generation" in task else {}
            model = eval(_HEAD_TO_AUTOMODELS[task.replace("-with-past", "")]).from_pretrained(tmpdir, **model_kwargs)

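            # 4-bit weight compression typically keeps a fraction of the weights
            # in int8 (e.g. embeddings and other sensitive layers), so the test
            # asserts both the int8 and the int4 node counts.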
            expected_int8, expected_int4 = _ARCHITECTURES_TO_EXPECTED_INT4_INT8[model_type]
            _, num_int8, num_int4 = get_num_quantized_nodes(model)
            self.assertEqual(expected_int8, num_int8)
            self.assertEqual(expected_int4, num_int4)

    def test_exporters_cli_help(self):
        subprocess.run(
            "optimum-cli export openvino --help",
            shell=True,
            check=True,
        )
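

# Optional entry point so the module can be run directly; the suite is normally
# driven through pytest.
if __name__ == "__main__":
    unittest.main()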
