optimum-intel / test_modeling.py (1335 lines · 56.6 KB)
#  Copyright 2021 The HuggingFace Team. All rights reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

import gc
import os
import tempfile
import time
import unittest
from typing import Dict

import numpy as np
import requests
import timm
import torch
from datasets import load_dataset
from evaluate import evaluator
from parameterized import parameterized
from PIL import Image
from transformers import (
    AutoFeatureExtractor,
    AutoModel,
    AutoModelForAudioClassification,
    AutoModelForAudioFrameClassification,
    AutoModelForAudioXVector,
    AutoModelForCausalLM,
    AutoModelForCTC,
    AutoModelForImageClassification,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoModelForSpeechSeq2Seq,
    AutoModelForTokenClassification,
    AutoTokenizer,
    GenerationConfig,
    Pix2StructForConditionalGeneration,
    PretrainedConfig,
    pipeline,
    set_seed,
)
from transformers.onnx.utils import get_preprocessor
from utils_tests import MODEL_NAMES

from optimum.exporters.onnx import MODEL_TYPES_REQUIRING_POSITION_IDS
from optimum.intel import (
    OVModelForAudioClassification,
    OVModelForAudioFrameClassification,
    OVModelForAudioXVector,
    OVModelForCausalLM,
    OVModelForCTC,
    OVModelForFeatureExtraction,
    OVModelForImageClassification,
    OVModelForMaskedLM,
    OVModelForPix2Struct,
    OVModelForQuestionAnswering,
    OVModelForSeq2SeqLM,
    OVModelForSequenceClassification,
    OVModelForSpeechSeq2Seq,
    OVModelForTokenClassification,
    OVStableDiffusionPipeline,
)
from optimum.intel.openvino import OV_DECODER_NAME, OV_DECODER_WITH_PAST_NAME, OV_ENCODER_NAME, OV_XML_FILE_NAME
from optimum.intel.openvino.modeling_seq2seq import OVDecoder, OVEncoder
from optimum.intel.openvino.modeling_timm import TimmImageProcessor
from optimum.intel.openvino.utils import _print_compiled_model_properties
from optimum.intel.utils.import_utils import is_openvino_version
from optimum.utils import (
    DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
    DIFFUSION_MODEL_UNET_SUBFOLDER,
    DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER,
    DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER,
)
from optimum.utils.testing_utils import require_diffusers


TENSOR_ALIAS_TO_TYPE = {
    "pt": torch.Tensor,
    "np": np.ndarray,
}

SEED = 42

F32_CONFIG = {"INFERENCE_PRECISION_HINT": "f32"}
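# The f32 inference precision hint above keeps OpenVINO outputs numerically close to the
# fp32 PyTorch references, which the tests below compare with tight atol tolerances.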


# Small helper context manager: measures the wall-clock time of the enclosed block
# and stores it in `elapsed`, in milliseconds.
class Timer(object):
    def __enter__(self):
        self.elapsed = time.perf_counter()
        return self

    def __exit__(self, type, value, traceback):
        self.elapsed = (time.perf_counter() - self.elapsed) * 1e3


class OVModelIntegrationTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.OV_MODEL_ID = "echarlaix/distilbert-base-uncased-finetuned-sst-2-english-openvino"
        self.OV_DECODER_MODEL_ID = "helenai/gpt2-ov"
        self.OV_SEQ2SEQ_MODEL_ID = "echarlaix/t5-small-openvino"
        self.OV_DIFFUSION_MODEL_ID = "hf-internal-testing/tiny-stable-diffusion-openvino"

    def test_load_from_hub_and_save_model(self):
        tokenizer = AutoTokenizer.from_pretrained(self.OV_MODEL_ID)
        tokens = tokenizer("This is a sample input", return_tensors="pt")
        loaded_model = OVModelForSequenceClassification.from_pretrained(self.OV_MODEL_ID)
        self.assertIsInstance(loaded_model.config, PretrainedConfig)
        loaded_model_outputs = loaded_model(**tokens)

        # Test specifying ov_config with throughput hint and manual cache dir
        manual_openvino_cache_dir = loaded_model.model_save_dir / "manual_model_cache"
        ov_config = {"CACHE_DIR": str(manual_openvino_cache_dir), "PERFORMANCE_HINT": "THROUGHPUT"}
        loaded_model = OVModelForSequenceClassification.from_pretrained(self.OV_MODEL_ID, ov_config=ov_config)
        self.assertTrue(manual_openvino_cache_dir.is_dir())
        self.assertGreaterEqual(len(list(manual_openvino_cache_dir.glob("*.blob"))), 1)
        if is_openvino_version("<", "2023.3"):
            self.assertEqual(loaded_model.request.get_property("PERFORMANCE_HINT").name, "THROUGHPUT")
        else:
            self.assertEqual(loaded_model.request.get_property("PERFORMANCE_HINT"), "THROUGHPUT")

        with tempfile.TemporaryDirectory() as tmpdirname:
            loaded_model.save_pretrained(tmpdirname)
            folder_contents = os.listdir(tmpdirname)
            self.assertTrue(OV_XML_FILE_NAME in folder_contents)
            self.assertTrue(OV_XML_FILE_NAME.replace(".xml", ".bin") in folder_contents)
            model = OVModelForSequenceClassification.from_pretrained(tmpdirname)

        outputs = model(**tokens)
        self.assertTrue(torch.equal(loaded_model_outputs.logits, outputs.logits))

        del loaded_model
        del model
        gc.collect()

    @parameterized.expand((True, False))
    def test_load_from_hub_and_save_decoder_model(self, use_cache):
        model_id = "vuiseng9/ov-gpt2-fp32-kv-cache" if use_cache else "vuiseng9/ov-gpt2-fp32-no-cache"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokens = tokenizer("This is a sample input", return_tensors="pt")
        loaded_model = OVModelForCausalLM.from_pretrained(model_id, use_cache=use_cache)
        self.assertIsInstance(loaded_model.config, PretrainedConfig)
        loaded_model_outputs = loaded_model(**tokens)

        with tempfile.TemporaryDirectory() as tmpdirname:
            loaded_model.save_pretrained(tmpdirname)
            folder_contents = os.listdir(tmpdirname)
            self.assertTrue(OV_XML_FILE_NAME in folder_contents)
            self.assertTrue(OV_XML_FILE_NAME.replace(".xml", ".bin") in folder_contents)
            model = OVModelForCausalLM.from_pretrained(tmpdirname, use_cache=use_cache)
            self.assertEqual(model.use_cache, use_cache)

        outputs = model(**tokens)
        self.assertTrue(torch.equal(loaded_model_outputs.logits, outputs.logits))
        del loaded_model
        del model
        gc.collect()

    def test_load_from_hub_and_save_seq2seq_model(self):
        tokenizer = AutoTokenizer.from_pretrained(self.OV_SEQ2SEQ_MODEL_ID)
        tokens = tokenizer("This is a sample input", return_tensors="pt")
        loaded_model = OVModelForSeq2SeqLM.from_pretrained(self.OV_SEQ2SEQ_MODEL_ID, compile=False)
        self.assertIsInstance(loaded_model.config, PretrainedConfig)
        loaded_model.to("cpu")
        loaded_model_outputs = loaded_model.generate(**tokens)

        with tempfile.TemporaryDirectory() as tmpdirname:
            loaded_model.save_pretrained(tmpdirname)
            folder_contents = os.listdir(tmpdirname)
            self.assertTrue(OV_ENCODER_NAME in folder_contents)
            self.assertTrue(OV_DECODER_NAME in folder_contents)
            self.assertTrue(OV_DECODER_WITH_PAST_NAME in folder_contents)
            model = OVModelForSeq2SeqLM.from_pretrained(tmpdirname, device="cpu")

        outputs = model.generate(**tokens)
        self.assertTrue(torch.equal(loaded_model_outputs, outputs))
        del loaded_model
        del model
        gc.collect()

    @require_diffusers
    def test_load_from_hub_and_save_stable_diffusion_model(self):
        loaded_pipeline = OVStableDiffusionPipeline.from_pretrained(self.OV_DIFFUSION_MODEL_ID, compile=False)
        self.assertIsInstance(loaded_pipeline.config, Dict)
        batch_size, height, width = 2, 16, 16
        np.random.seed(0)
        inputs = {
            "prompt": ["sailing ship in storm by Leonardo da Vinci"] * batch_size,
            "height": height,
            "width": width,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        pipeline_outputs = loaded_pipeline(**inputs).images
        self.assertEqual(pipeline_outputs.shape, (batch_size, height, width, 3))
        with tempfile.TemporaryDirectory() as tmpdirname:
            loaded_pipeline.save_pretrained(tmpdirname)
            pipeline = OVStableDiffusionPipeline.from_pretrained(tmpdirname)
            folder_contents = os.listdir(tmpdirname)
            self.assertIn(loaded_pipeline.config_name, folder_contents)
            for subfolder in {
                DIFFUSION_MODEL_UNET_SUBFOLDER,
                DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
                DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER,
                DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER,
            }:
                folder_contents = os.listdir(os.path.join(tmpdirname, subfolder))
                self.assertIn(OV_XML_FILE_NAME, folder_contents)
                self.assertIn(OV_XML_FILE_NAME.replace(".xml", ".bin"), folder_contents)
        np.random.seed(0)
        outputs = pipeline(**inputs).images
        self.assertTrue(np.array_equal(pipeline_outputs, outputs))
        del pipeline
        gc.collect()


class OVModelForSequenceClassificationIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "albert",
        "bert",
        # "camembert",
        "convbert",
        # "data2vec_text",
        # "deberta_v2",
        "distilbert",
        "electra",
        "flaubert",
        "ibert",
        # "mobilebert",
        # "nystromformer",
        "roberta",
        "roformer",
        "squeezebert",
        "xlm",
        # "xlm_roberta",
    )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForSequenceClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModelForSequenceClassification.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = "This is a sample input"
        tokens = tokenizer(inputs, return_tensors="pt")
        with torch.no_grad():
            transformers_outputs = transformers_model(**tokens)
        for input_type in ["pt", "np"]:
            tokens = tokenizer(inputs, return_tensors=input_type)
            ov_outputs = ov_model(**tokens)
            self.assertIn("logits", ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))
        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForSequenceClassification.from_pretrained(model_id, export=True, compile=False)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
        text = "This restaurant is awesome"
        outputs = pipe(text)
        self.assertTrue(model.is_dynamic)
        self.assertEqual(pipe.device, model.device)
        self.assertGreaterEqual(outputs[0]["score"], 0.0)
        self.assertIsInstance(outputs[0]["label"], str)
        if model_arch == "bert":
            # Test FP16 conversion
            model.half()
            model.to("cpu")
            model.compile()
            outputs = pipe(text)
            self.assertGreaterEqual(outputs[0]["score"], 0.0)
            self.assertIsInstance(outputs[0]["label"], str)
            # Test static shapes
            model.reshape(1, 25)
            model.compile()
            outputs = pipe(text)
            self.assertTrue(not model.is_dynamic)
            self.assertGreaterEqual(outputs[0]["score"], 0.0)
            self.assertIsInstance(outputs[0]["label"], str)
            # Test that model caching was not automatically enabled for exported model
            openvino_cache_dir = model.model_save_dir / "model_cache"
            self.assertFalse(openvino_cache_dir.is_dir())

        del model
        del pipe
        gc.collect()


class OVModelForQuestionAnsweringIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "bert",
        "distilbert",
        "roberta",
    )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForQuestionAnswering.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModelForQuestionAnswering.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = "This is a sample input"
        tokens = tokenizer(inputs, return_tensors="pt")
        with torch.no_grad():
            transformers_outputs = transformers_model(**tokens)
        for input_type in ["pt", "np"]:
            tokens = tokenizer(inputs, return_tensors=input_type)
            ov_outputs = ov_model(**tokens)
            self.assertIn("start_logits", ov_outputs)
            self.assertIn("end_logits", ov_outputs)
            self.assertIsInstance(ov_outputs.start_logits, TENSOR_ALIAS_TO_TYPE[input_type])
            self.assertIsInstance(ov_outputs.end_logits, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(
                torch.allclose(torch.Tensor(ov_outputs.start_logits), transformers_outputs.start_logits, atol=1e-4)
            )
            self.assertTrue(
                torch.allclose(torch.Tensor(ov_outputs.end_logits), transformers_outputs.end_logits, atol=1e-4)
            )
        del ov_model
        del transformers_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForQuestionAnswering.from_pretrained(model_id, export=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
        question = "What's my name?"
        context = "My Name is Arthur and I live in Lyon."
        outputs = pipe(question, context)
        self.assertEqual(pipe.device, model.device)
        self.assertGreaterEqual(outputs["score"], 0.0)
        self.assertIsInstance(outputs["answer"], str)
        del model
        gc.collect()

    def test_metric(self):
        model_id = "distilbert-base-cased-distilled-squad"
        set_seed(SEED)
        ov_model = OVModelForQuestionAnswering.from_pretrained(model_id, export=True)
        transformers_model = AutoModelForQuestionAnswering.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        data = load_dataset("squad", split="validation").select(range(50))
        task_evaluator = evaluator("question-answering")
        transformers_pipe = pipeline("question-answering", model=transformers_model, tokenizer=tokenizer)
        ov_pipe = pipeline("question-answering", model=ov_model, tokenizer=tokenizer)
        transformers_metric = task_evaluator.compute(model_or_pipeline=transformers_pipe, data=data, metric="squad")
        ov_metric = task_evaluator.compute(model_or_pipeline=ov_pipe, data=data, metric="squad")
        self.assertEqual(ov_metric["exact_match"], transformers_metric["exact_match"])
        self.assertEqual(ov_metric["f1"], transformers_metric["f1"])
        del transformers_pipe
        del transformers_model
        del ov_pipe
        del ov_model
        gc.collect()


class OVModelForTokenClassificationIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "bert",
        "distilbert",
        "roberta",
    )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForTokenClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModelForTokenClassification.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = "This is a sample input"
        tokens = tokenizer(inputs, return_tensors="pt")
        with torch.no_grad():
            transformers_outputs = transformers_model(**tokens)
        for input_type in ["pt", "np"]:
            tokens = tokenizer(inputs, return_tensors=input_type)
            ov_outputs = ov_model(**tokens)
            self.assertIn("logits", ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))
        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForTokenClassification.from_pretrained(model_id, export=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        pipe = pipeline("token-classification", model=model, tokenizer=tokenizer)
        outputs = pipe("My Name is Arthur and I live in Lyon.")
        self.assertEqual(pipe.device, model.device)
        self.assertTrue(all(item["score"] > 0.0 for item in outputs))
        del model
        del pipe
        gc.collect()


class OVModelForFeatureExtractionIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "bert",
        "distilbert",
        "roberta",
        "sentence-transformers-bert",
    )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForFeatureExtraction.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModel.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = "This is a sample input"
        tokens = tokenizer(inputs, return_tensors="pt")
        with torch.no_grad():
            transformers_outputs = transformers_model(**tokens)
        for input_type in ["pt", "np"]:
            tokens = tokenizer(inputs, return_tensors=input_type)
            ov_outputs = ov_model(**tokens)
            self.assertIn("last_hidden_state", ov_outputs)
            self.assertIsInstance(ov_outputs.last_hidden_state, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(
                torch.allclose(
                    torch.Tensor(ov_outputs.last_hidden_state), transformers_outputs.last_hidden_state, atol=1e-4
                )
            )
        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForFeatureExtraction.from_pretrained(model_id, export=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        pipe = pipeline("feature-extraction", model=model, tokenizer=tokenizer)
        outputs = pipe("My Name is Arthur and I live in Lyon.")
        self.assertEqual(pipe.device, model.device)
        self.assertTrue(all(all(isinstance(item, float) for item in row) for row in outputs[0]))
        del pipe
        del model
        gc.collect()


class OVModelForCausalLMIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "bart",
        "gpt_bigcode",
        "blenderbot",
        "blenderbot-small",
        "bloom",
        "codegen",
        # "data2vec-text", # TODO : enable when enabled in exporters
        "gpt2",
        "gpt_neo",
        "gpt_neox",
        "llama",
        # "llama_gptq",
        "marian",
        "mistral",
        "mpt",
        "opt",
        "pegasus",
    )
    GENERATION_LENGTH = 100
    IS_SUPPORT_STATEFUL = is_openvino_version(">=", "2023.3")

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]

        if "gptq" in model_arch:
            self.skipTest("GPTQ model loading unsupported with AutoModelForCausalLM")

        set_seed(SEED)
        ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        self.assertTrue(ov_model.use_cache)

        transformers_model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokens = tokenizer(
            "This is a sample", return_tensors="pt", return_token_type_ids=False if model_arch == "llama" else None
        )
        position_ids = None
        if model_arch.replace("_", "-") in MODEL_TYPES_REQUIRING_POSITION_IDS:
            input_shape = tokens["input_ids"].shape
            position_ids = torch.arange(0, input_shape[-1], dtype=torch.long).unsqueeze(0).view(-1, input_shape[-1])
        ov_outputs = ov_model(**tokens, position_ids=position_ids)

        self.assertTrue("logits" in ov_outputs)
        self.assertIsInstance(ov_outputs.logits, torch.Tensor)
        self.assertTrue("past_key_values" in ov_outputs)
        self.assertIsInstance(ov_outputs.past_key_values, tuple)

        is_stateful = ov_model.config.model_type not in {"gpt_bigcode", "llama"} and self.IS_SUPPORT_STATEFUL
        self.assertEqual(ov_model.stateful, is_stateful)
        if is_stateful:
            self.assertTrue(len(ov_outputs.past_key_values) == 1 and len(ov_outputs.past_key_values[0]) == 0)

        with torch.no_grad():
            transformers_outputs = transformers_model(**tokens)

        # Compare tensor outputs
        self.assertTrue(torch.allclose(ov_outputs.logits, transformers_outputs.logits, atol=1e-4))
        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=False, compile=False)
        model.config.encoder_no_repeat_ngram_size = 0
        model.to("cpu")
        model.half()
        model.compile()
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        outputs = pipe("This is a sample", max_length=20)
        self.assertEqual(pipe.device, model.device)
        self.assertTrue(all("This is a sample" in item["generated_text"] for item in outputs))
        del pipe
        del model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_multiple_inputs(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        model = OVModelForCausalLM.from_pretrained(model_id, export=True, compile=False)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token = tokenizer.eos_token
        texts = ["this is a simple input", "this is a second simple input", "this is a third simple input"]
        tokens = tokenizer(texts, padding=True, return_tensors="pt")
        generation_config = GenerationConfig(encoder_no_repeat_ngram_size=0, max_new_tokens=20, num_beams=2)
        outputs = model.generate(**tokens, generation_config=generation_config)
        self.assertIsInstance(outputs, torch.Tensor)
        self.assertEqual(outputs.shape[0], 3)
        del model
        gc.collect()

    def test_model_and_decoder_same_device(self):
        model_id = MODEL_NAMES["gpt2"]
        model = OVModelForCausalLM.from_pretrained(model_id, export=True)
        model.to("TEST")
        self.assertEqual(model._device, "TEST")
        # Verify that request is being reset
        self.assertEqual(model.request, None)
        del model
        gc.collect()

    def test_compare_with_and_without_past_key_values(self):
        model_id = MODEL_NAMES["gpt2"]
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokens = tokenizer("This is a sample input", return_tensors="pt")
        model_with_pkv = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=True, stateful=False)
        outputs_model_with_pkv = model_with_pkv.generate(
            **tokens, min_length=self.GENERATION_LENGTH, max_length=self.GENERATION_LENGTH, num_beams=1
        )
        model_without_pkv = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=False)
        outputs_model_without_pkv = model_without_pkv.generate(
            **tokens, min_length=self.GENERATION_LENGTH, max_length=self.GENERATION_LENGTH, num_beams=1
        )
        self.assertTrue(torch.equal(outputs_model_with_pkv, outputs_model_without_pkv))
        self.assertEqual(outputs_model_with_pkv.shape[1], self.GENERATION_LENGTH)
        self.assertEqual(outputs_model_without_pkv.shape[1], self.GENERATION_LENGTH)
        if self.IS_SUPPORT_STATEFUL:
            model_stateful = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=True, stateful=True)
            outputs_model_stateful = model_stateful.generate(
                **tokens, min_length=self.GENERATION_LENGTH, max_length=self.GENERATION_LENGTH, num_beams=1
            )
            self.assertTrue(torch.equal(outputs_model_without_pkv, outputs_model_stateful))

        del model_with_pkv
        del model_without_pkv
        gc.collect()

    def test_print_model_properties(self):
        # test setting OPENVINO_LOG_LEVEL to 3, which calls _print_compiled_model_properties
        openvino_log_level = os.environ.get("OPENVINO_LOG_LEVEL", None)
        os.environ["OPENVINO_LOG_LEVEL"] = "3"
        model = OVModelForSequenceClassification.from_pretrained(MODEL_NAMES["bert"], export=True)
        if openvino_log_level is not None:
            os.environ["OPENVINO_LOG_LEVEL"] = openvino_log_level
        # test calling function directly
        _print_compiled_model_properties(model.request)

    def test_auto_device_loading(self):
        OV_MODEL_ID = "echarlaix/distilbert-base-uncased-finetuned-sst-2-english-openvino"
        for device in ("AUTO", "AUTO:CPU"):
            model = OVModelForSequenceClassification.from_pretrained(OV_MODEL_ID, device=device)
            model.half()
            self.assertEqual(model._device, device)
            if device == "AUTO:CPU":
                model = OVModelForSequenceClassification.from_pretrained(OV_MODEL_ID, device=device)
                message = "Model should not be loaded from cache without explicitly setting CACHE_DIR"
                self.assertFalse(model.request.get_property("LOADED_FROM_CACHE"), message)
            del model
            gc.collect()

    def test_default_filling_attention_mask(self):
        model_id = MODEL_NAMES["gpt2"]
        model_with_cache = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token = tokenizer.eos_token
        texts = ["this is a simple input"]
        tokens = tokenizer(texts, return_tensors="pt")
        self.assertTrue("attention_mask" in model_with_cache.input_names)
        outs = model_with_cache(**tokens)
        attention_mask = tokens.pop("attention_mask")
        outs_without_attn_mask = model_with_cache(**tokens)
        self.assertTrue(torch.allclose(outs.logits, outs_without_attn_mask.logits))
        input_ids = torch.argmax(outs.logits[:, -1:, :], dim=2)
        past_key_values = outs.past_key_values
        attention_mask = torch.ones((input_ids.shape[0], tokens.input_ids.shape[1] + 1), dtype=torch.long)
        outs_step2 = model_with_cache(
            input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values
        )
        outs_without_attn_mask_step2 = model_with_cache(input_ids=input_ids, past_key_values=past_key_values)
        self.assertTrue(torch.allclose(outs_step2.logits, outs_without_attn_mask_step2.logits))
        del model_with_cache
        gc.collect()

    def test_default_filling_attention_mask_and_position_ids(self):
        model_id = MODEL_NAMES["llama"]
        model_with_cache = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token = tokenizer.eos_token
        texts = ["this is a simple input"]
        tokens = tokenizer(texts, return_tensors="pt")
        self.assertTrue("position_ids" in model_with_cache.input_names)
        outs = model_with_cache(**tokens)
        attention_mask = tokens.pop("attention_mask")
        outs_without_attn_mask = model_with_cache(**tokens)
        self.assertTrue(torch.allclose(outs.logits, outs_without_attn_mask.logits))
        input_ids = torch.argmax(outs.logits[:, -1:, :], dim=2)
        past_key_values = outs.past_key_values
        attention_mask = torch.ones((input_ids.shape[0], tokens.input_ids.shape[1] + 1), dtype=torch.long)
        outs_step2 = model_with_cache(
            input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values
        )
        outs_without_attn_mask_step2 = model_with_cache(input_ids=input_ids, past_key_values=past_key_values)
        self.assertTrue(torch.allclose(outs_step2.logits, outs_without_attn_mask_step2.logits))
        del model_with_cache
        gc.collect()


class OVModelForMaskedLMIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "albert",
        "bert",
        # "camembert",
        # "convbert",
        # "data2vec_text",
        "deberta",
        # "deberta_v2",
        "distilbert",
        "electra",
        "flaubert",
        "ibert",
        # "mobilebert",
        "roberta",
        "roformer",
        "squeezebert",
        "xlm",
        "xlm_roberta",
    )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForMaskedLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModelForMaskedLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = f"This is a sample {tokenizer.mask_token}"
        tokens = tokenizer(inputs, return_tensors="pt")
        with torch.no_grad():
            transformers_outputs = transformers_model(**tokens)
        for input_type in ["pt", "np"]:
            tokens = tokenizer(inputs, return_tensors=input_type)
            ov_outputs = ov_model(**tokens)
            self.assertIn("logits", ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))
        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForMaskedLM.from_pretrained(model_id, export=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        pipe = pipeline("fill-mask", model=model, tokenizer=tokenizer)
        outputs = pipe(f"This is a {tokenizer.mask_token}.")
        self.assertEqual(pipe.device, model.device)
        self.assertTrue(all(item["score"] > 0.0 for item in outputs))
        del pipe
        del model
        gc.collect()


class OVModelForImageClassificationIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "beit",
        "convnext",
        # "data2vec_vision",
        # "deit",
        "levit",
        "mobilenet_v1",
        "mobilenet_v2",
        "mobilevit",
        # "poolformer",
        "resnet",
        # "segformer",
        # "swin",
        "vit",
    )

    TIMM_MODELS = ("timm/pit_s_distilled_224.in1k", "timm/vit_tiny_patch16_224.augreg_in21k")

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForImageClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModelForImageClassification.from_pretrained(model_id)
        preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
        inputs = preprocessor(images=image, return_tensors="pt")
        with torch.no_grad():
            transformers_outputs = transformers_model(**inputs)
        for input_type in ["pt", "np"]:
            inputs = preprocessor(images=image, return_tensors=input_type)
            ov_outputs = ov_model(**inputs)
            self.assertIn("logits", ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))
        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForImageClassification.from_pretrained(model_id, export=True)
        preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
        pipe = pipeline("image-classification", model=model, feature_extractor=preprocessor)
        outputs = pipe("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(pipe.device, model.device)
        self.assertGreaterEqual(outputs[0]["score"], 0.0)
        self.assertTrue(isinstance(outputs[0]["label"], str))
        del model
        del pipe
        gc.collect()

    @parameterized.expand(TIMM_MODELS)
    def test_compare_to_timm(self, model_id):
        ov_model = OVModelForImageClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertEqual(ov_model.request.get_property("INFERENCE_PRECISION_HINT").to_string(), "f32")
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        timm_model = timm.create_model(model_id, pretrained=True)
        preprocessor = TimmImageProcessor.from_pretrained(model_id)
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
        inputs = preprocessor(images=image, return_tensors="pt")
        with torch.no_grad():
            timm_model.eval()
            timm_outputs = timm_model(inputs["pixel_values"].float())
        for input_type in ["pt", "np"]:
            inputs = preprocessor(images=image, return_tensors=input_type)
            ov_outputs = ov_model(**inputs)
            self.assertIn("logits", ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), timm_outputs, atol=1e-3))
        gc.collect()

    @parameterized.expand(TIMM_MODELS)
    def test_timm_save_and_infer(self, model_id):
        ov_model = OVModelForImageClassification.from_pretrained(model_id, export=True)
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_save_path = os.path.join(tmpdirname, "timm_ov_model")
            ov_model.save_pretrained(model_save_path)
            model = OVModelForImageClassification.from_pretrained(model_save_path)
            model(pixel_values=torch.zeros((5, 3, model.config.image_size, model.config.image_size)))
        gc.collect()


class OVModelForSeq2SeqLMIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        "bart",
        # "bigbird_pegasus",
        "blenderbot",
        "blenderbot-small",
        # "longt5",
        "m2m_100",
        "marian",
        "mbart",
        "mt5",
        "pegasus",
        "t5",
    )

    GENERATION_LENGTH = 100
    SPEEDUP_CACHE = 1.1

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)

        self.assertIsInstance(ov_model.encoder, OVEncoder)
        self.assertIsInstance(ov_model.decoder, OVDecoder)
        self.assertIsInstance(ov_model.decoder_with_past, OVDecoder)
        self.assertIsInstance(ov_model.config, PretrainedConfig)

        transformers_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokens = tokenizer("This is a sample input", return_tensors="pt")
        decoder_start_token_id = transformers_model.config.decoder_start_token_id if model_arch != "mbart" else 2
        decoder_inputs = {"decoder_input_ids": torch.ones((1, 1), dtype=torch.long) * decoder_start_token_id}
        ov_outputs = ov_model(**tokens, **decoder_inputs)

        self.assertTrue("logits" in ov_outputs)
        self.assertIsInstance(ov_outputs.logits, torch.Tensor)

        with torch.no_grad():
            transformers_outputs = transformers_model(**tokens, **decoder_inputs)
        # Compare tensor outputs
        self.assertTrue(torch.allclose(ov_outputs.logits, transformers_outputs.logits, atol=1e-4))
        del transformers_model
        del ov_model

        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True, compile=False)
        model.half()
        model.to("cpu")
        model.compile()

        # Text2Text generation
        pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
        text = "This is a test"
        outputs = pipe(text)
        self.assertEqual(pipe.device, model.device)
        self.assertIsInstance(outputs[0]["generated_text"], str)

        # Summarization
        pipe = pipeline("summarization", model=model, tokenizer=tokenizer)
        text = "This is a test"
        outputs = pipe(text)
        self.assertEqual(pipe.device, model.device)
        self.assertIsInstance(outputs[0]["summary_text"], str)

        # Translation
        pipe = pipeline("translation_en_to_fr", model=model, tokenizer=tokenizer)
        text = "This is a test"
        outputs = pipe(text)
        self.assertEqual(pipe.device, model.device)
        self.assertIsInstance(outputs[0]["translation_text"], str)
        del pipe
        del model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_generate_utils(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        text = "This is a sample input"
        tokens = tokenizer(text, return_tensors="pt")

        # General case
        outputs = model.generate(**tokens)
        outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        self.assertIsInstance(outputs[0], str)

        # With input ids
        outputs = model.generate(input_ids=tokens["input_ids"])
        outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        self.assertIsInstance(outputs[0], str)
        del model

        gc.collect()

    def test_compare_with_and_without_past_key_values(self):
        model_id = MODEL_NAMES["t5"]
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        text = "This is a sample input"
        tokens = tokenizer(text, return_tensors="pt")

        model_with_pkv = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True, use_cache=True)
        _ = model_with_pkv.generate(**tokens)  # warmup
        with Timer() as with_pkv_timer:
            outputs_model_with_pkv = model_with_pkv.generate(
                **tokens, min_length=self.GENERATION_LENGTH, max_length=self.GENERATION_LENGTH, num_beams=1
            )

        model_without_pkv = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True, use_cache=False)
        _ = model_without_pkv.generate(**tokens)  # warmup
        with Timer() as without_pkv_timer:
            outputs_model_without_pkv = model_without_pkv.generate(
                **tokens, min_length=self.GENERATION_LENGTH, max_length=self.GENERATION_LENGTH, num_beams=1
            )

        self.assertTrue(torch.equal(outputs_model_with_pkv, outputs_model_without_pkv))
        self.assertEqual(outputs_model_with_pkv.shape[1], self.GENERATION_LENGTH)
        self.assertEqual(outputs_model_without_pkv.shape[1], self.GENERATION_LENGTH)
        self.assertTrue(
            without_pkv_timer.elapsed / with_pkv_timer.elapsed > self.SPEEDUP_CACHE,
            f"With pkv latency: {with_pkv_timer.elapsed:.3f} ms, without pkv latency: {without_pkv_timer.elapsed:.3f} ms,"
            f" speedup: {without_pkv_timer.elapsed / with_pkv_timer.elapsed:.3f}",
        )
        del model_with_pkv
        del model_without_pkv
        gc.collect()


class OVModelForAudioClassificationIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = (
        # "audio_spectrogram_transformer",
        # "data2vec_audio",
        # "hubert",
        # "sew",
        # "sew_d",
        # "wav2vec2-conformer",
        "unispeech",
        # "unispeech_sat",
        # "wavlm",
        "wav2vec2",
        # "wav2vec2-conformer",
    )

    def _generate_random_audio_data(self):
        np.random.seed(10)
        t = np.linspace(0, 5.0, int(5.0 * 22050), endpoint=False)
        # generate pure sine wave at 220 Hz
        audio_data = 0.5 * np.sin(2 * np.pi * 220 * t)
        return audio_data

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForAudioClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModelForAudioClassification.from_pretrained(model_id)
        preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
        inputs = preprocessor(self._generate_random_audio_data(), return_tensors="pt")

        with torch.no_grad():
            transformers_outputs = transformers_model(**inputs)

        for input_type in ["pt", "np"]:
            inputs = preprocessor(self._generate_random_audio_data(), return_tensors=input_type)
            ov_outputs = ov_model(**inputs)
            self.assertIn("logits", ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])
            # Compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-3))

        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForAudioClassification.from_pretrained(model_id, export=True)
        preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
        pipe = pipeline("audio-classification", model=model, feature_extractor=preprocessor)
        outputs = pipe([np.random.random(16000)])
        self.assertEqual(pipe.device, model.device)
        self.assertTrue(all(item["score"] > 0.0 for item in outputs[0]))
        del pipe
        del model
        gc.collect()


class OVModelForCTCIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = [
        "data2vec_audio",
        "hubert",
        "sew",
        "sew_d",
        "unispeech",
        "unispeech_sat",
        "wavlm",
        "wav2vec2-hf",
        "wav2vec2-conformer",
    ]

    def _generate_random_audio_data(self):
        np.random.seed(10)
        t = np.linspace(0, 5.0, int(5.0 * 22050), endpoint=False)
        # generate pure sine wave at 220 Hz
        audio_data = 0.5 * np.sin(2 * np.pi * 220 * t)
        return audio_data

    def test_load_vanilla_transformers_which_is_not_supported(self):
        with self.assertRaises(Exception) as context:
            _ = OVModelForCTC.from_pretrained(MODEL_NAMES["t5"], export=True)

        self.assertIn("only supports the tasks", str(context.exception))

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForCTC.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)

        set_seed(SEED)
        transformers_model = AutoModelForCTC.from_pretrained(model_id)
        processor = AutoFeatureExtractor.from_pretrained(model_id)
        input_values = processor(self._generate_random_audio_data(), return_tensors="pt")

        with torch.no_grad():
            transformers_outputs = transformers_model(**input_values)

        for input_type in ["pt", "np"]:
            input_values = processor(self._generate_random_audio_data(), return_tensors=input_type)
            ov_outputs = ov_model(**input_values)

            self.assertTrue("logits" in ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])

            # compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))

        del transformers_model
        del ov_model
        gc.collect()


class OVModelForAudioXVectorIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = [
        "data2vec_audio",
        "unispeech_sat",
        "wavlm",
        "wav2vec2-hf",
        "wav2vec2-conformer",
    ]

    def _generate_random_audio_data(self):
        np.random.seed(10)
        t = np.linspace(0, 5.0, int(5.0 * 22050), endpoint=False)
        # generate pure sine wave at 220 Hz
        audio_data = 0.5 * np.sin(2 * np.pi * 220 * t)
        return audio_data

    def test_load_vanilla_transformers_which_is_not_supported(self):
        with self.assertRaises(Exception) as context:
            _ = OVModelForAudioXVector.from_pretrained(MODEL_NAMES["t5"], export=True)

        self.assertIn("only supports the tasks", str(context.exception))

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForAudioXVector.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)

        set_seed(SEED)
        transformers_model = AutoModelForAudioXVector.from_pretrained(model_id)
        processor = AutoFeatureExtractor.from_pretrained(model_id)
        input_values = processor(self._generate_random_audio_data(), return_tensors="pt")

        with torch.no_grad():
            transformers_outputs = transformers_model(**input_values)
        for input_type in ["pt", "np"]:
            input_values = processor(self._generate_random_audio_data(), return_tensors=input_type)
            ov_outputs = ov_model(**input_values)

            self.assertTrue("logits" in ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])

            # compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))
            self.assertTrue(
                torch.allclose(torch.Tensor(ov_outputs.embeddings), transformers_outputs.embeddings, atol=1e-4)
            )

        del transformers_model
        del ov_model
        gc.collect()


class OVModelForAudioFrameClassificationIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = [
        "data2vec_audio",
        "unispeech_sat",
        "wavlm",
        "wav2vec2-hf",
        "wav2vec2-conformer",
    ]

    def _generate_random_audio_data(self):
        np.random.seed(10)
        t = np.linspace(0, 5.0, int(5.0 * 22050), endpoint=False)
        # generate pure sine wave at 220 Hz
        audio_data = 0.5 * np.sin(2 * np.pi * 220 * t)
        return audio_data

    def test_load_vanilla_transformers_which_is_not_supported(self):
        with self.assertRaises(Exception) as context:
            _ = OVModelForAudioFrameClassification.from_pretrained(MODEL_NAMES["t5"], export=True)

        self.assertIn("only supports the tasks", str(context.exception))

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForAudioFrameClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)

        set_seed(SEED)
        transformers_model = AutoModelForAudioFrameClassification.from_pretrained(model_id)
        processor = AutoFeatureExtractor.from_pretrained(model_id)
        input_values = processor(self._generate_random_audio_data(), return_tensors="pt")

        with torch.no_grad():
            transformers_outputs = transformers_model(**input_values)
        for input_type in ["pt", "np"]:
            input_values = processor(self._generate_random_audio_data(), return_tensors=input_type)
            ov_outputs = ov_model(**input_values)

            self.assertTrue("logits" in ov_outputs)
            self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])

            # compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))

        del transformers_model
        del ov_model
        gc.collect()


1182
class OVModelForPix2StructIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = ["pix2struct"]
    TASK = "image-to-text"  # is it fine as well with visual-question-answering?

    GENERATION_LENGTH = 100
    SPEEDUP_CACHE = 1.1

    IMAGE = Image.open(
        requests.get(
            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg",
            stream=True,
        ).raw
    )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForPix2Struct.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)

        self.assertIsInstance(ov_model.encoder, OVEncoder)
        self.assertIsInstance(ov_model.decoder, OVDecoder)
        self.assertIsInstance(ov_model.decoder_with_past, OVDecoder)
        self.assertIsInstance(ov_model.config, PretrainedConfig)

        question = "Who am I?"
        transformers_model = Pix2StructForConditionalGeneration.from_pretrained(model_id)
        preprocessor = get_preprocessor(model_id)

        inputs = preprocessor(images=self.IMAGE, text=question, padding=True, return_tensors="pt")
        ov_outputs = ov_model(**inputs)

        self.assertTrue("logits" in ov_outputs)
        self.assertIsInstance(ov_outputs.logits, torch.Tensor)

        with torch.no_grad():
            transformers_outputs = transformers_model(**inputs)
        # Compare tensor outputs
        self.assertTrue(torch.allclose(ov_outputs.logits, transformers_outputs.logits, atol=1e-4))
        del transformers_model
        del ov_model

        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_generate_utils(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForPix2Struct.from_pretrained(model_id, export=True)
        preprocessor = get_preprocessor(model_id)
        question = "Who am I?"
        inputs = preprocessor(images=self.IMAGE, text=question, return_tensors="pt")

        # General case
        outputs = model.generate(**inputs)
        outputs = preprocessor.batch_decode(outputs, skip_special_tokens=True)
        self.assertIsInstance(outputs[0], str)
        del model

        gc.collect()

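    # Generation with and without past key values should produce identical tokens, with the
    # cached variant expected to be at least SPEEDUP_CACHE times faster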
    def test_compare_with_and_without_past_key_values(self):
        model_id = MODEL_NAMES["pix2struct"]
        preprocessor = get_preprocessor(model_id)
        question = "Who am I?"
        inputs = preprocessor(images=self.IMAGE, text=question, return_tensors="pt")

        model_with_pkv = OVModelForPix2Struct.from_pretrained(model_id, export=True, use_cache=True)
        _ = model_with_pkv.generate(**inputs)  # warmup
        with Timer() as with_pkv_timer:
            outputs_model_with_pkv = model_with_pkv.generate(
                **inputs, min_length=self.GENERATION_LENGTH, max_length=self.GENERATION_LENGTH, num_beams=1
            )

        model_without_pkv = OVModelForPix2Struct.from_pretrained(model_id, export=True, use_cache=False)
        _ = model_without_pkv.generate(**inputs)  # warmup
        with Timer() as without_pkv_timer:
            outputs_model_without_pkv = model_without_pkv.generate(
                **inputs, min_length=self.GENERATION_LENGTH, max_length=self.GENERATION_LENGTH, num_beams=1
            )

        self.assertTrue(torch.equal(outputs_model_with_pkv, outputs_model_without_pkv))
        self.assertEqual(outputs_model_with_pkv.shape[1], self.GENERATION_LENGTH)
        self.assertEqual(outputs_model_without_pkv.shape[1], self.GENERATION_LENGTH)
        self.assertTrue(
            without_pkv_timer.elapsed / with_pkv_timer.elapsed > self.SPEEDUP_CACHE,
            f"With pkv latency: {with_pkv_timer.elapsed:.3f} ms, without pkv latency: {without_pkv_timer.elapsed:.3f} ms,"
            f" speedup: {without_pkv_timer.elapsed / with_pkv_timer.elapsed:.3f}",
        )
        del model_with_pkv
        del model_without_pkv
        gc.collect()

class OVModelForSpeechSeq2SeqIntegrationTest(unittest.TestCase):
    SUPPORTED_ARCHITECTURES = ("whisper",)

    def _generate_random_audio_data(self):
        np.random.seed(10)
        t = np.linspace(0, 5.0, int(5.0 * 22050), endpoint=False)
        # generate pure sine wave at 220 Hz
        audio_data = 0.5 * np.sin(2 * np.pi * 220 * t)
        return audio_data

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ov_model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
        self.assertIsInstance(ov_model.config, PretrainedConfig)
        transformers_model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id)
        processor = get_preprocessor(model_id)
        data = self._generate_random_audio_data()
        features = processor.feature_extractor(data, return_tensors="pt")

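        # Prime the decoder with a single decoder_start_token_id so both models receive identical inputs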
        decoder_start_token_id = transformers_model.config.decoder_start_token_id
        decoder_inputs = {"decoder_input_ids": torch.ones((1, 1), dtype=torch.long) * decoder_start_token_id}

        with torch.no_grad():
            transformers_outputs = transformers_model(**features, **decoder_inputs)

        for input_type in ["pt", "np"]:
            features = processor.feature_extractor(data, return_tensors=input_type)

            if input_type == "np":
                decoder_inputs = {"decoder_input_ids": np.ones((1, 1), dtype=np.int64) * decoder_start_token_id}

            ov_outputs = ov_model(**features, **decoder_inputs)
            self.assertIn("logits", ov_outputs)
            # Compare tensor outputs
            self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-3))

        del transformers_model
        del ov_model
        gc.collect()

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        model_id = MODEL_NAMES[model_arch]
        model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True)
        processor = get_preprocessor(model_id)
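        # Ensure the generation config is loadable for this model (return value unused here)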
        GenerationConfig.from_pretrained(model_id)
        pipe = pipeline(
            "automatic-speech-recognition",
            model=model,
            tokenizer=processor.tokenizer,
            feature_extractor=processor.feature_extractor,
        )
        data = self._generate_random_audio_data()
        outputs = pipe(data)
        self.assertIsInstance(outputs["text"], str)

        del pipe
        del model
        gc.collect()
