# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch XCLIP model. """


import inspect
import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import XCLIPModel, XCLIPTextModel, XCLIPVisionModel
    from transformers.models.x_clip.modeling_x_clip import XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import XCLIPProcessor


class XCLIPVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=8,
        image_size=30,
        patch_size=2,
        num_channels=3,
        num_frames=8,  # important; the batch size * time must be divisible by the number of frames
        is_training=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        mit_hidden_size=64,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.mit_hidden_size = mit_hidden_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
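        # the vision encoder treats each frame as an independent image, so frames are folded into the batch dimension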
        pixel_values = floats_tensor(
            [self.batch_size * self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return XCLIPVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            mit_hidden_size=self.mit_hidden_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = XCLIPVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size * self.num_frames, num_patches + 1, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_frames, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class XCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as X-CLIP does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (XCLIPVisionModel,) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = XCLIPVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=XCLIPVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="X-CLIP does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XCLIPVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_gradient_checkpointing_backward_compatibility(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if not model_class.supports_gradient_checkpointing:
                continue

            print("Model class:", model_class)

            config.gradient_checkpointing = True
            model = model_class(config)
            self.assertTrue(model.is_gradient_checkpointing)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # we add 1 here due to the special message token in X-CLIP's vision encoder
        seq_len = getattr(self.model_tester, "seq_length", None) + 1
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also works using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(outputs.attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

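            # requesting hidden states adds exactly one extra entry (hidden_states) to the model outputs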
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length],
            )

    @require_torch_multi_gpu
    def test_multi_gpu_data_parallel_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # some params shouldn't be scattered by nn.DataParallel
        # so just remove them if they are present.
        blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)

        # move input tensors to cuda:0
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            model.to(0)
            model.eval()

            # Wrap model in nn.DataParallel
            model = nn.DataParallel(model)
            with torch.no_grad():
                test = self._prepare_for_class(inputs_dict, model_class)
                for k, v in test.items():
                    if isinstance(v, torch.Tensor):
                        print(k, v.shape)
                    else:
                        print(k, v)
                _ = model(**self._prepare_for_class(inputs_dict, model_class))


class XCLIPTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=8,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

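        # rewrite each random mask as a non-empty contiguous prefix of 1s followed by 0s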
        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return XCLIPTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = XCLIPTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class XCLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (XCLIPTextModel,) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = XCLIPTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XCLIPTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="X-CLIP does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XCLIPTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class XCLIPModelTester:
    def __init__(
        self,
        parent,
        text_kwargs=None,
        vision_kwargs=None,
        projection_dim=64,
        mit_hidden_size=64,
        is_training=True,
    ):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.projection_dim = projection_dim
        self.mit_hidden_size = mit_hidden_size
        self.text_model_tester = XCLIPTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = XCLIPVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, _ = self.vision_model_tester.prepare_config_and_inputs()
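        # unlike the vision tester above, the full model takes videos with an explicit time dimension:
        # (batch_size, num_frames, num_channels, height, width)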
        pixel_values = floats_tensor(
            [
                self.vision_model_tester.batch_size,
                self.vision_model_tester.num_frames,
                self.vision_model_tester.num_channels,
                self.vision_model_tester.image_size,
                self.vision_model_tester.image_size,
            ]
        )

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return XCLIPConfig.from_text_vision_configs(
            self.text_model_tester.get_config(),
            self.vision_model_tester.get_config(),
            projection_dim=self.projection_dim,
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = XCLIPModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask)
        self.parent.assertEqual(
            result.logits_per_video.shape,
            (self.vision_model_tester.batch_size, self.text_model_tester.batch_size),
        )
        self.parent.assertEqual(
            result.logits_per_text.shape,
            (self.text_model_tester.batch_size, self.vision_model_tester.batch_size),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (XCLIPModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": XCLIPModel} if is_torch_available() else {}
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_torchscript = False
    maxDiff = None

    def setUp(self):
        self.model_tester = XCLIPModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="XCLIPModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="XCLIPModel does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    # override as the `logit_scale`, `prompts_generator.alpha` parameters require special treatment
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    elif name == "prompts_generator.alpha":
                        self.assertAlmostEqual(param.data.mean().item(), model.config.prompt_alpha)
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

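    # override of the common torchscript helper: X-CLIP has to be traced with both input_ids and pixel_values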
    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # X-CLIP needs pixel_values
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

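            # keys that only exist in the traced state dict correspond to non-persistent buffers;
            # drop them from the key comparison, then check their values against the model's buffers below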
            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save XCLIPConfig and check if we can load XCLIPVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = XCLIPVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save XCLIPConfig and check if we can load XCLIPTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = XCLIPTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XCLIPModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on a spaghetti video
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_8_frames.npy", repo_type="dataset"
    )
    video = np.load(file)
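    # split the stacked frames (first axis) into a list of per-frame numpy arrays for the processor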
    return list(video)


@require_vision
@require_torch
class XCLIPModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "microsoft/xclip-base-patch32"
        model = XCLIPModel.from_pretrained(model_name).to(torch_device)
        processor = XCLIPProcessor.from_pretrained(model_name)

        video = prepare_video()
        inputs = processor(
            text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
        ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_video.shape,
            torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

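        # hard-coded reference logits: one row per video, one column per candidate text prompt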
        expected_logits = torch.tensor([[14.0181, 20.2771, 14.4776]], device=torch_device)

        self.assertTrue(torch.allclose(outputs.logits_per_video, expected_logits, atol=1e-3))