pytorch

Fork
0
/
test_ops_fwd_gradients.py
80 lines · 3.8 KB
1
# Owner(s): ["module: unknown"]
2

3
from functools import partial
4
import platform
5
from unittest import skipIf as skipif
6
import torch
7

8
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
9
from torch.testing._internal.common_utils import (
10
    TestGradients, run_tests, skipIfTorchInductor, IS_MACOS, TestCase)
11
from torch.testing._internal.common_methods_invocations import op_db
12
from torch.testing._internal.common_device_type import \
13
    (instantiate_device_type_tests, ops, OpDTypes)
14

15
# TODO: mitigate flaky issue on macOS https://github.com/pytorch/pytorch/issues/66033
# AFAIK, c10::ThreadPool looks correct in the way it uses condition_variable wait. The
# issue seems to point to macOS itself https://github.com/graphia-app/graphia/issues/33
if IS_MACOS:
    # Serialize intra-op work to dodge the macOS thread-pool flakiness above.
    torch.set_num_threads(1)

# gradcheck requires double precision, so restrict every op to the
# double / complex-double dtypes it actually supports.
_gradcheck_ops = partial(
    ops,
    dtypes=OpDTypes.supported,
    allowed_dtypes=[torch.double, torch.cdouble],
)
24

25
@unMarkDynamoStrictTest
class TestFwdGradients(TestGradients):
    """Gradcheck-based tests for forward-mode automatic differentiation.

    Ops come from ``op_db`` and are run in double precision via
    ``_gradcheck_ops``.  Each test verifies that forward AD (or
    forward-over-backward gradgrad) is numerically correct for ops that
    declare support, and that ops which do NOT declare support raise
    ``NotImplementedError`` instead of silently computing something.
    """

    # Test that forward-over-reverse gradgrad is computed correctly
    @_gradcheck_ops(op_db)
    def test_fn_fwgrad_bwgrad(self, device, dtype, op):
        self._skip_helper(op, device, dtype)

        if op.supports_fwgrad_bwgrad:
            self._check_helper(device, dtype, op, op.get_op(), "fwgrad_bwgrad")
        else:
            # Unsupported ops must fail loudly so stale OpInfo flags get noticed.
            err_msg = r"Trying to use forward AD with .* that does not support it"
            hint_msg = ("Running forward-over-backward gradgrad for an OP that does not support it did not "
                        "raise any error. If your op supports forward AD, you should set supports_fwgrad_bwgrad=True.")
            with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
                self._check_helper(device, dtype, op, op.get_op(), "fwgrad_bwgrad")

    def _forward_grad_helper(self, device, dtype, op, variant, is_inplace):
        """Run gradcheck with forward AD only for *variant* of *op*.

        ``is_inplace`` selects which of the OpInfo's batched-forward-grad
        opt-in flags applies (inplace vs. out-of-place variant).
        """
        # TODO: clean up how attributes are passed to gradcheck from OpInfos
        def call_grad_test_helper():
            # Batched forward grad is only checked when the OpInfo opted in
            # for the matching variant.
            check_batched_forward_grad = ((op.check_batched_forward_grad and not is_inplace) or
                                          (op.check_inplace_batched_forward_grad and is_inplace))
            self._grad_test_helper(device, dtype, op, variant, check_forward_ad=True, check_backward_ad=False,
                                   check_batched_grad=False, check_batched_forward_grad=check_batched_forward_grad)
        if op.supports_forward_ad:
            call_grad_test_helper()
        else:
            # Mirror of the negative check in test_fn_fwgrad_bwgrad above.
            err_msg = r"Trying to use forward AD with .* that does not support it"
            hint_msg = ("Running forward AD for an OP that does not support it did not "
                        "raise any error. If your op supports forward AD, you should set supports_forward_ad=True")
            with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
                call_grad_test_helper()

    @_gradcheck_ops(op_db)
    @skipif(platform.machine() == "s390x",
            reason="Different precision of openblas functions: https://github.com/OpenMathLib/OpenBLAS/issues/4194")
    def test_forward_mode_AD(self, device, dtype, op):
        self._skip_helper(op, device, dtype)

        self._forward_grad_helper(device, dtype, op, op.get_op(), is_inplace=False)

    @_gradcheck_ops(op_db)
    @skipIfTorchInductor("to be fixed")
    def test_inplace_forward_mode_AD(self, device, dtype, op):
        self._skip_helper(op, device, dtype)

        if not op.inplace_variant or not op.supports_inplace_autograd:
            self.skipTest("Skipped! Operation does not support inplace autograd.")

        # _get_safe_inplace clones the input first so gradcheck's perturbations
        # don't corrupt shared test tensors.
        self._forward_grad_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()), is_inplace=True)
75

76
# Expand TestFwdGradients into one concrete class per available device type
# (e.g. TestFwdGradientsCPU, TestFwdGradientsCUDA).
instantiate_device_type_tests(TestFwdGradients, globals())

if __name__ == '__main__':
    # Enable the default-dtype sanity check only when run as a script.
    TestCase._default_dtype_check_enabled = True
    run_tests()
81

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.