intel-extension-for-pytorch
432 lines · 16.7 KB
1#include <torch/torch.h>2#include "csrc/cpu/runtime/CPUPool.h"3#include "csrc/cpu/runtime/Task.h"4#include "csrc/cpu/runtime/TaskExecutor.h"5#include "gtest/gtest.h"6
// Tensor-equality helpers: compare two tensors element-wise with
// torch::allclose and report through the matching gtest macro
// (ASSERT_* aborts the test on failure, EXPECT_* continues).
#define ASSERT_VARIABLE_EQ(a, b) ASSERT_TRUE(torch::allclose((a), (b)))
#define EXPECT_VARIABLE_EQ(a, b) EXPECT_TRUE(torch::allclose((a), (b)))
10TEST(TestRuntimeAPI, TestMainThreadCoreBind) {11// 1. Get the default thread affinity information in main thread.12// 2. Set the new thread affinity information in main thread.13// 3. Run the function in main thread.14// 4. Restore the default thread affinity information in main thread.15if (!torch_ipex::runtime::is_runtime_ext_enabled()) {16GTEST_SKIP()17<< "Skip TestRuntimeAPI::TestMainThreadCoreBind. Didn't preload IOMP.";18}19at::Tensor input_tensor = at::rand({100, 8276});20// Get the reference result.21auto res_ref = at::softmax(input_tensor, -1);22// Get current cpu_pool information.23torch_ipex::runtime::CPUPool previous_cpu_pool =24torch_ipex::runtime::get_cpu_pool_from_mask_affinity();25// Ping CPU Cores.26std::vector<int32_t> cpu_core_list({0});27torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);28torch_ipex::runtime::_pin_cpu_cores(cpu_pool);29auto res = at::softmax(input_tensor, -1);30ASSERT_VARIABLE_EQ(res, res_ref);31// restore the cpu pool information.32torch_ipex::runtime::set_mask_affinity_from_cpu_pool(previous_cpu_pool);33}
34
35TEST(TestRuntimeAPI, TestMainThreadCoreBindWithCPUPool) {36if (!torch_ipex::runtime::is_runtime_ext_enabled()) {37GTEST_SKIP()38<< "Skip TestRuntimeAPI::TestMainThreadCoreBindWithCPUPool. Didn't preload IOMP.";39}40at::Tensor input_tensor = at::rand({100, 8276});41std::vector<int32_t> cpu_core_list({0});42torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);43{44torch_ipex::runtime::WithCPUPool with_cpu_pool(std::move(cpu_pool));45auto res = at::softmax(input_tensor, -1);46}47auto res_ = at::softmax(input_tensor, -1);48}
49
50TEST(TestRuntimeTaskAPI, TestTaskAPINativeTorchOperation) {51if (!torch_ipex::runtime::is_runtime_ext_enabled()) {52GTEST_SKIP()53<< "Skip TestRuntimeTaskAPI::TestTaskAPINativeTorchOperation. Didn't preload IOMP.";54}55std::vector<int32_t> cpu_core_list({0});56torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);57std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =58std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);59at::Tensor input_tensor = at::rand({100, 8276});60// Get the reference result61auto res_ref = at::softmax(input_tensor, -1);62// Create the task63torch_ipex::runtime::Task<64at::Tensor (*)(const at::Tensor&, int64_t, c10::optional<at::ScalarType>),65const at::Tensor&,66int64_t,67c10::optional<at::ScalarType>&>68task(at::softmax, task_executor);69c10::optional<at::ScalarType> dtype = c10::nullopt;70// or71// c10::optional<at::ScalarType> dtype = input_tensor.scalar_type();72auto res_future = task(input_tensor, -1, dtype);73auto res = res_future.get();74
75// Test Rvalue Input Tensor76auto res_future_rinput = task(std::move(input_tensor), -1, dtype);77auto res_rinput = res_future_rinput.get();78
79// Test Const Input Tensor80const at::Tensor input_tensor2 = at::rand({100, 8276});81auto res_ref_const_input = at::softmax(input_tensor2, -1);82auto res_future_const_input = task(input_tensor2, -1, dtype);83auto res_const_input = res_future_const_input.get();84
85// Assert the result86ASSERT_VARIABLE_EQ(res, res_ref);87ASSERT_VARIABLE_EQ(res_rinput, res_ref);88ASSERT_VARIABLE_EQ(res_const_input, res_ref_const_input);89}
90
91TEST(TestRuntimeTaskAPI, TestTaskAPILambdaFunction) {92if (!torch_ipex::runtime::is_runtime_ext_enabled()) {93GTEST_SKIP()94<< "Skip TestRuntimeTaskAPI::TestTaskAPILambdaFunction. Didn't preload IOMP.";95}96std::vector<int32_t> cpu_core_list({0});97torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);98std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =99std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);100at::Tensor input_tensor = at::rand({100, 8276});101// Get the reference result102auto res_ref = at::softmax(input_tensor, -1);103// Create the task104torch_ipex::runtime::105Task<at::Tensor (*)(const at::Tensor&), const at::Tensor&>106task(107[](const at::Tensor& input) -> at::Tensor {108return at::softmax(input, -1);109},110task_executor);111auto res_future = task(input_tensor);112auto res = res_future.get();113// Assert the result114ASSERT_VARIABLE_EQ(res, res_ref);115}
116
117at::Tensor taskfunction_native_input(at::Tensor input) {118at::Tensor output;119output = at::softmax(input, -1);120return output;121}
122
123TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionNativeInput) {124if (!torch_ipex::runtime::is_runtime_ext_enabled()) {125GTEST_SKIP()126<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionNativeInput. Didn't preload IOMP.";127}128std::vector<int32_t> cpu_core_list({0});129torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);130std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =131std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);132at::Tensor input_tensor = at::rand({100, 8276});133// Get the reference result134auto res_ref = taskfunction_native_input(input_tensor);135// Create the task136torch_ipex::runtime::Task<at::Tensor (*)(at::Tensor), at::Tensor> task(137taskfunction_native_input, task_executor);138auto res_future = task(std::move(input_tensor));139auto res = res_future.get();140// Assert the result141ASSERT_VARIABLE_EQ(res, res_ref);142}
143
144TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionNativeInputLValue) {145if (!torch_ipex::runtime::is_runtime_ext_enabled()) {146GTEST_SKIP()147<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionNativeInput. Didn't preload IOMP.";148}149std::vector<int32_t> cpu_core_list({0});150torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);151std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =152std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);153at::Tensor input_tensor = at::rand({100, 8276});154// Get the reference result155auto res_ref = taskfunction_native_input(input_tensor);156// Create the task157torch_ipex::runtime::Task<at::Tensor (*)(at::Tensor), at::Tensor&> task(158taskfunction_native_input, task_executor);159auto res_future = task(input_tensor);160auto res = res_future.get();161// Assert the result162ASSERT_VARIABLE_EQ(res, res_ref);163}
164
165at::Tensor taskfunction_lvalue_reference(at::Tensor& input) {166at::Tensor output;167output = at::softmax(input, -1);168return output;169}
170
171TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionLValueReference) {172if (!torch_ipex::runtime::is_runtime_ext_enabled()) {173GTEST_SKIP()174<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionLValueReference. Didn't preload IOMP.";175}176std::vector<int32_t> cpu_core_list({0});177torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);178std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =179std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);180at::Tensor input_tensor = at::rand({100, 8276});181// Get the reference result182auto res_ref = taskfunction_lvalue_reference(input_tensor);183// Create the task184torch_ipex::runtime::Task<at::Tensor (*)(at::Tensor&), at::Tensor&> task(185taskfunction_lvalue_reference, task_executor);186auto res_future = task(input_tensor);187auto res = res_future.get();188// Assert the result189ASSERT_VARIABLE_EQ(res, res_ref);190}
191
192at::Tensor taskfunction_const_lvalue_reference(const at::Tensor& input) {193at::Tensor output;194output = at::softmax(input, -1);195return output;196}
197
198TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionConstLValueReference) {199if (!torch_ipex::runtime::is_runtime_ext_enabled()) {200GTEST_SKIP()201<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionConstLValueReference. Didn't preload IOMP.";202}203std::vector<int32_t> cpu_core_list({0});204torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);205std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =206std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);207at::Tensor input_tensor = at::rand({100, 8276});208// Get the reference result209auto res_ref = taskfunction_const_lvalue_reference(input_tensor);210// Create the task211torch_ipex::runtime::212Task<at::Tensor (*)(const at::Tensor&), const at::Tensor&>213task(taskfunction_const_lvalue_reference, task_executor);214auto res_future = task(input_tensor);215auto res = res_future.get();216// Assert the result217ASSERT_VARIABLE_EQ(res, res_ref);218}
219
220at::Tensor taskfunction_rvalue_reference(at::Tensor&& input) {221at::Tensor output;222output = at::softmax(input, -1);223return output;224}
225
226TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionRvalueReference) {227if (!torch_ipex::runtime::is_runtime_ext_enabled()) {228GTEST_SKIP()229<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionRvalueReference. Didn't preload IOMP.";230}231std::vector<int32_t> cpu_core_list({0});232torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);233std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =234std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);235at::Tensor input_tensor = at::rand({100, 8276});236at::Tensor input_tensor2 = input_tensor;237// Get the reference result238auto res_ref = taskfunction_rvalue_reference(std::move(input_tensor));239// Create the task240torch_ipex::runtime::Task<at::Tensor (*)(at::Tensor &&), at::Tensor&&> task(241taskfunction_rvalue_reference, task_executor);242auto res_future = task(std::move(input_tensor2));243auto res = res_future.get();244// Assert the result245ASSERT_VARIABLE_EQ(res, res_ref);246}
247
248std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>249taskfunction_mix_lvalue_rvalue_reference(250at::Tensor input1,251at::Tensor& input2,252const at::Tensor& input3,253at::Tensor&& input4) {254at::Tensor output1 = at::softmax(input1, -1);255at::Tensor output2 = at::softmax(input2, -1);256at::Tensor output3 = at::softmax(input3, -1);257at::Tensor output4 = at::softmax(input4, -1);258return std::make_tuple(output1, output2, output3, output4);259}
260
261TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionMixLvalueRvalueReference) {262if (!torch_ipex::runtime::is_runtime_ext_enabled()) {263GTEST_SKIP()264<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionMixLvalueRvalueReference. Didn't preload IOMP.";265}266std::vector<int32_t> cpu_core_list({0});267torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);268std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =269std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);270at::Tensor input_tensor = at::rand({100, 8276});271at::Tensor input_tensor2 = input_tensor;272at::Tensor input_tensor3 = input_tensor;273at::Tensor input_tensor4 = input_tensor;274// Get the reference result275auto res_ref = taskfunction_mix_lvalue_rvalue_reference(276std::move(input_tensor),277input_tensor2,278input_tensor2,279std::move(input_tensor2));280// Create the task281torch_ipex::runtime::Task<282std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> (*)(283at::Tensor, at::Tensor&, const at::Tensor&, at::Tensor&&),284at::Tensor,285at::Tensor&,286const at::Tensor&,287at::Tensor&&>288task(taskfunction_mix_lvalue_rvalue_reference, task_executor);289auto res_future = task(290std::move(input_tensor3),291input_tensor4,292input_tensor4,293std::move(input_tensor4));294auto res = res_future.get();295// Assert the result296ASSERT_VARIABLE_EQ(std::get<0>(res), std::get<0>(res_ref));297ASSERT_VARIABLE_EQ(std::get<1>(res), std::get<1>(res_ref));298ASSERT_VARIABLE_EQ(std::get<2>(res), std::get<2>(res_ref));299ASSERT_VARIABLE_EQ(std::get<3>(res), std::get<3>(res_ref));300}
301
302at::Tensor taskfunction_input_vector(std::vector<at::Tensor>& inputs) {303at::Tensor output;304output = at::softmax(inputs[0], -1);305return output;306}
307
308TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionInputVectorTensor) {309if (!torch_ipex::runtime::is_runtime_ext_enabled()) {310GTEST_SKIP()311<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionInputVectorTensor. Didn't preload IOMP.";312}313std::vector<int32_t> cpu_core_list({0});314torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);315std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =316std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);317at::Tensor input_tensor = at::rand({100, 8276});318std::vector<at::Tensor> input_tenosrs;319input_tenosrs.emplace_back(input_tensor);320// Get the reference result321auto res_ref = taskfunction_input_vector(input_tenosrs);322// Create the task323torch_ipex::runtime::324Task<at::Tensor (*)(std::vector<at::Tensor>&), std::vector<at::Tensor>&>325task(taskfunction_input_vector, task_executor);326auto res_future = task(input_tenosrs);327auto res = res_future.get();328// Assert the result329ASSERT_VARIABLE_EQ(res, res_ref);330}
331
332at::Tensor& taskfunction_input_reference_output_lvalue_reference(333at::Tensor& input,334at::Tensor& output) {335output = at::softmax(input, -1);336return output;337}
338
339TEST(TestRuntimeTaskAPI, TestTaskAPICPPFunctionOutputTensorLValueReference) {340if (!torch_ipex::runtime::is_runtime_ext_enabled()) {341GTEST_SKIP()342<< "Skip TestRuntimeTaskAPI::TestTaskAPICPPFunctionOutputTensorLValueReference. Didn't preload IOMP.";343}344std::vector<int32_t> cpu_core_list({0});345torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);346std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =347std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);348at::Tensor input_tensor = at::rand({100, 8276});349at::Tensor output_tensor;350at::Tensor output_tensor2;351// Get the reference result352auto res_ref = taskfunction_input_reference_output_lvalue_reference(353input_tensor, output_tensor);354// Create the task355torch_ipex::runtime::356Task<at::Tensor& (*)(at::Tensor&, at::Tensor&), at::Tensor&, at::Tensor&>357task(358taskfunction_input_reference_output_lvalue_reference,359task_executor);360auto res_future = task(input_tensor, output_tensor2);361auto res = res_future.get();362// Assert the result363ASSERT_VARIABLE_EQ(res, res_ref);364ASSERT_VARIABLE_EQ(output_tensor, res_ref);365ASSERT_VARIABLE_EQ(output_tensor2, res_ref);366ASSERT_VARIABLE_EQ(output_tensor2, output_tensor);367}
368
369TEST(TestRuntimeTaskAPI, TestTaskAPIMultiTasksSameTensorInput) {370if (!torch_ipex::runtime::is_runtime_ext_enabled()) {371GTEST_SKIP()372<< "Skip TestRuntimeTaskAPI::TestTaskAPIMultiTasksSameTensorInput. Didn't preload IOMP.";373}374std::vector<int32_t> cpu_core_list({0});375torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);376std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =377std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);378
379std::vector<int32_t> cpu_core_list2({1});380torch_ipex::runtime::CPUPool cpu_pool2(cpu_core_list2);381std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor2 =382std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool2);383
384at::Tensor input_tensor = at::rand({100, 8276});385// Get the reference result386auto res_ref = at::softmax(input_tensor, -1);387// Create the task388torch_ipex::runtime::389Task<at::Tensor (*)(const at::Tensor&), const at::Tensor&>390task(taskfunction_const_lvalue_reference, task_executor);391
392torch_ipex::runtime::393Task<at::Tensor (*)(const at::Tensor&), const at::Tensor&>394task2(taskfunction_const_lvalue_reference, task_executor2);395
396auto res_future = task(input_tensor);397auto res_future2 = task2(input_tensor);398auto res = res_future.get();399auto res2 = res_future2.get();400// Assert the result401ASSERT_VARIABLE_EQ(res, res_ref);402ASSERT_VARIABLE_EQ(res2, res_ref);403}
404
405TEST(TestRuntimeTaskAPI, TestTaskAPISameTasksMultiTensorInputs) {406if (!torch_ipex::runtime::is_runtime_ext_enabled()) {407GTEST_SKIP()408<< "Skip TestRuntimeTaskAPI::TestTaskAPISameTasksMultiTensorInputs. Didn't preload IOMP.";409}410std::vector<int32_t> cpu_core_list({0});411torch_ipex::runtime::CPUPool cpu_pool(cpu_core_list);412std::shared_ptr<torch_ipex::runtime::TaskExecutor> task_executor =413std::make_shared<torch_ipex::runtime::TaskExecutor>(cpu_pool);414
415at::Tensor input_tensor = at::rand({100, 8276});416at::Tensor input_tensor2 = at::rand({100, 8276});417// Get the reference result418auto res_ref = at::softmax(input_tensor, -1);419auto res_ref2 = at::softmax(input_tensor2, -1);420// Create the task421torch_ipex::runtime::422Task<at::Tensor (*)(const at::Tensor&), const at::Tensor&>423task(taskfunction_const_lvalue_reference, task_executor);424
425auto res_future = task(input_tensor);426auto res_future2 = task(input_tensor2);427auto res = res_future.get();428auto res2 = res_future2.get();429// Assert the result430ASSERT_VARIABLE_EQ(res, res_ref);431ASSERT_VARIABLE_EQ(res2, res_ref2);432}
433