onnxruntime

onnxruntime_providers_tensorrt.cmake 
223 lines · 11.0 KB
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
  if(onnxruntime_DISABLE_CONTRIB_OPS)
    message( FATAL_ERROR "To compile the TensorRT execution provider, contrib ops have to be enabled to dump an engine using the com.microsoft:EPContext node." )
  endif()
  add_definitions(-DUSE_TENSORRT=1)
  if (onnxruntime_TENSORRT_PLACEHOLDER_BUILDER)
    add_definitions(-DORT_TENSORRT_PLACEHOLDER_BUILDER)
  endif()
  set(BUILD_LIBRARY_ONLY 1)
  add_definitions("-DONNX_ML=1")
  add_definitions("-DONNX_NAMESPACE=onnx")
  set(CUDA_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIRS})
  set(TENSORRT_ROOT ${onnxruntime_TENSORRT_HOME})
  set(OLD_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
  set(PROTOBUF_LIBRARY ${PROTOBUF_LIB})
  if (WIN32)
    set(OLD_CMAKE_CUDA_FLAGS ${CMAKE_CUDA_FLAGS})
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4099 /wd4551 /wd4505 /wd4515 /wd4706 /wd4456 /wd4324 /wd4701 /wd4804 /wd4702 /wd4458 /wd4703")
    if (CMAKE_BUILD_TYPE STREQUAL "Debug")
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4805")
    endif()
    set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -include algorithm")
    set(DISABLED_WARNINGS_FOR_TRT /wd4456)
  endif()
  if ( CMAKE_COMPILER_IS_GNUCC )
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-missing-field-initializers")
  endif()
  set(CXX_VERSION_DEFINED TRUE)

  # There is an issue when running a "Debug build" TRT EP with a "Release build" TRT built-in parser on Windows.
  # We enforce the following workaround for now until a real fix lands.
  if (WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug")
    set(onnxruntime_USE_TENSORRT_BUILTIN_PARSER OFF)
    MESSAGE(STATUS "[Note] There is an issue when running \"Debug build\" TRT EP with \"Release build\" TRT built-in parser on Windows. This build will use the TensorRT OSS parser instead.")
  endif()

  find_path(TENSORRT_INCLUDE_DIR NvInfer.h
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES include)

  file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h NVINFER_VER_CONTENT)
  string(REGEX MATCH "define NV_TENSORRT_MAJOR * +([0-9]+)" NV_TENSORRT_MAJOR "${NVINFER_VER_CONTENT}")
  string(REGEX REPLACE "define NV_TENSORRT_MAJOR * +([0-9]+)" "\\1" NV_TENSORRT_MAJOR "${NV_TENSORRT_MAJOR}")
  string(REGEX MATCH "define NV_TENSORRT_MINOR * +([0-9]+)" NV_TENSORRT_MINOR "${NVINFER_VER_CONTENT}")
  string(REGEX REPLACE "define NV_TENSORRT_MINOR * +([0-9]+)" "\\1" NV_TENSORRT_MINOR "${NV_TENSORRT_MINOR}")
  string(REGEX MATCH "define NV_TENSORRT_PATCH * +([0-9]+)" NV_TENSORRT_PATCH "${NVINFER_VER_CONTENT}")
  string(REGEX REPLACE "define NV_TENSORRT_PATCH * +([0-9]+)" "\\1" NV_TENSORRT_PATCH "${NV_TENSORRT_PATCH}")
  math(EXPR NV_TENSORRT_MAJOR_INT "${NV_TENSORRT_MAJOR}")
  math(EXPR NV_TENSORRT_MINOR_INT "${NV_TENSORRT_MINOR}")
  math(EXPR NV_TENSORRT_PATCH_INT "${NV_TENSORRT_PATCH}")
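  # For illustration (assumed typical contents, not read from this build): NvInferVersion.h
  # defines the version as plain integer macros, e.g.
  #   #define NV_TENSORRT_MAJOR 10
  #   #define NV_TENSORRT_MINOR 0
  #   #define NV_TENSORRT_PATCH 1
  # The MATCH/REPLACE pairs above capture the digits, and math(EXPR ...) normalizes them into
  # the NV_TENSORRT_*_INT variables used for the numeric comparisons below.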

  if (NV_TENSORRT_MAJOR)
    MESSAGE(STATUS "NV_TENSORRT_MAJOR is ${NV_TENSORRT_MAJOR}")
  else()
    MESSAGE(STATUS "Can't find NV_TENSORRT_MAJOR macro")
  endif()

  # Check TRT version >= 10.0.1.6
  if ((NV_TENSORRT_MAJOR_INT GREATER 10) OR
      (NV_TENSORRT_MAJOR_INT EQUAL 10 AND NV_TENSORRT_MINOR_INT GREATER 0) OR
      (NV_TENSORRT_MAJOR_INT EQUAL 10 AND NV_TENSORRT_PATCH_INT GREATER 0))
    set(TRT_GREATER_OR_EQUAL_TRT_10_GA ON)
  endif()
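  # Worked example (illustrative): TRT 10.0.1 sets the flag via the third clause (major == 10,
  # patch > 0), TRT 10.1.0 via the second (major == 10, minor > 0), while TRT 8.6.1 matches no
  # clause and leaves TRT_GREATER_OR_EQUAL_TRT_10_GA unset; the test is effectively >= 10.0.1.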

  # From TensorRT 10 GA onwards, the TensorRT libraries have the major version appended to their
  # names on Windows, for example nvinfer_10.dll, nvinfer_plugin_10.dll, nvonnxparser_10.dll ...
  if (WIN32 AND TRT_GREATER_OR_EQUAL_TRT_10_GA)
    set(NVINFER_LIB "nvinfer_${NV_TENSORRT_MAJOR}")
    set(NVINFER_PLUGIN_LIB "nvinfer_plugin_${NV_TENSORRT_MAJOR}")
    set(PARSER_LIB "nvonnxparser_${NV_TENSORRT_MAJOR}")
  endif()

  if (NOT NVINFER_LIB)
    set(NVINFER_LIB "nvinfer")
  endif()

  if (NOT NVINFER_PLUGIN_LIB)
    set(NVINFER_PLUGIN_LIB "nvinfer_plugin")
  endif()

  if (NOT PARSER_LIB)
    set(PARSER_LIB "nvonnxparser")
  endif()
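  # Illustrative result: on Windows with TRT >= 10 GA these resolve to nvinfer_10,
  # nvinfer_plugin_10 and nvonnxparser_10 (matching nvinfer_10.dll etc.); on Linux, or with
  # pre-10 TRT, the plain names are used and find_library() below locates e.g. libnvinfer.so.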

  MESSAGE(STATUS "Looking for ${NVINFER_LIB} and ${NVINFER_PLUGIN_LIB}")

  find_library(TENSORRT_LIBRARY_INFER ${NVINFER_LIB}
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES lib lib64 lib/x64)

  if (NOT TENSORRT_LIBRARY_INFER)
    MESSAGE(STATUS "Can't find ${NVINFER_LIB}")
  endif()

  find_library(TENSORRT_LIBRARY_INFER_PLUGIN ${NVINFER_PLUGIN_LIB}
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES lib lib64 lib/x64)

  if (NOT TENSORRT_LIBRARY_INFER_PLUGIN)
    MESSAGE(STATUS "Can't find ${NVINFER_PLUGIN_LIB}")
  endif()

  if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
    MESSAGE(STATUS "Looking for ${PARSER_LIB}")

    find_library(TENSORRT_LIBRARY_NVONNXPARSER ${PARSER_LIB}
      HINTS ${TENSORRT_ROOT}
      PATH_SUFFIXES lib lib64 lib/x64)

    if (NOT TENSORRT_LIBRARY_NVONNXPARSER)
      MESSAGE(STATUS "Can't find ${PARSER_LIB}")
    endif()

    set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_INFER_PLUGIN} ${TENSORRT_LIBRARY_NVONNXPARSER})
    MESSAGE(STATUS "Found TensorRT libs at ${TENSORRT_LIBRARY}")
  else()
    if (TRT_GREATER_OR_EQUAL_TRT_10_GA)
      set(ONNX_USE_LITE_PROTO ON)
    endif()
    FetchContent_Declare(
      onnx_tensorrt
      URL ${DEP_URL_onnx_tensorrt}
      URL_HASH SHA1=${DEP_SHA1_onnx_tensorrt}
    )
    if (NOT CUDA_INCLUDE_DIR)
      set(CUDA_INCLUDE_DIR ${CUDAToolkit_INCLUDE_DIRS}) # the onnx-tensorrt repo needs this variable to build
    endif()
    # The onnx_tensorrt repo contains a test program, getSupportedAPITest, which doesn't support
    # Windows because it uses unistd.h, so we must exclude it from our build.
    # onnxruntime_fetchcontent_makeavailable is used for that purpose.
    onnxruntime_fetchcontent_makeavailable(onnx_tensorrt)
    include_directories(${onnx_tensorrt_SOURCE_DIR})
    set(CMAKE_CXX_FLAGS ${OLD_CMAKE_CXX_FLAGS})
    if ( CMAKE_COMPILER_IS_GNUCC )
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
    endif()
    if (WIN32)
      set(CMAKE_CUDA_FLAGS ${OLD_CMAKE_CUDA_FLAGS})
      unset(PROTOBUF_LIBRARY)
      unset(OLD_CMAKE_CXX_FLAGS)
      unset(OLD_CMAKE_CUDA_FLAGS)
      set_target_properties(${PARSER_LIB} PROPERTIES LINK_FLAGS "/ignore:4199")
      target_compile_options(nvonnxparser_static PRIVATE /FIio.h /wd4100)
      target_compile_options(${PARSER_LIB} PRIVATE /FIio.h /wd4100)
    endif()
    # Static libraries are just nvonnxparser_static on all platforms.
    set(onnxparser_link_libs nvonnxparser_static)
    set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_INFER_PLUGIN})
    MESSAGE(STATUS "Found TensorRT libs at ${TENSORRT_LIBRARY}")
  endif()
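  # Summary of the two parser modes (illustrative): with onnxruntime_USE_TENSORRT_BUILTIN_PARSER=ON
  # the nvonnxparser library shipped with TensorRT is found and linked; with it OFF, the
  # onnx-tensorrt sources are fetched and the provider links nvonnxparser_static instead, e.g.
  #   cmake ... -Donnxruntime_USE_TENSORRT_BUILTIN_PARSER=OFF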

  include_directories(${TENSORRT_INCLUDE_DIR})
  # ${TENSORRT_LIBRARY} is empty if we link nvonnxparser_static.
  # nvonnxparser_static is linked against the TensorRT libraries in onnx-tensorrt.
  # See https://github.com/onnx/onnx-tensorrt/blob/8af13d1b106f58df1e98945a5e7c851ddb5f0791/CMakeLists.txt#L121
  # However, starting from TRT 10 GA, nvonnxparser_static doesn't link against the TensorRT libraries.
  # Therefore, the code above finds ${TENSORRT_LIBRARY_INFER} and ${TENSORRT_LIBRARY_INFER_PLUGIN}.
  if(onnxruntime_CUDA_MINIMAL)
    set(trt_link_libs ${CMAKE_DL_LIBS} ${TENSORRT_LIBRARY})
  else()
    set(trt_link_libs CUDNN::cudnn_all cublas ${CMAKE_DL_LIBS} ${TENSORRT_LIBRARY})
  endif()
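  # Note (assumed semantics of onnxruntime_CUDA_MINIMAL: a reduced CUDA build): in that mode the
  # provider links only the dynamic loader and the TensorRT libraries, skipping cuDNN and cuBLAS.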
  file(GLOB_RECURSE onnxruntime_providers_tensorrt_cc_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/core/providers/tensorrt/*.h"
    "${ONNXRUNTIME_ROOT}/core/providers/tensorrt/*.cc"
    "${ONNXRUNTIME_ROOT}/core/providers/shared_library/*.h"
    "${ONNXRUNTIME_ROOT}/core/providers/shared_library/*.cc"
    "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_stream_handle.h"
    "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_stream_handle.cc"
    "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_graph.h"
    "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_graph.cc"
  )

  source_group(TREE ${ONNXRUNTIME_ROOT}/core FILES ${onnxruntime_providers_tensorrt_cc_srcs})
  onnxruntime_add_shared_library_module(onnxruntime_providers_tensorrt ${onnxruntime_providers_tensorrt_cc_srcs})
  onnxruntime_add_include_to_target(onnxruntime_providers_tensorrt onnxruntime_common onnx flatbuffers::flatbuffers Boost::mp11 safeint_interface)
  add_dependencies(onnxruntime_providers_tensorrt onnxruntime_providers_shared ${onnxruntime_EXTERNAL_DEPENDENCIES})
  if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
    target_link_libraries(onnxruntime_providers_tensorrt PRIVATE ${trt_link_libs} ${ONNXRUNTIME_PROVIDERS_SHARED} ${PROTOBUF_LIB} flatbuffers::flatbuffers Boost::mp11 safeint_interface ${ABSEIL_LIBS} PUBLIC CUDA::cudart)
  else()
    target_link_libraries(onnxruntime_providers_tensorrt PRIVATE ${onnxparser_link_libs} ${trt_link_libs} ${ONNXRUNTIME_PROVIDERS_SHARED} ${PROTOBUF_LIB} flatbuffers::flatbuffers ${ABSEIL_LIBS} PUBLIC CUDA::cudart)
  endif()
  # ${CMAKE_CURRENT_BINARY_DIR} is included so that #include "onnxruntime_config.h" inside tensor_shape.h is found.
  target_include_directories(onnxruntime_providers_tensorrt PRIVATE ${ONNXRUNTIME_ROOT} ${CMAKE_CURRENT_BINARY_DIR} ${eigen_INCLUDE_DIRS}
    PUBLIC ${CUDAToolkit_INCLUDE_DIRS})

  set_target_properties(onnxruntime_providers_tensorrt PROPERTIES LINKER_LANGUAGE CUDA)
  set_target_properties(onnxruntime_providers_tensorrt PROPERTIES FOLDER "ONNXRuntime")
  target_compile_definitions(onnxruntime_providers_tensorrt PRIVATE ONNXIFI_BUILD_LIBRARY=1)
  target_compile_options(onnxruntime_providers_tensorrt PRIVATE ${DISABLED_WARNINGS_FOR_TRT})
  if (WIN32)
    target_compile_options(onnxruntime_providers_tensorrt INTERFACE /wd4456)
  endif()
  if(onnxruntime_CUDA_MINIMAL)
    target_compile_definitions(onnxruntime_providers_tensorrt PRIVATE USE_CUDA_MINIMAL=1)
  endif()

  # Needed for the provider interface, as it includes training headers when training is enabled.
  if (onnxruntime_ENABLE_TRAINING_OPS)
    target_include_directories(onnxruntime_providers_tensorrt PRIVATE ${ORTTRAINING_ROOT})
    if (onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
      onnxruntime_add_include_to_target(onnxruntime_providers_tensorrt Python::Module)
    endif()
  endif()

  if(APPLE)
    set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY LINK_FLAGS "-Xlinker -exported_symbols_list ${ONNXRUNTIME_ROOT}/core/providers/tensorrt/exported_symbols.lst")
    target_link_libraries(onnxruntime_providers_tensorrt PRIVATE nsync::nsync_cpp)
  elseif(UNIX)
    set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY COMPILE_FLAGS "-Wno-deprecated-declarations")
    set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY LINK_FLAGS "-Xlinker --version-script=${ONNXRUNTIME_ROOT}/core/providers/tensorrt/version_script.lds -Xlinker --gc-sections")
    target_link_libraries(onnxruntime_providers_tensorrt PRIVATE nsync::nsync_cpp)
  elseif(WIN32)
    set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY LINK_FLAGS "-DEF:${ONNXRUNTIME_ROOT}/core/providers/tensorrt/symbols.def")
  else()
    message(FATAL_ERROR "onnxruntime_providers_tensorrt: unknown platform, need to specify shared library exports for it")
  endif()

  install(TARGETS onnxruntime_providers_tensorrt
          ARCHIVE  DESTINATION ${CMAKE_INSTALL_LIBDIR}
          LIBRARY  DESTINATION ${CMAKE_INSTALL_LIBDIR}
          RUNTIME  DESTINATION ${CMAKE_INSTALL_BINDIR})
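  # Typical configure step that exercises this file (a sketch; the cache variable names match
  # those referenced above, exact paths assumed):
  #   cmake ... -Donnxruntime_USE_TENSORRT=ON \
  #             -Donnxruntime_TENSORRT_HOME=/opt/TensorRT-10.0.1.6 \
  #             -Donnxruntime_USE_TENSORRT_BUILTIN_PARSER=ON
  # or, via the helper script: ./build.sh --use_tensorrt --tensorrt_home /opt/TensorRT-10.0.1.6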