C++ Helper Class for Deep Learning Inference Frameworks: TensorFlow Lite, TensorRT, OpenCV, OpenVINO, ncnn, MNN, SNPE, Arm NN, NNabla, ONNX Runtime, LibTorch, TensorFlow
`No Library` means a pre-built library is not provided, so I cannot verify it in CI; it may still work if you build the library yourself. Please refer to https://github.com/iwatake2222/InferenceHelper_Sample for sample projects.
Clone the repository together with its submodules (using `git submodule` is recommended), then download the pre-built libraries:

```sh
sh third_party/download_prebuilt_libraries.sh
```
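For a fresh setup, the whole sequence presumably looks like this (the repository URL is inferred from the project name):

```sh
git clone https://github.com/iwatake2222/InferenceHelper.git
cd InferenceHelper
git submodule update --init --recursive    # fetch framework sources registered as submodules
sh third_party/download_prebuilt_libraries.sh
```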
You need some additional steps if you use the frameworks listed below:

- OpenCV: set the `OpenCV_DIR` and `PATH` environment variables (a sketch follows this list).
- OpenVINO: run `C:\Program Files (x86)\Intel\openvino_2021\bin\setupvars.bat` (Windows) or `source /opt/intel/openvino_2021/bin/setupvars.sh` (Linux).
- TensorFlow Lite (EdgeTPU): delete `C:\Windows\System32\edgetpu.dll` to ensure the program uses our pre-built library.
- Vulkan SDK (Windows): also install "(Optional) Debuggable Shader API Libraries - 64-bit", so that you can use Debug in Visual Studio.
- Vulkan SDK (Linux):

  ```sh
  wget https://sdk.lunarg.com/sdk/download/latest/linux/vulkan-sdk.tar.gz
  tar xzvf vulkan-sdk.tar.gz
  export VULKAN_SDK=$(pwd)/1.2.198.1/x86_64
  sudo apt install -y vulkan-utils libvulkan1 libvulkan-dev
  ```

- SNPE: download `snpe-1.51.0.zip`, then place the `lib` and `include` folders in `third_party/snpe_prebuilt`.
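As referenced in the OpenCV item above, a sketch of the environment setup; the paths are illustrative and must match your own OpenCV installation:

```sh
# Linux: point CMake at OpenCVConfig.cmake and expose the shared libraries
export OpenCV_DIR=/usr/local/lib/cmake/opencv4
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib

# Windows (Command Prompt):
#   set OpenCV_DIR=C:\opencv\build
#   set PATH=%PATH%;C:\opencv\build\x64\vc16\bin
```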
Note: `Debug` mode in Visual Studio doesn't work for ncnn, NNabla and LibTorch because debuggable libraries are not provided. `Debug` will cause unexpected behavior, so use `Release` or `RelWithDebInfo`.
Check `third_party/download_prebuilt_libraries.sh` and `third_party/cmakes/*` to see which libraries are being used. By default, libraries without GPU (CUDA/Vulkan) support are selected to be safe, so if you want to use the GPU, modify these files.

To use InferenceHelper from your own project, add it with `add_subdirectory` and link against it:

```cmake
set(INFERENCE_HELPER_DIR ${CMAKE_CURRENT_LIST_DIR}/../../InferenceHelper/)
add_subdirectory(${INFERENCE_HELPER_DIR}/inference_helper inference_helper)
target_include_directories(${LibraryName} PUBLIC ${INFERENCE_HELPER_DIR}/inference_helper)
target_link_libraries(${LibraryName} InferenceHelper)
```
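For context, a minimal consuming `CMakeLists.txt` could look like the following; the project name, target name, and relative path are illustrative:

```cmake
cmake_minimum_required(VERSION 3.10)
project(MyApp)

add_executable(MyApp main.cpp)

# Pull in InferenceHelper (adjust the path to your checkout location)
set(INFERENCE_HELPER_DIR ${CMAKE_CURRENT_LIST_DIR}/InferenceHelper/)
add_subdirectory(${INFERENCE_HELPER_DIR}/inference_helper inference_helper)
target_include_directories(MyApp PUBLIC ${INFERENCE_HELPER_DIR}/inference_helper)
target_link_libraries(MyApp InferenceHelper)
```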
Select the deep learning framework with a CMake option:

```sh
# OpenCV (dnn), OpenVINO
cmake .. -DINFERENCE_HELPER_ENABLE_OPENCV=on
# TensorFlow Lite
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE=on
# TensorFlow Lite (XNNPACK)
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK=on
# TensorFlow Lite (GPU)
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU=on
# TensorFlow Lite (EdgeTPU)
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU=on
# TensorFlow Lite (NNAPI)
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_NNAPI=on
# TensorRT
cmake .. -DINFERENCE_HELPER_ENABLE_TENSORRT=on
# ncnn, ncnn + vulkan
cmake .. -DINFERENCE_HELPER_ENABLE_NCNN=on
# MNN (+ Vulkan)
cmake .. -DINFERENCE_HELPER_ENABLE_MNN=on
# SNPE
cmake .. -DINFERENCE_HELPER_ENABLE_SNPE=on
# Arm NN
cmake .. -DINFERENCE_HELPER_ENABLE_ARMNN=on
# NNabla
cmake .. -DINFERENCE_HELPER_ENABLE_NNABLA=on
# NNabla with CUDA
cmake .. -DINFERENCE_HELPER_ENABLE_NNABLA_CUDA=on
# ONNX Runtime
cmake .. -DINFERENCE_HELPER_ENABLE_ONNX_RUNTIME=on
# ONNX Runtime with CUDA
cmake .. -DINFERENCE_HELPER_ENABLE_ONNX_RUNTIME_CUDA=on
# LibTorch
cmake .. -DINFERENCE_HELPER_ENABLE_LIBTORCH=on
# LibTorch with CUDA
cmake .. -DINFERENCE_HELPER_ENABLE_LIBTORCH_CUDA=on
# TensorFlow
cmake .. -DINFERENCE_HELPER_ENABLE_TENSORFLOW=on
# TensorFlow with GPU
cmake .. -DINFERENCE_HELPER_ENABLE_TENSORFLOW_GPU=on
```

Enable/disable pre-processing with OpenCV:

```sh
cmake .. -DINFERENCE_HELPER_ENABLE_PRE_PROCESS_BY_OPENCV=off
```
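Putting it together, a typical out-of-source configure and build might be (the TFLite option is just an example):

```sh
mkdir build && cd build
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE=on
cmake --build . --config Release
```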
Specify the framework via `HelperType`:

```cpp
typedef enum {
    kOpencv,
    kOpencvGpu,
    kTensorflowLite,
    kTensorflowLiteXnnpack,
    kTensorflowLiteGpu,
    kTensorflowLiteEdgetpu,
    kTensorflowLiteNnapi,
    kTensorrt,
    kNcnn,
    kNcnnVulkan,
    kMnn,
    kSnpe,
    kArmnn,
    kNnabla,
    kNnablaCuda,
    kOnnxRuntime,
    kOnnxRuntimeCuda,
    kLibtorch,
    kLibtorchCuda,
    kTensorflow,
    kTensorflowGpu,
} HelperType;
```
Create an `InferenceHelper` instance for the chosen framework:

```cpp
std::unique_ptr<InferenceHelper> inference_helper(InferenceHelper::Create(InferenceHelper::kTensorflowLite));
```
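`Create()` presumably returns an empty pointer when the requested framework was not enabled at build time, so a defensive check is a sensible habit (the error handling here is illustrative):

```cpp
if (!inference_helper) {
    printf("Failed to create InferenceHelper: is the framework enabled in CMake?\n");
    return -1;  // illustrative error handling
}
```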
(Optional) If built with `INFERENCE_HELPER_ENABLE_PRE_PROCESS_BY_OPENCV=on`, you can pre-process the input image using OpenCV:

```cpp
InferenceHelper::PreProcessByOpenCV(input_tensor_info, false, img_blob);
```
Set the number of threads:

```cpp
inference_helper->SetNumThreads(4);
```
Register custom ops if the model needs them (here, MediaPipe's Convolution2DTransposeBias):

```cpp
std::vector<std::pair<const char*, const void*>> custom_ops;
custom_ops.push_back(std::pair<const char*, const void*>("Convolution2DTransposeBias", (const void*)mediapipe::tflite_operations::RegisterConvolution2DTransposeBias()));
inference_helper->SetCustomOps(custom_ops);
```
Describe the input tensor:

```cpp
std::vector<InputTensorInfo> input_tensor_list;
InputTensorInfo input_tensor_info("input", TensorInfo::kTensorTypeFp32, false); /* name, tensor_type, is_nchw */
input_tensor_info.tensor_dims = { 1, 224, 224, 3 };
input_tensor_info.data_type = InputTensorInfo::kDataTypeImage;
input_tensor_info.data = img_src.data;
input_tensor_info.image_info.width = img_src.cols;
input_tensor_info.image_info.height = img_src.rows;
input_tensor_info.image_info.channel = img_src.channels();
input_tensor_info.image_info.crop_x = 0;
input_tensor_info.image_info.crop_y = 0;
input_tensor_info.image_info.crop_width = img_src.cols;
input_tensor_info.image_info.crop_height = img_src.rows;
input_tensor_info.image_info.is_bgr = false;
input_tensor_info.image_info.swap_color = false;
input_tensor_info.normalize.mean[0] = 0.485f; /* https://github.com/onnx/models/tree/master/vision/classification/mobilenet#preprocessing */
input_tensor_info.normalize.mean[1] = 0.456f;
input_tensor_info.normalize.mean[2] = 0.406f;
input_tensor_info.normalize.norm[0] = 0.229f;
input_tensor_info.normalize.norm[1] = 0.224f;
input_tensor_info.normalize.norm[2] = 0.225f;
input_tensor_list.push_back(input_tensor_info);
```
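For reference, `img_src` above is assumed to be a `cv::Mat`, e.g. loaded with OpenCV; note that `cv::imread` returns BGR data, so set `image_info.is_bgr` to match your actual input:

```cpp
#include <opencv2/opencv.hpp>
cv::Mat img_src = cv::imread("input.jpg");  // 8-bit, HWC, BGR channel order
```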
Describe the output tensor, then initialize the helper with the model:

```cpp
std::vector<OutputTensorInfo> output_tensor_list;
output_tensor_list.push_back(OutputTensorInfo("MobilenetV2/Predictions/Reshape_1", TensorInfo::kTensorTypeFp32));
inference_helper->Initialize("mobilenet_v2_1.0_224.tflite", input_tensor_list, output_tensor_list);
```
Run pre-processing and inference for each frame, and release the helper when done:

```cpp
inference_helper->PreProcess(input_tensor_list);
inference_helper->Process(output_tensor_list);
inference_helper->Finalize();
```
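Taken together, the expected call order is: create, `Initialize` once, then `PreProcess`/`Process` per frame, and `Finalize` at the end. A minimal sketch (the loop condition and frame update are placeholders):

```cpp
std::unique_ptr<InferenceHelper> helper(InferenceHelper::Create(InferenceHelper::kTensorflowLite));
helper->Initialize("mobilenet_v2_1.0_224.tflite", input_tensor_list, output_tensor_list);
while (has_next_frame) {                       // hypothetical loop condition
    input_tensor_list[0].data = img_src.data;  // update with the current frame
    helper->PreProcess(input_tensor_list);
    helper->Process(output_tensor_list);
    // read results, e.g. via output_tensor_list[0].GetDataAsFloat()
}
helper->Finalize();
```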
Tensor types (`TensorInfo`):

```cpp
enum {
    kTensorTypeNone,
    kTensorTypeUint8,
    kTensorTypeInt8,
    kTensorTypeFp32,
    kTensorTypeInt32,
    kTensorTypeInt64,
};
```
Members common to input and output tensors:

```cpp
std::string name;                 // [In] The name of the tensor
int32_t id;                       // [Out] Do not modify (used internally by InferenceHelper)
int32_t tensor_type;              // [In] The type of the tensor (e.g. kTensorTypeFp32)
std::vector<int32_t> tensor_dims; // InputTensorInfo: [In] The dimensions of the tensor (if empty at Initialize, the size is updated from the model info)
                                  // OutputTensorInfo: [Out] The dimensions of the tensor, set from the model information
bool is_nchw;                     // [In] true: NCHW, false: NHWC
```
`InputTensorInfo` data types (a blob-feeding sketch follows this enum):

```cpp
enum {
    kDataTypeImage,
    kDataTypeBlobNhwc,  // data that has already been pre-processed (color conversion, resize, normalization, etc.)
    kDataTypeBlobNchw,
};
```
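As mentioned above, a sketch of feeding an already pre-processed blob instead of a raw image; `blob_nchw` is a hypothetical float buffer prepared by the caller:

```cpp
InputTensorInfo info("input", TensorInfo::kTensorTypeFp32, true);  // name, tensor_type, is_nchw = true
info.tensor_dims = { 1, 3, 224, 224 };
info.data_type = InputTensorInfo::kDataTypeBlobNchw;  // skip the built-in pre-processing
info.data = blob_nchw;                                // hypothetical pre-processed buffer
```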
The remaining `InputTensorInfo` members:

```cpp
void* data;        // [In] Pointer to the image/blob
int32_t data_type; // [In] The type of data (e.g. kDataTypeImage)
struct {
    int32_t width;
    int32_t height;
    int32_t channel;
    int32_t crop_x;
    int32_t crop_y;
    int32_t crop_width;
    int32_t crop_height;
    bool is_bgr;     // used when channel == 3 (true: BGR, false: RGB)
    bool swap_color;
} image_info;      // [In] used when data_type == kDataTypeImage
struct {
    float mean[3];
    float norm[3];
} normalize;       // [In] used when data_type == kDataTypeImage
```
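The `mean`/`norm` values in the earlier example are the usual ImageNet statistics for images scaled to [0, 1], which suggests a convention along these lines (an assumption; verify against the implementation):

```cpp
// Assumed per-channel normalization of an 8-bit input pixel src[c]:
float dst = (src[c] / 255.0f - normalize.mean[c]) / normalize.norm[c];
```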
`OutputTensorInfo` members:

```cpp
void* data;   // [Out] Pointer to the output data
struct {
    float scale;
    uint8_t zero_point;
} quant;      // [Out] Parameters for dequantization (convert uint8 to float)
```
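For a quantized (uint8) output, these parameters imply the common affine scheme `real = scale * (q - zero_point)`; a manual sketch (normally `GetDataAsFloat()` below handles this for you):

```cpp
const OutputTensorInfo& out = output_tensor_list[0];
const uint8_t* q = static_cast<const uint8_t*>(out.data);
float real0 = out.quant.scale * (static_cast<float>(q[0]) - out.quant.zero_point);
```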
Retrieve the output as float values:

```cpp
const float* val_float = output_tensor_list[0].GetDataAsFloat();
```
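A hypothetical post-processing step for the classification example; the class count of 1001 assumes the TF-slim MobileNetV2 label set (background + 1000 ImageNet classes):

```cpp
#include <algorithm>
#include <cstdio>
const float* max_it = std::max_element(val_float, val_float + 1001);  // 1001: assumed class count
std::printf("class = %td, score = %.3f\n", max_it - val_float, *max_it);
```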