// File inference_calculator.pb.cc
// File List > calculators > tensor > inference_calculator.pb.cc
// Go to the documentation of this file
// NOTE(review): the three lines above are Doxygen page-navigation text that was
// captured along with the source; commented out so the translation unit compiles.
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: mediapipe/calculators/tensor/inference_calculator.proto
#include "mediapipe/calculators/tensor/inference_calculator.pb.h"
#include <algorithm>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
PROTOBUF_PRAGMA_INIT_SEG
namespace mediapipe {
// Constant-initialized constructor used only to build the immutable default
// instance of InferenceCalculatorOptions.Delegate.TfLite (message has no fields).
constexpr InferenceCalculatorOptions_Delegate_TfLite::InferenceCalculatorOptions_Delegate_TfLite(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){}
// Holder for the default instance. The union suppresses automatic destruction
// of _instance, and the empty destructor plus PROTOBUF_ATTRIBUTE_NO_DESTROY
// keep the default instance alive for the whole program (no static-destruction
// order issues).
struct InferenceCalculatorOptions_Delegate_TfLiteDefaultTypeInternal {
constexpr InferenceCalculatorOptions_Delegate_TfLiteDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_Delegate_TfLiteDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_Delegate_TfLite _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_Delegate_TfLiteDefaultTypeInternal _InferenceCalculatorOptions_Delegate_TfLite_default_instance_;
// Constant-initialized constructor for the Delegate.Gpu default instance.
// Initializers mirror the proto field defaults encoded in the descriptor below:
// cache_writing_behavior = 2 (WRITE_OR_ERROR), allow_precision_loss = true,
// usage = 2 (SUSTAINED_SPEED); strings point at the shared empty string.
constexpr InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: cached_kernel_path_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string)
, serialized_model_dir_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string)
, model_token_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string)
, use_advanced_gpu_api_(false)
, api_(0)
, cache_writing_behavior_(2)
, allow_precision_loss_(true)
, usage_(2)
{}
// Never-destroyed holder for the Delegate.Gpu default instance (union blocks
// the destructor; see the TfLite holder above for the pattern).
struct InferenceCalculatorOptions_Delegate_GpuDefaultTypeInternal {
constexpr InferenceCalculatorOptions_Delegate_GpuDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_Delegate_GpuDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_Delegate_Gpu _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_Delegate_GpuDefaultTypeInternal _InferenceCalculatorOptions_Delegate_Gpu_default_instance_;
// Constant-initialized constructor for the Delegate.Nnapi default instance;
// all three string fields start as the shared empty string.
constexpr InferenceCalculatorOptions_Delegate_Nnapi::InferenceCalculatorOptions_Delegate_Nnapi(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: cache_dir_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string)
, model_token_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string)
, accelerator_name_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string){}
// Never-destroyed holder for the Delegate.Nnapi default instance.
struct InferenceCalculatorOptions_Delegate_NnapiDefaultTypeInternal {
constexpr InferenceCalculatorOptions_Delegate_NnapiDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_Delegate_NnapiDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_Delegate_Nnapi _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_Delegate_NnapiDefaultTypeInternal _InferenceCalculatorOptions_Delegate_Nnapi_default_instance_;
// Constant-initialized constructor for the Delegate.Xnnpack default instance;
// num_threads defaults to -1 (the proto default declared in the descriptor).
constexpr InferenceCalculatorOptions_Delegate_Xnnpack::InferenceCalculatorOptions_Delegate_Xnnpack(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: num_threads_(-1){}
// Never-destroyed holder for the Delegate.Xnnpack default instance.
struct InferenceCalculatorOptions_Delegate_XnnpackDefaultTypeInternal {
constexpr InferenceCalculatorOptions_Delegate_XnnpackDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_Delegate_XnnpackDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_Delegate_Xnnpack _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_Delegate_XnnpackDefaultTypeInternal _InferenceCalculatorOptions_Delegate_Xnnpack_default_instance_;
// Constant-initialized constructor for the Delegate default instance; the
// message is a oneof container, so only the oneof case tracker is initialized
// (zeroed: no delegate variant set).
constexpr InferenceCalculatorOptions_Delegate::InferenceCalculatorOptions_Delegate(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: _oneof_case_{}{}
// Never-destroyed holder for the Delegate default instance.
struct InferenceCalculatorOptions_DelegateDefaultTypeInternal {
constexpr InferenceCalculatorOptions_DelegateDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_DelegateDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_Delegate _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_DelegateDefaultTypeInternal _InferenceCalculatorOptions_Delegate_default_instance_;
// Constant-initialized constructor for InputOutputConfig.TensorIndicesMap;
// the repeated packed int32 field starts empty with a zero cached byte size.
constexpr InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: model_tensor_indices_()
, _model_tensor_indices_cached_byte_size_(0){}
// Never-destroyed holder for the TensorIndicesMap default instance.
struct InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMapDefaultTypeInternal {
constexpr InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMapDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMapDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMapDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap_default_instance_;
// Constant-initialized constructor for InputOutputConfig.TensorNamesMap;
// the repeated string field starts empty.
constexpr InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: tensor_names_(){}
// Never-destroyed holder for the TensorNamesMap default instance.
struct InferenceCalculatorOptions_InputOutputConfig_TensorNamesMapDefaultTypeInternal {
constexpr InferenceCalculatorOptions_InputOutputConfig_TensorNamesMapDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_InputOutputConfig_TensorNamesMapDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_InputOutputConfig_TensorNamesMapDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap_default_instance_;
// Constant-initialized constructor for InputOutputConfig.FeedbackTensorLink;
// both tensor-name strings start as the shared empty string.
constexpr InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: from_output_tensor_name_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string)
, to_input_tensor_name_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string){}
// Never-destroyed holder for the FeedbackTensorLink default instance.
struct InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLinkDefaultTypeInternal {
constexpr InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLinkDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLinkDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLinkDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink_default_instance_;
// Constant-initialized constructor for InputOutputConfig; the repeated
// feedback_tensor_links field starts empty and both oneof case trackers
// (input map / output map) start unset.
constexpr InferenceCalculatorOptions_InputOutputConfig::InferenceCalculatorOptions_InputOutputConfig(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: feedback_tensor_links_()
, _oneof_case_{}{}
// Never-destroyed holder for the InputOutputConfig default instance.
struct InferenceCalculatorOptions_InputOutputConfigDefaultTypeInternal {
constexpr InferenceCalculatorOptions_InputOutputConfigDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptions_InputOutputConfigDefaultTypeInternal() {}
union {
InferenceCalculatorOptions_InputOutputConfig _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptions_InputOutputConfigDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_default_instance_;
// Constant-initialized constructor for the top-level InferenceCalculatorOptions
// default instance. Submessage pointers start null; cpu_num_thread defaults to
// -1 as declared in the descriptor; booleans default to false.
constexpr InferenceCalculatorOptions::InferenceCalculatorOptions(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
: model_path_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string)
, delegate_(nullptr)
, input_output_config_(nullptr)
, try_mmap_model_(false)
, use_gpu_(false)
, use_nnapi_(false)
, cpu_num_thread_(-1){}
// Never-destroyed holder for the InferenceCalculatorOptions default instance.
struct InferenceCalculatorOptionsDefaultTypeInternal {
constexpr InferenceCalculatorOptionsDefaultTypeInternal()
: _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {}
~InferenceCalculatorOptionsDefaultTypeInternal() {}
union {
InferenceCalculatorOptions _instance;
};
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT InferenceCalculatorOptionsDefaultTypeInternal _InferenceCalculatorOptions_default_instance_;
} // namespace mediapipe
// File-level reflection storage filled in by AssignDescriptors(): metadata for
// the 10 generated messages, descriptors for the 3 nested enums, and no
// services (the pointer stays null).
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[10];
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[3];
static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto = nullptr;
// Reflection offset table consumed via the MigrationSchema entries below.
// Per message the layout is: _has_bits_ offset (or ~0u when absent),
// _internal_metadata_ offset, then slots for extensions / oneof case /
// weak field map / inlined string donated, then one offset per field, then
// (for messages with has-bits) one has-bit index per field. Values and order
// must match the schemas[] indices exactly — do not reorder.
const uint32_t TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
// ---- Delegate.TfLite (no fields) ----
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_TfLite, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
// ---- Delegate.Gpu ----
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, _has_bits_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, use_advanced_gpu_api_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, api_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, allow_precision_loss_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, cached_kernel_path_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, serialized_model_dir_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, cache_writing_behavior_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, model_token_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu, usage_),
// has-bit indices for the eight Gpu fields above (same order)
3,
4,
6,
0,
1,
5,
2,
7,
// ---- Delegate.Nnapi ----
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi, _has_bits_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi, cache_dir_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi, model_token_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi, accelerator_name_),
// has-bit indices for the three Nnapi fields
0,
1,
2,
// ---- Delegate.Xnnpack ----
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack, _has_bits_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack, num_threads_),
// has-bit index for num_threads
0,
// ---- Delegate (oneof container) ----
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate, _internal_metadata_),
~0u, // no _extensions_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate, _oneof_case_[0]),
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
// the four oneof members live in the union; their offsets are invalid tags
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_Delegate, delegate_),
// ---- InputOutputConfig.TensorIndicesMap ----
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap, model_tensor_indices_),
// ---- InputOutputConfig.TensorNamesMap ----
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap, tensor_names_),
// ---- InputOutputConfig.FeedbackTensorLink ----
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink, _has_bits_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink, from_output_tensor_name_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink, to_input_tensor_name_),
// has-bit indices for the two FeedbackTensorLink fields
0,
1,
// ---- InputOutputConfig (two oneofs + repeated field) ----
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig, _internal_metadata_),
~0u, // no _extensions_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig, _oneof_case_[0]),
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag,
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig, feedback_tensor_links_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig, InputTensorMap_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions_InputOutputConfig, OutputTensorMap_),
// ---- InferenceCalculatorOptions (top level) ----
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, _has_bits_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
~0u, // no _inlined_string_donated_
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, model_path_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, try_mmap_model_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, use_gpu_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, use_nnapi_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, cpu_num_thread_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, delegate_),
PROTOBUF_FIELD_OFFSET(::mediapipe::InferenceCalculatorOptions, input_output_config_),
// has-bit indices for the seven top-level fields
0,
3,
4,
5,
6,
1,
2,
};
// One MigrationSchema per message, in file order. The first number indexes
// into offsets[] above; the second appears to be the position where that
// message's has-bit indices start (-1 when the message has no has-bits) —
// values are generator-produced and must not be edited by hand.
static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
{ 0, -1, -1, sizeof(::mediapipe::InferenceCalculatorOptions_Delegate_TfLite)},
{ 6, 20, -1, sizeof(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu)},
{ 28, 37, -1, sizeof(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi)},
{ 40, 47, -1, sizeof(::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack)},
{ 48, -1, -1, sizeof(::mediapipe::InferenceCalculatorOptions_Delegate)},
{ 59, -1, -1, sizeof(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap)},
{ 66, -1, -1, sizeof(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap)},
{ 73, 81, -1, sizeof(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink)},
{ 83, -1, -1, sizeof(::mediapipe::InferenceCalculatorOptions_InputOutputConfig)},
{ 96, 109, -1, sizeof(::mediapipe::InferenceCalculatorOptions)},
};
// Pointers to the ten default instances defined above, in the same order as
// schemas[] and the file-level metadata array.
static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_Delegate_TfLite_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_Delegate_Gpu_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_Delegate_Nnapi_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_Delegate_Xnnpack_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_Delegate_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::mediapipe::_InferenceCalculatorOptions_default_instance_),
};
// Serialized FileDescriptorProto for inference_calculator.proto (2476 bytes,
// per the DescriptorTable below). The byte content is wire-format protobuf;
// every escape is significant — never edit by hand.
const char descriptor_table_protodef_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
"\n7mediapipe/calculators/tensor/inference"
"_calculator.proto\022\tmediapipe\032$mediapipe/"
"framework/calculator.proto\032,mediapipe/fr"
"amework/calculator_options.proto\"\316\021\n\032Inf"
"erenceCalculatorOptions\022\022\n\nmodel_path\030\001 "
"\001(\t\022\026\n\016try_mmap_model\030\007 \001(\010\022\032\n\007use_gpu\030\002"
" \001(\010:\005falseB\002\030\001\022\034\n\tuse_nnapi\030\003 \001(\010:\005fals"
"eB\002\030\001\022\032\n\016cpu_num_thread\030\004 \001(\005:\002-1\022@\n\010del"
"egate\030\005 \001(\0132..mediapipe.InferenceCalcula"
"torOptions.Delegate\022T\n\023input_output_conf"
"ig\030\010 \001(\01327.mediapipe.InferenceCalculator"
"Options.InputOutputConfig\032\264\010\n\010Delegate\022G"
"\n\006tflite\030\001 \001(\01325.mediapipe.InferenceCalc"
"ulatorOptions.Delegate.TfLiteH\000\022A\n\003gpu\030\002"
" \001(\01322.mediapipe.InferenceCalculatorOpti"
"ons.Delegate.GpuH\000\022E\n\005nnapi\030\003 \001(\01324.medi"
"apipe.InferenceCalculatorOptions.Delegat"
"e.NnapiH\000\022I\n\007xnnpack\030\004 \001(\01326.mediapipe.I"
"nferenceCalculatorOptions.Delegate.Xnnpa"
"ckH\000\032\010\n\006TfLite\032\204\005\n\003Gpu\022#\n\024use_advanced_g"
"pu_api\030\001 \001(\010:\005false\022H\n\003api\030\004 \001(\01626.media"
"pipe.InferenceCalculatorOptions.Delegate"
".Gpu.Api:\003ANY\022\"\n\024allow_precision_loss\030\003 "
"\001(\010:\004true\022\032\n\022cached_kernel_path\030\002 \001(\t\022\034\n"
"\024serialized_model_dir\030\007 \001(\t\022w\n\026cache_wri"
"ting_behavior\030\n \001(\0162G.mediapipe.Inferenc"
"eCalculatorOptions.Delegate.Gpu.CacheWri"
"tingBehavior:\016WRITE_OR_ERROR\022\023\n\013model_to"
"ken\030\010 \001(\t\022a\n\005usage\030\005 \001(\0162A.mediapipe.Inf"
"erenceCalculatorOptions.Delegate.Gpu.Inf"
"erenceUsage:\017SUSTAINED_SPEED\"&\n\003Api\022\007\n\003A"
"NY\020\000\022\n\n\006OPENGL\020\001\022\n\n\006OPENCL\020\002\"G\n\024CacheWri"
"tingBehavior\022\014\n\010NO_WRITE\020\000\022\r\n\tTRY_WRITE\020"
"\001\022\022\n\016WRITE_OR_ERROR\020\002\"N\n\016InferenceUsage\022"
"\017\n\013UNSPECIFIED\020\000\022\026\n\022FAST_SINGLE_ANSWER\020\001"
"\022\023\n\017SUSTAINED_SPEED\020\002\032I\n\005Nnapi\022\021\n\tcache_"
"dir\030\001 \001(\t\022\023\n\013model_token\030\002 \001(\t\022\030\n\020accele"
"rator_name\030\003 \001(\t\032\"\n\007Xnnpack\022\027\n\013num_threa"
"ds\030\001 \001(\005:\002-1B\n\n\010delegate\032\210\006\n\021InputOutput"
"Config\022l\n\030input_tensor_indices_map\030\001 \001(\013"
"2H.mediapipe.InferenceCalculatorOptions."
"InputOutputConfig.TensorIndicesMapH\000\022h\n\026"
"input_tensor_names_map\030\003 \001(\0132F.mediapipe"
".InferenceCalculatorOptions.InputOutputC"
"onfig.TensorNamesMapH\000\022m\n\031output_tensor_"
"indices_map\030\002 \001(\0132H.mediapipe.InferenceC"
"alculatorOptions.InputOutputConfig.Tenso"
"rIndicesMapH\001\022i\n\027output_tensor_names_map"
"\030\004 \001(\0132F.mediapipe.InferenceCalculatorOp"
"tions.InputOutputConfig.TensorNamesMapH\001"
"\022i\n\025feedback_tensor_links\030\005 \003(\0132J.mediap"
"ipe.InferenceCalculatorOptions.InputOutp"
"utConfig.FeedbackTensorLink\0324\n\020TensorInd"
"icesMap\022 \n\024model_tensor_indices\030\001 \003(\005B\002\020"
"\001\032&\n\016TensorNamesMap\022\024\n\014tensor_names\030\001 \003("
"\t\032S\n\022FeedbackTensorLink\022\037\n\027from_output_t"
"ensor_name\030\001 \001(\t\022\034\n\024to_input_tensor_name"
"\030\002 \001(\tB\020\n\016InputTensorMapB\021\n\017OutputTensor"
"Map2T\n\003ext\022\034.mediapipe.CalculatorOptions"
"\030\367\323\313\240\001 \001(\0132%.mediapipe.InferenceCalculat"
"orOptionsBA\n%com.google.mediapipe.calcul"
"ator.protoB\030InferenceCalculatorProto"
;
// This file imports two other proto files; their descriptor tables must be
// registered before ours.
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_deps[2] = {
&::descriptor_table_mediapipe_2fframework_2fcalculator_2eproto,
&::descriptor_table_mediapipe_2fframework_2fcalculator_5foptions_2eproto,
};
// Guards one-time descriptor assignment for this file.
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once;
// Master descriptor table: 2476 = byte length of the serialized descriptor
// above, 2 = dependency count, 10 = message count.
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto = {
false, false, 2476, descriptor_table_protodef_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto, "mediapipe/calculators/tensor/inference_calculator.proto",
&descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once, descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_deps, 2, 10,
schemas, file_default_instances, TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto::offsets,
file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto, file_level_enum_descriptors_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto, file_level_service_descriptors_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto,
};
// Weak accessor so other translation units can reach this table without a
// strong link-time dependency.
PROTOBUF_ATTRIBUTE_WEAK const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable* descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter() {
return &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
}
// Force running AddDescriptors() at dynamic initialization time.
PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto(&descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto);
namespace mediapipe {
// Returns the EnumDescriptor for Delegate.Gpu.Api (slot 0 of the file-level
// enum array), running descriptor assignment for this file first if needed.
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* InferenceCalculatorOptions_Delegate_Gpu_Api_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto);
return file_level_enum_descriptors_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[0];
}
// Returns true iff |value| names a declared Delegate.Gpu.Api constant
// (ANY = 0, OPENGL = 1, OPENCL = 2). The declared values form a contiguous
// range, so a bounds check is equivalent to enumerating the cases.
bool InferenceCalculatorOptions_Delegate_Gpu_Api_IsValid(int value) {
  return value >= 0 && value <= 2;
}
// Out-of-line definitions of the Api enum's static constexpr members, required
// by pre-C++17 ODR rules (no inline variables); skipped on C++17 and newer.
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu::ANY;
constexpr InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu::OPENGL;
constexpr InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu::OPENCL;
constexpr InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu::Api_MIN;
constexpr InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu::Api_MAX;
constexpr int InferenceCalculatorOptions_Delegate_Gpu::Api_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
// Returns the EnumDescriptor for Delegate.Gpu.CacheWritingBehavior (slot 1),
// running descriptor assignment for this file first if needed.
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto);
return file_level_enum_descriptors_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[1];
}
// Returns true iff |value| names a declared CacheWritingBehavior constant
// (NO_WRITE = 0, TRY_WRITE = 1, WRITE_OR_ERROR = 2). The constants are
// contiguous, so a simple range test covers exactly the declared cases.
bool InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_IsValid(int value) {
  return 0 <= value && value <= 2;
}
// Out-of-line definitions of the CacheWritingBehavior enum's static constexpr
// members for pre-C++17 builds (no inline variables before C++17).
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu::NO_WRITE;
constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu::TRY_WRITE;
constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu::WRITE_OR_ERROR;
constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_MIN;
constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_MAX;
constexpr int InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
// Returns the EnumDescriptor for Delegate.Gpu.InferenceUsage (slot 2),
// running descriptor assignment for this file first if needed.
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto);
return file_level_enum_descriptors_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[2];
}
// Returns true iff |value| names a declared InferenceUsage constant
// (UNSPECIFIED = 0, FAST_SINGLE_ANSWER = 1, SUSTAINED_SPEED = 2). The declared
// values are the contiguous range [0, 2].
bool InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_IsValid(int value) {
  return static_cast<unsigned int>(value) <= 2u;
}
// Out-of-line definitions of the InferenceUsage enum's static constexpr
// members for pre-C++17 builds (no inline variables before C++17).
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu::UNSPECIFIED;
constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu::FAST_SINGLE_ANSWER;
constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu::SUSTAINED_SPEED;
constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_MIN;
constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_MAX;
constexpr int InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
// ===================================================================
// Internal helper class; empty because TfLite has no fields and therefore no
// has-bit accessors to generate.
class InferenceCalculatorOptions_Delegate_TfLite::_Internal {
public:
};
// Arena constructor. The message has no fields, so it derives from
// ZeroFieldsBase and needs no member initialization of its own.
InferenceCalculatorOptions_Delegate_TfLite::InferenceCalculatorOptions_Delegate_TfLite(::PROTOBUF_NAMESPACE_ID::Arena* arena,
bool is_message_owned)
: ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase(arena, is_message_owned) {
// @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.Delegate.TfLite)
}
// Copy constructor: with no fields to copy, only unknown fields carried in the
// internal metadata are merged over.
InferenceCalculatorOptions_Delegate_TfLite::InferenceCalculatorOptions_Delegate_TfLite(const InferenceCalculatorOptions_Delegate_TfLite& from)
: ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase() {
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
// @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.Delegate.TfLite)
}
// ClassData wires the generic Copy/Merge implementations from ZeroFieldsBase
// into the Message interface for this field-less type.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_Delegate_TfLite::_class_data_ = {
::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl,
::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl,
};
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_Delegate_TfLite::GetClassData() const { return &_class_data_; }
// Lazily assigns descriptors for this file (guarded by the once-flag) and
// returns this message's reflection metadata (slot 0 of the file-level array).
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_Delegate_TfLite::GetMetadata() const {
return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
&descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[0]);
}
// ===================================================================
// Internal helper exposing a has-bit setter per optional field of
// Delegate.Gpu. Each mask is 1 << (has-bit index); the indices match the
// has-bit entries for Gpu in the offsets table (strings occupy bits 0-2,
// scalars/enums bits 3-7). Masks must stay in sync with that table.
class InferenceCalculatorOptions_Delegate_Gpu::_Internal {
public:
using HasBits = decltype(std::declval<InferenceCalculatorOptions_Delegate_Gpu>()._has_bits_);
static void set_has_use_advanced_gpu_api(HasBits* has_bits) {
(*has_bits)[0] |= 8u;  // bit 3
}
static void set_has_api(HasBits* has_bits) {
(*has_bits)[0] |= 16u;  // bit 4
}
static void set_has_allow_precision_loss(HasBits* has_bits) {
(*has_bits)[0] |= 64u;  // bit 6
}
static void set_has_cached_kernel_path(HasBits* has_bits) {
(*has_bits)[0] |= 1u;  // bit 0
}
static void set_has_serialized_model_dir(HasBits* has_bits) {
(*has_bits)[0] |= 2u;  // bit 1
}
static void set_has_cache_writing_behavior(HasBits* has_bits) {
(*has_bits)[0] |= 32u;  // bit 5
}
static void set_has_model_token(HasBits* has_bits) {
(*has_bits)[0] |= 4u;  // bit 2
}
static void set_has_usage(HasBits* has_bits) {
(*has_bits)[0] |= 128u;  // bit 7
}
};
// Arena constructor: shared field initialization lives in SharedCtor(). When
// the message is arena-allocated (not message-owned) its destructor must be
// registered with the arena so string fields are released.
InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu(::PROTOBUF_NAMESPACE_ID::Arena* arena,
bool is_message_owned)
: ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
SharedCtor();
if (!is_message_owned) {
RegisterArenaDtor(arena);
}
// @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
}
// Copy constructor. Has-bits and unknown fields are copied first; each string
// field is default-initialized and then copied only if set in the source; the
// trailing POD fields (use_advanced_gpu_api_ .. usage_, contiguous in the
// class layout) are copied with a single memcpy.
InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu(const InferenceCalculatorOptions_Delegate_Gpu& from)
: ::PROTOBUF_NAMESPACE_ID::Message(),
_has_bits_(from._has_bits_) {
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
cached_kernel_path_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
cached_kernel_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
if (from._internal_has_cached_kernel_path()) {
cached_kernel_path_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_cached_kernel_path(),
GetArenaForAllocation());
}
serialized_model_dir_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
serialized_model_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
if (from._internal_has_serialized_model_dir()) {
serialized_model_dir_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_serialized_model_dir(),
GetArenaForAllocation());
}
model_token_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
if (from._internal_has_model_token()) {
model_token_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_model_token(),
GetArenaForAllocation());
}
// Bulk-copy the POD span [use_advanced_gpu_api_, usage_] in one memcpy.
::memcpy(&use_advanced_gpu_api_, &from.use_advanced_gpu_api_,
static_cast<size_t>(reinterpret_cast<char*>(&usage_) -
reinterpret_cast<char*>(&use_advanced_gpu_api_)) + sizeof(usage_));
// @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
}
// Shared constructor body used by both the arena and copy constructors.
// Strings are pointed at the shared empty default; zero-default scalars
// (use_advanced_gpu_api_ .. api_) are cleared with a single memset over their
// contiguous span, then the fields with non-zero proto defaults are assigned
// explicitly (cache_writing_behavior = WRITE_OR_ERROR(2),
// allow_precision_loss = true, usage = SUSTAINED_SPEED(2)).
inline void InferenceCalculatorOptions_Delegate_Gpu::SharedCtor() {
  cached_kernel_path_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    cached_kernel_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  serialized_model_dir_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    serialized_model_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  model_token_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  ::memset(reinterpret_cast<char*>(this) + static_cast<size_t>(
      reinterpret_cast<char*>(&use_advanced_gpu_api_) - reinterpret_cast<char*>(this)),
      0, static_cast<size_t>(reinterpret_cast<char*>(&api_) -
      reinterpret_cast<char*>(&use_advanced_gpu_api_)) + sizeof(api_));
  cache_writing_behavior_ = 2;
  allow_precision_loss_ = true;
  usage_ = 2;
}
// Destructor. Arena-allocated instances skip teardown entirely — the arena
// owns their memory and string payloads are reclaimed via the arena itself.
InferenceCalculatorOptions_Delegate_Gpu::~InferenceCalculatorOptions_Delegate_Gpu() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Frees heap-allocated string payloads. Only valid for non-arena instances
// (enforced by the DCHECK); strings still pointing at the shared empty
// default are left alone by DestroyNoArena.
inline void InferenceCalculatorOptions_Delegate_Gpu::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  cached_kernel_path_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  serialized_model_dir_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  model_token_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
// Arena destruction hook. This message has no members needing explicit
// arena-time teardown, so the recovered pointer is intentionally unused.
void InferenceCalculatorOptions_Delegate_Gpu::ArenaDtor(void* object) {
  auto* self =
      reinterpret_cast<InferenceCalculatorOptions_Delegate_Gpu*>(object);
  (void)self;
}
// Intentionally a no-op: no member of this message requires an arena
// destructor callback.
void InferenceCalculatorOptions_Delegate_Gpu::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the last computed serialized size (atomically, inside CachedSize)
// so GetCachedSize() can return it without recomputation.
void InferenceCalculatorOptions_Delegate_Gpu::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Resets all fields to their proto defaults. The 0x07 mask covers the three
// string fields (bits 0-2); the 0xf8 mask covers the five scalar/enum fields
// (bits 3-7), which are bulk-zeroed then re-assigned their non-zero defaults.
void InferenceCalculatorOptions_Delegate_Gpu::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x00000007u) {
    if (cached_has_bits & 0x00000001u) {
      cached_kernel_path_.ClearNonDefaultToEmpty();
    }
    if (cached_has_bits & 0x00000002u) {
      serialized_model_dir_.ClearNonDefaultToEmpty();
    }
    if (cached_has_bits & 0x00000004u) {
      model_token_.ClearNonDefaultToEmpty();
    }
  }
  if (cached_has_bits & 0x000000f8u) {
    ::memset(&use_advanced_gpu_api_, 0, static_cast<size_t>(
        reinterpret_cast<char*>(&api_) -
        reinterpret_cast<char*>(&use_advanced_gpu_api_)) + sizeof(api_));
    // Fields with non-zero proto defaults must be restored after the memset.
    cache_writing_behavior_ = 2;
    allow_precision_loss_ = true;
    usage_ = 2;
  }
  _has_bits_.Clear();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser for Delegate.Gpu. Dispatches on the field number
// (tag >> 3) and checks the low tag byte against the expected wire type.
// Presence bits for scalar fields are accumulated in a local HasBits and
// OR-ed into _has_bits_ once at the end; unrecognized tags and out-of-range
// enum values are preserved in the unknown-field set.
const char* InferenceCalculatorOptions_Delegate_Gpu::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  _Internal::HasBits has_bits{};
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // optional bool use_advanced_gpu_api = 1 [default = false];
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 8)) {
          _Internal::set_has_use_advanced_gpu_api(&has_bits);
          use_advanced_gpu_api_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional string cached_kernel_path = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_cached_kernel_path();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          // proto2 string: UTF-8 validity is only checked in debug builds.
          #ifndef NDEBUG
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cached_kernel_path");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional bool allow_precision_loss = 3 [default = true];
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 24)) {
          _Internal::set_has_allow_precision_loss(&has_bits);
          allow_precision_loss_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.Api api = 4 [default = ANY];
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 32)) {
          uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          // Unknown enum numbers are kept as unknown fields (proto2 rule).
          if (PROTOBUF_PREDICT_TRUE(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api_IsValid(val))) {
            _internal_set_api(static_cast<::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api>(val));
          } else {
            ::PROTOBUF_NAMESPACE_ID::internal::WriteVarint(4, val, mutable_unknown_fields());
          }
        } else
          goto handle_unusual;
        continue;
      // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.InferenceUsage usage = 5 [default = SUSTAINED_SPEED];
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 40)) {
          uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          if (PROTOBUF_PREDICT_TRUE(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_IsValid(val))) {
            _internal_set_usage(static_cast<::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage>(val));
          } else {
            ::PROTOBUF_NAMESPACE_ID::internal::WriteVarint(5, val, mutable_unknown_fields());
          }
        } else
          goto handle_unusual;
        continue;
      // optional string serialized_model_dir = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 58)) {
          auto str = _internal_mutable_serialized_model_dir();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          #ifndef NDEBUG
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.Delegate.Gpu.serialized_model_dir");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional string model_token = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 66)) {
          auto str = _internal_mutable_model_token();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          #ifndef NDEBUG
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.Delegate.Gpu.model_token");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.CacheWritingBehavior cache_writing_behavior = 10 [default = WRITE_OR_ERROR];
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 80)) {
          uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          if (PROTOBUF_PREDICT_TRUE(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_IsValid(val))) {
            _internal_set_cache_writing_behavior(static_cast<::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior>(val));
          } else {
            ::PROTOBUF_NAMESPACE_ID::internal::WriteVarint(10, val, mutable_unknown_fields());
          }
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag terminates this message; anything else is
    // stashed in the unknown-field set.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  _has_bits_.Or(has_bits);
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes set fields (checked via cached has-bits) in field-number order,
// followed by any preserved unknown fields. String fields get a debug-time
// UTF-8 verification before being written.
uint8_t* InferenceCalculatorOptions_Delegate_Gpu::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  // optional bool use_advanced_gpu_api = 1 [default = false];
  if (cached_has_bits & 0x00000008u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(1, this->_internal_use_advanced_gpu_api(), target);
  }
  // optional string cached_kernel_path = 2;
  if (cached_has_bits & 0x00000001u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
        this->_internal_cached_kernel_path().data(), static_cast<int>(this->_internal_cached_kernel_path().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
        "mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cached_kernel_path");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_cached_kernel_path(), target);
  }
  // optional bool allow_precision_loss = 3 [default = true];
  if (cached_has_bits & 0x00000040u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->_internal_allow_precision_loss(), target);
  }
  // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.Api api = 4 [default = ANY];
  if (cached_has_bits & 0x00000010u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
        4, this->_internal_api(), target);
  }
  // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.InferenceUsage usage = 5 [default = SUSTAINED_SPEED];
  if (cached_has_bits & 0x00000080u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
        5, this->_internal_usage(), target);
  }
  // optional string serialized_model_dir = 7;
  if (cached_has_bits & 0x00000002u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
        this->_internal_serialized_model_dir().data(), static_cast<int>(this->_internal_serialized_model_dir().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
        "mediapipe.InferenceCalculatorOptions.Delegate.Gpu.serialized_model_dir");
    target = stream->WriteStringMaybeAliased(
        7, this->_internal_serialized_model_dir(), target);
  }
  // optional string model_token = 8;
  if (cached_has_bits & 0x00000004u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
        this->_internal_model_token().data(), static_cast<int>(this->_internal_model_token().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
        "mediapipe.InferenceCalculatorOptions.Delegate.Gpu.model_token");
    target = stream->WriteStringMaybeAliased(
        8, this->_internal_model_token(), target);
  }
  // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.CacheWritingBehavior cache_writing_behavior = 10 [default = WRITE_OR_ERROR];
  if (cached_has_bits & 0x00000020u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
        10, this->_internal_cache_writing_behavior(), target);
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
  return target;
}
// Computes the serialized byte size of all set fields. Each "1 +" is the
// one-byte tag for field numbers < 16; the outer 0xff check skips the whole
// body when no field is set. The result (plus unknown-field size) is cached
// via MaybeComputeUnknownFieldsSize for later GetCachedSize() calls.
size_t InferenceCalculatorOptions_Delegate_Gpu::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
  size_t total_size = 0;
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x000000ffu) {
    // optional string cached_kernel_path = 2;
    if (cached_has_bits & 0x00000001u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_cached_kernel_path());
    }
    // optional string serialized_model_dir = 7;
    if (cached_has_bits & 0x00000002u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_serialized_model_dir());
    }
    // optional string model_token = 8;
    if (cached_has_bits & 0x00000004u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_model_token());
    }
    // optional bool use_advanced_gpu_api = 1 [default = false];
    if (cached_has_bits & 0x00000008u) {
      total_size += 1 + 1;
    }
    // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.Api api = 4 [default = ANY];
    if (cached_has_bits & 0x00000010u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->_internal_api());
    }
    // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.CacheWritingBehavior cache_writing_behavior = 10 [default = WRITE_OR_ERROR];
    if (cached_has_bits & 0x00000020u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->_internal_cache_writing_behavior());
    }
    // optional bool allow_precision_loss = 3 [default = true];
    if (cached_has_bits & 0x00000040u) {
      total_size += 1 + 1;
    }
    // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.InferenceUsage usage = 5 [default = SUSTAINED_SPEED];
    if (cached_has_bits & 0x00000080u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->_internal_usage());
    }
  }
  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Static dispatch table used by the base Message for CopyFrom/MergeFrom
// without virtual calls.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_Delegate_Gpu::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_Delegate_Gpu::MergeImpl
};
// Exposes the static dispatch table to the base Message machinery.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_Delegate_Gpu::GetClassData() const { return &_class_data_; }
// Type-erased merge entry point invoked through ClassData: downcasts both
// messages and forwards to the strongly-typed MergeFrom.
void InferenceCalculatorOptions_Delegate_Gpu::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                      const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  auto* dst = static_cast<InferenceCalculatorOptions_Delegate_Gpu*>(to);
  const auto& src =
      static_cast<const InferenceCalculatorOptions_Delegate_Gpu&>(from);
  dst->MergeFrom(src);
}
// Merges set fields of `from` into this message (proto2 semantics: a set
// field in `from` overwrites the destination). Strings go through setters to
// get arena-correct copies; scalars are assigned directly and the source's
// has-bits are OR-ed in once at the end.
void InferenceCalculatorOptions_Delegate_Gpu::MergeFrom(const InferenceCalculatorOptions_Delegate_Gpu& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
  GOOGLE_DCHECK_NE(&from, this);
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = from._has_bits_[0];
  if (cached_has_bits & 0x000000ffu) {
    if (cached_has_bits & 0x00000001u) {
      _internal_set_cached_kernel_path(from._internal_cached_kernel_path());
    }
    if (cached_has_bits & 0x00000002u) {
      _internal_set_serialized_model_dir(from._internal_serialized_model_dir());
    }
    if (cached_has_bits & 0x00000004u) {
      _internal_set_model_token(from._internal_model_token());
    }
    if (cached_has_bits & 0x00000008u) {
      use_advanced_gpu_api_ = from.use_advanced_gpu_api_;
    }
    if (cached_has_bits & 0x00000010u) {
      api_ = from.api_;
    }
    if (cached_has_bits & 0x00000020u) {
      cache_writing_behavior_ = from.cache_writing_behavior_;
    }
    if (cached_has_bits & 0x00000040u) {
      allow_precision_loss_ = from.allow_precision_loss_;
    }
    if (cached_has_bits & 0x00000080u) {
      usage_ = from.usage_;
    }
    _has_bits_[0] |= cached_has_bits;
  }
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Replaces this message's contents with a copy of `from`.
// Implemented, as usual for protobuf, as Clear() followed by MergeFrom();
// self-assignment is a no-op.
void InferenceCalculatorOptions_Delegate_Gpu::CopyFrom(const InferenceCalculatorOptions_Delegate_Gpu& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
  if (this == &from) {
    return;
  }
  Clear();
  MergeFrom(from);
}
// Always true: this message declares no required fields.
bool InferenceCalculatorOptions_Delegate_Gpu::IsInitialized() const {
  return true;
}
// Swaps all state with `other`. Strings use the arena-aware ArenaStringPtr
// swap; the scalar span use_advanced_gpu_api_..api_ is swapped bytewise via
// memswap (layout-dependent), and the remaining three fields with non-zero
// defaults are swapped individually.
void InferenceCalculatorOptions_Delegate_Gpu::InternalSwap(InferenceCalculatorOptions_Delegate_Gpu* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_has_bits_[0], other->_has_bits_[0]);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &cached_kernel_path_, lhs_arena,
      &other->cached_kernel_path_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &serialized_model_dir_, lhs_arena,
      &other->serialized_model_dir_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &model_token_, lhs_arena,
      &other->model_token_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(InferenceCalculatorOptions_Delegate_Gpu, api_)
      + sizeof(InferenceCalculatorOptions_Delegate_Gpu::api_)
      - PROTOBUF_FIELD_OFFSET(InferenceCalculatorOptions_Delegate_Gpu, use_advanced_gpu_api_)>(
          reinterpret_cast<char*>(&use_advanced_gpu_api_),
          reinterpret_cast<char*>(&other->use_advanced_gpu_api_));
  swap(cache_writing_behavior_, other->cache_writing_behavior_);
  swap(allow_precision_loss_, other->allow_precision_loss_);
  swap(usage_, other->usage_);
}
// Lazily resolves and returns reflection metadata; index [1] selects this
// message within the file-level metadata table.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_Delegate_Gpu::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[1]);
}
// ===================================================================
// Internal helper exposing has-bit setters for Delegate.Nnapi's three
// optional string fields (bit 0: cache_dir, bit 1: model_token,
// bit 2: accelerator_name), used by the parser.
class InferenceCalculatorOptions_Delegate_Nnapi::_Internal {
 public:
  using HasBits = decltype(std::declval<InferenceCalculatorOptions_Delegate_Nnapi>()._has_bits_);
  static void set_has_cache_dir(HasBits* has_bits) {
    (*has_bits)[0] |= 1u;
  }
  static void set_has_model_token(HasBits* has_bits) {
    (*has_bits)[0] |= 2u;
  }
  static void set_has_accelerator_name(HasBits* has_bits) {
    (*has_bits)[0] |= 4u;
  }
};
// Arena-aware constructor; mirrors the Gpu delegate: SharedCtor() initializes
// fields, and non-message-owned instances register the arena destructor hook.
InferenceCalculatorOptions_Delegate_Nnapi::InferenceCalculatorOptions_Delegate_Nnapi(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
}
// Copy constructor. Each string is reset to the shared empty default, then
// copied only if set in the source (checked via the copied has-bits).
InferenceCalculatorOptions_Delegate_Nnapi::InferenceCalculatorOptions_Delegate_Nnapi(const InferenceCalculatorOptions_Delegate_Nnapi& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _has_bits_(from._has_bits_) {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  cache_dir_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    cache_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from._internal_has_cache_dir()) {
    cache_dir_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_cache_dir(),
      GetArenaForAllocation());
  }
  model_token_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from._internal_has_model_token()) {
    model_token_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_model_token(),
      GetArenaForAllocation());
  }
  accelerator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    accelerator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from._internal_has_accelerator_name()) {
    accelerator_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_accelerator_name(),
      GetArenaForAllocation());
  }
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
}
// Shared constructor body: points all three string fields at the shared
// empty default. No scalar fields exist in this message, so no memset.
inline void InferenceCalculatorOptions_Delegate_Nnapi::SharedCtor() {
  cache_dir_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    cache_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  model_token_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  accelerator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    accelerator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
// Destructor; arena-allocated instances skip teardown (the arena owns them).
InferenceCalculatorOptions_Delegate_Nnapi::~InferenceCalculatorOptions_Delegate_Nnapi() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Frees heap-allocated string payloads (non-arena instances only).
inline void InferenceCalculatorOptions_Delegate_Nnapi::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  cache_dir_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  model_token_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  accelerator_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
// Arena destruction hook. Nothing in this message needs arena-time teardown,
// so the recovered pointer is intentionally unused.
void InferenceCalculatorOptions_Delegate_Nnapi::ArenaDtor(void* object) {
  auto* self =
      reinterpret_cast<InferenceCalculatorOptions_Delegate_Nnapi*>(object);
  (void)self;
}
// Intentionally a no-op: no member requires an arena destructor callback.
void InferenceCalculatorOptions_Delegate_Nnapi::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the last computed serialized size for GetCachedSize().
void InferenceCalculatorOptions_Delegate_Nnapi::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Resets the three string fields (bits 0-2) to empty, then clears has-bits
// and unknown fields.
void InferenceCalculatorOptions_Delegate_Nnapi::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x00000007u) {
    if (cached_has_bits & 0x00000001u) {
      cache_dir_.ClearNonDefaultToEmpty();
    }
    if (cached_has_bits & 0x00000002u) {
      model_token_.ClearNonDefaultToEmpty();
    }
    if (cached_has_bits & 0x00000004u) {
      accelerator_name_.ClearNonDefaultToEmpty();
    }
  }
  _has_bits_.Clear();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser for Delegate.Nnapi: three optional length-delimited
// string fields. Unknown tags go to the unknown-field set; tag 0 or an
// end-group tag terminates the message.
const char* InferenceCalculatorOptions_Delegate_Nnapi::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  _Internal::HasBits has_bits{};
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // optional string cache_dir = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_cache_dir();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          // proto2 string: UTF-8 validity is only checked in debug builds.
          #ifndef NDEBUG
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.cache_dir");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional string model_token = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_model_token();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          #ifndef NDEBUG
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.model_token");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional string accelerator_name = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 26)) {
          auto str = _internal_mutable_accelerator_name();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          #ifndef NDEBUG
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.accelerator_name");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  _has_bits_.Or(has_bits);
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes set string fields in field-number order (with debug-time UTF-8
// verification), followed by preserved unknown fields.
uint8_t* InferenceCalculatorOptions_Delegate_Nnapi::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  // optional string cache_dir = 1;
  if (cached_has_bits & 0x00000001u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
        this->_internal_cache_dir().data(), static_cast<int>(this->_internal_cache_dir().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
        "mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.cache_dir");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_cache_dir(), target);
  }
  // optional string model_token = 2;
  if (cached_has_bits & 0x00000002u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
        this->_internal_model_token().data(), static_cast<int>(this->_internal_model_token().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
        "mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.model_token");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_model_token(), target);
  }
  // optional string accelerator_name = 3;
  if (cached_has_bits & 0x00000004u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
        this->_internal_accelerator_name().data(), static_cast<int>(this->_internal_accelerator_name().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
        "mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.accelerator_name");
    target = stream->WriteStringMaybeAliased(
        3, this->_internal_accelerator_name(), target);
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
  return target;
}
// Computes the serialized size of set fields ("1 +" = one-byte tag for field
// numbers < 16) and caches the total via MaybeComputeUnknownFieldsSize.
size_t InferenceCalculatorOptions_Delegate_Nnapi::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
  size_t total_size = 0;
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x00000007u) {
    // optional string cache_dir = 1;
    if (cached_has_bits & 0x00000001u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_cache_dir());
    }
    // optional string model_token = 2;
    if (cached_has_bits & 0x00000002u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_model_token());
    }
    // optional string accelerator_name = 3;
    if (cached_has_bits & 0x00000004u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_accelerator_name());
    }
  }
  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Static dispatch table used by the base Message for CopyFrom/MergeFrom.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_Delegate_Nnapi::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_Delegate_Nnapi::MergeImpl
};
// Exposes the static dispatch table to the base Message machinery.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_Delegate_Nnapi::GetClassData() const { return &_class_data_; }
// Type-erased merge entry point invoked through ClassData: downcasts both
// messages and forwards to the strongly-typed MergeFrom.
void InferenceCalculatorOptions_Delegate_Nnapi::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                      const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  auto* dst = static_cast<InferenceCalculatorOptions_Delegate_Nnapi*>(to);
  const auto& src =
      static_cast<const InferenceCalculatorOptions_Delegate_Nnapi&>(from);
  dst->MergeFrom(src);
}
// Merges set string fields of `from` into this message (proto2 semantics:
// a set field in `from` overwrites the destination), then merges unknown
// fields. The setters update this message's has-bits.
void InferenceCalculatorOptions_Delegate_Nnapi::MergeFrom(const InferenceCalculatorOptions_Delegate_Nnapi& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
  GOOGLE_DCHECK_NE(&from, this);
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = from._has_bits_[0];
  if (cached_has_bits & 0x00000007u) {
    if (cached_has_bits & 0x00000001u) {
      _internal_set_cache_dir(from._internal_cache_dir());
    }
    if (cached_has_bits & 0x00000002u) {
      _internal_set_model_token(from._internal_model_token());
    }
    if (cached_has_bits & 0x00000004u) {
      _internal_set_accelerator_name(from._internal_accelerator_name());
    }
  }
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Replaces this message's contents with a copy of `from`.
// Implemented, as usual for protobuf, as Clear() followed by MergeFrom();
// self-assignment is a no-op.
void InferenceCalculatorOptions_Delegate_Nnapi::CopyFrom(const InferenceCalculatorOptions_Delegate_Nnapi& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
  if (this == &from) {
    return;
  }
  Clear();
  MergeFrom(from);
}
// Always true: this message declares no required fields.
bool InferenceCalculatorOptions_Delegate_Nnapi::IsInitialized() const {
  return true;
}
// Swaps all state with `other`: metadata, has-bits, and the three string
// fields via the arena-aware ArenaStringPtr swap.
void InferenceCalculatorOptions_Delegate_Nnapi::InternalSwap(InferenceCalculatorOptions_Delegate_Nnapi* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_has_bits_[0], other->_has_bits_[0]);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &cache_dir_, lhs_arena,
      &other->cache_dir_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &model_token_, lhs_arena,
      &other->model_token_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &accelerator_name_, lhs_arena,
      &other->accelerator_name_, rhs_arena
  );
}
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_Delegate_Nnapi::GetMetadata() const {
return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
&descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[2]);
}
// ===================================================================
// Private helper giving the parser direct access to this message's
// has-bits; bit 0 tracks presence of `num_threads`.
class InferenceCalculatorOptions_Delegate_Xnnpack::_Internal {
 public:
  using HasBits = decltype(std::declval<InferenceCalculatorOptions_Delegate_Xnnpack>()._has_bits_);
  static void set_has_num_threads(HasBits* has_bits) {
    (*has_bits)[0] |= 1u;
  }
};

// Arena-enabled constructor. When the message is arena-owned (and not
// "message owned"), its destructor hook is registered with the arena.
InferenceCalculatorOptions_Delegate_Xnnpack::InferenceCalculatorOptions_Delegate_Xnnpack(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
}

// Copy constructor: copies has-bits, unknown fields, and the single
// num_threads field.
InferenceCalculatorOptions_Delegate_Xnnpack::InferenceCalculatorOptions_Delegate_Xnnpack(const InferenceCalculatorOptions_Delegate_Xnnpack& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _has_bits_(from._has_bits_) {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  num_threads_ = from.num_threads_;
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
}

// Shared field initialization; num_threads defaults to -1 per the proto
// definition ([default = -1]).
inline void InferenceCalculatorOptions_Delegate_Xnnpack::SharedCtor() {
num_threads_ = -1;
}

// Destructor: arena-owned instances skip teardown entirely — the arena
// reclaims their memory in bulk.
InferenceCalculatorOptions_Delegate_Xnnpack::~InferenceCalculatorOptions_Delegate_Xnnpack() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}

// No owned heap state beyond metadata, so the shared dtor only asserts
// that it is never invoked for an arena-allocated instance.
inline void InferenceCalculatorOptions_Delegate_Xnnpack::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

// Arena destruction hook; nothing to clean up for this message.
void InferenceCalculatorOptions_Delegate_Xnnpack::ArenaDtor(void* object) {
  InferenceCalculatorOptions_Delegate_Xnnpack* _this = reinterpret_cast< InferenceCalculatorOptions_Delegate_Xnnpack* >(object);
  (void)_this;
}
// Intentionally empty: no arena destructor registration is needed.
void InferenceCalculatorOptions_Delegate_Xnnpack::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the serialized-size cache used by serialization fast paths.
void InferenceCalculatorOptions_Delegate_Xnnpack::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Resets the message to its default state: num_threads back to its proto
// default of -1, all has-bits cleared, unknown fields discarded.
void InferenceCalculatorOptions_Delegate_Xnnpack::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  num_threads_ = -1;
  _has_bits_.Clear();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser (tail-call parse loop). Recognized fields are decoded
// directly; anything else is preserved in the UnknownFieldSet. On malformed
// input the function returns nullptr via the `failure` path.
const char* InferenceCalculatorOptions_Delegate_Xnnpack::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  _Internal::HasBits has_bits{};
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // optional int32 num_threads = 1 [default = -1];
      case 1:
        // Tag byte 8 == field 1, wire type 0 (varint).
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 8)) {
          _Internal::set_has_num_threads(&has_bits);
          num_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag terminates this message's scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Unrecognized field: stash it in the unknown-field set.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  // Commit locally-accumulated has-bits only once parsing succeeds.
  _has_bits_.Or(has_bits);
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes set fields to the output stream in field-number order, then
// appends any preserved unknown fields. Returns the advanced write cursor.
uint8_t* InferenceCalculatorOptions_Delegate_Xnnpack::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  cached_has_bits = _has_bits_[0];
  // optional int32 num_threads = 1 [default = -1];
  if (cached_has_bits & 0x00000001u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(1, this->_internal_num_threads(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
  return target;
}
// Computes the serialized size in bytes (tag + varint for num_threads when
// set, plus unknown fields) and caches it for the serializer.
size_t InferenceCalculatorOptions_Delegate_Xnnpack::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
  size_t total_size = 0;

  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // optional int32 num_threads = 1 [default = -1];
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x00000001u) {
    // Int32SizePlusOne folds in the single-byte tag for field 1.
    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32SizePlusOne(this->_internal_num_threads());
  }

  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Class-data table for the generic Message runtime: size-checked copy plus
// this type's merge trampoline.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_Delegate_Xnnpack::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_Delegate_Xnnpack::MergeImpl
};
// Exposes the table above to the base Message class.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_Delegate_Xnnpack::GetClassData() const { return &_class_data_; }

// Type-erased merge entry point: downcasts and forwards to MergeFrom.
void InferenceCalculatorOptions_Delegate_Xnnpack::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                      const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  static_cast<InferenceCalculatorOptions_Delegate_Xnnpack *>(to)->MergeFrom(
      static_cast<const InferenceCalculatorOptions_Delegate_Xnnpack &>(from));
}

// Merges `from` into *this: num_threads is copied only when present in
// `from`; unknown fields are appended.
void InferenceCalculatorOptions_Delegate_Xnnpack::MergeFrom(const InferenceCalculatorOptions_Delegate_Xnnpack& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
  GOOGLE_DCHECK_NE(&from, this);
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_has_num_threads()) {
    _internal_set_num_threads(from._internal_num_threads());
  }
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Copy assignment in protobuf style: clear then merge. Self-copy is a no-op.
void InferenceCalculatorOptions_Delegate_Xnnpack::CopyFrom(const InferenceCalculatorOptions_Delegate_Xnnpack& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// No required fields, so every instance is initialized.
bool InferenceCalculatorOptions_Delegate_Xnnpack::IsInitialized() const {
  return true;
}

// O(1) member-wise swap of metadata, has-bits, and the scalar field.
void InferenceCalculatorOptions_Delegate_Xnnpack::InternalSwap(InferenceCalculatorOptions_Delegate_Xnnpack* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_has_bits_[0], other->_has_bits_[0]);
  swap(num_threads_, other->num_threads_);
}

// Lazily resolves and returns reflection metadata — entry [3] of the
// file-level metadata table.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_Delegate_Xnnpack::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[3]);
}
// ===================================================================
// Private accessors that read the active member of the `delegate` oneof
// union directly. Each is only valid while the corresponding oneof case is
// active — callers (e.g. the serializer) check delegate_case() first.
class InferenceCalculatorOptions_Delegate::_Internal {
 public:
  static const ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite& tflite(const InferenceCalculatorOptions_Delegate* msg);
  static const ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu& gpu(const InferenceCalculatorOptions_Delegate* msg);
  static const ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi& nnapi(const InferenceCalculatorOptions_Delegate* msg);
  static const ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack& xnnpack(const InferenceCalculatorOptions_Delegate* msg);
};

// Returns the TfLite member of the oneof union (precondition: case kTflite).
const ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite&
InferenceCalculatorOptions_Delegate::_Internal::tflite(const InferenceCalculatorOptions_Delegate* msg) {
  return *msg->delegate_.tflite_;
}
// Returns the Gpu member of the oneof union (precondition: case kGpu).
const ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu&
InferenceCalculatorOptions_Delegate::_Internal::gpu(const InferenceCalculatorOptions_Delegate* msg) {
  return *msg->delegate_.gpu_;
}
// Returns the Nnapi member of the oneof union (precondition: case kNnapi).
const ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi&
InferenceCalculatorOptions_Delegate::_Internal::nnapi(const InferenceCalculatorOptions_Delegate* msg) {
  return *msg->delegate_.nnapi_;
}
// Returns the Xnnpack member of the oneof union (precondition: case kXnnpack).
const ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack&
InferenceCalculatorOptions_Delegate::_Internal::xnnpack(const InferenceCalculatorOptions_Delegate* msg) {
  return *msg->delegate_.xnnpack_;
}
// Takes ownership of `tflite` as the active oneof member, first clearing
// whichever member was previously active. If the submessage lives on a
// different arena than this message, GetOwnedMessage copies it onto the
// right arena so ownership semantics stay consistent. Passing nullptr just
// clears the oneof.
void InferenceCalculatorOptions_Delegate::set_allocated_tflite(::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* tflite) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_delegate();
  if (tflite) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_Delegate_TfLite>::GetOwningArena(tflite);
    if (message_arena != submessage_arena) {
      tflite = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, tflite, submessage_arena);
    }
    set_has_tflite();
    delegate_.tflite_ = tflite;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.tflite)
}
// Same ownership-transfer contract as set_allocated_tflite, for the Gpu case.
void InferenceCalculatorOptions_Delegate::set_allocated_gpu(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* gpu) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_delegate();
  if (gpu) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_Delegate_Gpu>::GetOwningArena(gpu);
    if (message_arena != submessage_arena) {
      gpu = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, gpu, submessage_arena);
    }
    set_has_gpu();
    delegate_.gpu_ = gpu;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.gpu)
}
// Same ownership-transfer contract as set_allocated_tflite, for the Nnapi case.
void InferenceCalculatorOptions_Delegate::set_allocated_nnapi(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* nnapi) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_delegate();
  if (nnapi) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi>::GetOwningArena(nnapi);
    if (message_arena != submessage_arena) {
      nnapi = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, nnapi, submessage_arena);
    }
    set_has_nnapi();
    delegate_.nnapi_ = nnapi;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.nnapi)
}
// Same ownership-transfer contract as set_allocated_tflite, for the Xnnpack case.
void InferenceCalculatorOptions_Delegate::set_allocated_xnnpack(::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* xnnpack) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_delegate();
  if (xnnpack) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack>::GetOwningArena(xnnpack);
    if (message_arena != submessage_arena) {
      xnnpack = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, xnnpack, submessage_arena);
    }
    set_has_xnnpack();
    delegate_.xnnpack_ = xnnpack;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.xnnpack)
}
// Arena-enabled constructor; registers the arena destructor hook for
// arena-owned (non-message-owned) instances.
InferenceCalculatorOptions_Delegate::InferenceCalculatorOptions_Delegate(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.Delegate)
}

// Copy constructor: starts with no active oneof member, then deep-copies
// whichever delegate case is set in `from` by merging into a freshly
// allocated submessage.
InferenceCalculatorOptions_Delegate::InferenceCalculatorOptions_Delegate(const InferenceCalculatorOptions_Delegate& from)
  : ::PROTOBUF_NAMESPACE_ID::Message() {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  clear_has_delegate();
  switch (from.delegate_case()) {
    case kTflite: {
      _internal_mutable_tflite()->::mediapipe::InferenceCalculatorOptions_Delegate_TfLite::MergeFrom(from._internal_tflite());
      break;
    }
    case kGpu: {
      _internal_mutable_gpu()->::mediapipe::InferenceCalculatorOptions_Delegate_Gpu::MergeFrom(from._internal_gpu());
      break;
    }
    case kNnapi: {
      _internal_mutable_nnapi()->::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi::MergeFrom(from._internal_nnapi());
      break;
    }
    case kXnnpack: {
      _internal_mutable_xnnpack()->::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack::MergeFrom(from._internal_xnnpack());
      break;
    }
    case DELEGATE_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.Delegate)
}

// Shared initialization: the oneof starts out with no member set.
inline void InferenceCalculatorOptions_Delegate::SharedCtor() {
clear_has_delegate();
}
// Destructor: arena-owned instances skip teardown — the arena reclaims
// their memory (and their submessages') in bulk.
InferenceCalculatorOptions_Delegate::~InferenceCalculatorOptions_Delegate() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.Delegate)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}

// Heap-only teardown: releases the active oneof submessage, if any.
inline void InferenceCalculatorOptions_Delegate::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (has_delegate()) {
    clear_delegate();
  }
}

// Arena destruction hook; nothing extra to clean up for this message.
void InferenceCalculatorOptions_Delegate::ArenaDtor(void* object) {
  InferenceCalculatorOptions_Delegate* _this = reinterpret_cast< InferenceCalculatorOptions_Delegate* >(object);
  (void)_this;
}
// Intentionally empty: no arena destructor registration is needed.
void InferenceCalculatorOptions_Delegate::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the serialized-size cache used by serialization fast paths.
void InferenceCalculatorOptions_Delegate::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Clears the `delegate` oneof: deletes the active submessage only when it
// is heap-allocated (arena-allocated submessages are freed with the arena),
// then marks the oneof as unset.
void InferenceCalculatorOptions_Delegate::clear_delegate() {
// @@protoc_insertion_point(one_of_clear_start:mediapipe.InferenceCalculatorOptions.Delegate)
  switch (delegate_case()) {
    case kTflite: {
      if (GetArenaForAllocation() == nullptr) {
        delete delegate_.tflite_;
      }
      break;
    }
    case kGpu: {
      if (GetArenaForAllocation() == nullptr) {
        delete delegate_.gpu_;
      }
      break;
    }
    case kNnapi: {
      if (GetArenaForAllocation() == nullptr) {
        delete delegate_.nnapi_;
      }
      break;
    }
    case kXnnpack: {
      if (GetArenaForAllocation() == nullptr) {
        delete delegate_.xnnpack_;
      }
      break;
    }
    case DELEGATE_NOT_SET: {
      break;
    }
  }
  _oneof_case_[0] = DELEGATE_NOT_SET;
}
// Resets the message to its default state: drops the active oneof member
// and discards unknown fields.
void InferenceCalculatorOptions_Delegate::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.Delegate)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  clear_delegate();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser. Each oneof field is a length-delimited submessage
// (wire type 2); _internal_mutable_* switches the active oneof case before
// the nested parse. Unrecognized fields go to the UnknownFieldSet; on
// malformed input the function returns nullptr via the `failure` path.
const char* InferenceCalculatorOptions_Delegate::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .mediapipe.InferenceCalculatorOptions.Delegate.TfLite tflite = 1;
      case 1:
        // Tag byte 10 == field 1, wire type 2 (length-delimited).
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
          ptr = ctx->ParseMessage(_internal_mutable_tflite(), ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // .mediapipe.InferenceCalculatorOptions.Delegate.Gpu gpu = 2;
      case 2:
        // Tag byte 18 == field 2, wire type 2.
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_gpu(), ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // .mediapipe.InferenceCalculatorOptions.Delegate.Nnapi nnapi = 3;
      case 3:
        // Tag byte 26 == field 3, wire type 2.
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 26)) {
          ptr = ctx->ParseMessage(_internal_mutable_nnapi(), ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // .mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack xnnpack = 4;
      case 4:
        // Tag byte 34 == field 4, wire type 2.
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 34)) {
          ptr = ctx->ParseMessage(_internal_mutable_xnnpack(), ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag terminates this message's scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes whichever oneof member is active (at most one), then appends
// any preserved unknown fields. Returns the advanced write cursor.
uint8_t* InferenceCalculatorOptions_Delegate::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.Delegate)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  switch (delegate_case()) {
    case kTflite: {
      target = stream->EnsureSpace(target);
      target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(
            1, _Internal::tflite(this), target, stream);
      break;
    }
    case kGpu: {
      target = stream->EnsureSpace(target);
      target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(
            2, _Internal::gpu(this), target, stream);
      break;
    }
    case kNnapi: {
      target = stream->EnsureSpace(target);
      target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(
            3, _Internal::nnapi(this), target, stream);
      break;
    }
    case kXnnpack: {
      target = stream->EnsureSpace(target);
      target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(
            4, _Internal::xnnpack(this), target, stream);
      break;
    }
    default: ;  // DELEGATE_NOT_SET: nothing to write
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.Delegate)
  return target;
}
// Computes the serialized size: one byte of tag plus the length-prefixed
// size of the active oneof submessage (if any), plus unknown fields. The
// result is cached for the serializer.
size_t InferenceCalculatorOptions_Delegate::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.Delegate)
  size_t total_size = 0;

  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  switch (delegate_case()) {
    // .mediapipe.InferenceCalculatorOptions.Delegate.TfLite tflite = 1;
    case kTflite: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *delegate_.tflite_);
      break;
    }
    // .mediapipe.InferenceCalculatorOptions.Delegate.Gpu gpu = 2;
    case kGpu: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *delegate_.gpu_);
      break;
    }
    // .mediapipe.InferenceCalculatorOptions.Delegate.Nnapi nnapi = 3;
    case kNnapi: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *delegate_.nnapi_);
      break;
    }
    // .mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack xnnpack = 4;
    case kXnnpack: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *delegate_.xnnpack_);
      break;
    }
    case DELEGATE_NOT_SET: {
      break;
    }
  }
  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Class-data table for the generic Message runtime: size-checked copy plus
// this type's merge trampoline.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_Delegate::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_Delegate::MergeImpl
};
// Exposes the table above to the base Message class.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_Delegate::GetClassData() const { return &_class_data_; }

// Type-erased merge entry point: downcasts and forwards to MergeFrom.
void InferenceCalculatorOptions_Delegate::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                      const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  static_cast<InferenceCalculatorOptions_Delegate *>(to)->MergeFrom(
      static_cast<const InferenceCalculatorOptions_Delegate &>(from));
}

// Merges `from` into *this. If `from` has an active oneof member, it is
// merged into (and, via _internal_mutable_*, becomes) our active member;
// if `from` is unset our current member is left untouched.
void InferenceCalculatorOptions_Delegate::MergeFrom(const InferenceCalculatorOptions_Delegate& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.Delegate)
  GOOGLE_DCHECK_NE(&from, this);
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  switch (from.delegate_case()) {
    case kTflite: {
      _internal_mutable_tflite()->::mediapipe::InferenceCalculatorOptions_Delegate_TfLite::MergeFrom(from._internal_tflite());
      break;
    }
    case kGpu: {
      _internal_mutable_gpu()->::mediapipe::InferenceCalculatorOptions_Delegate_Gpu::MergeFrom(from._internal_gpu());
      break;
    }
    case kNnapi: {
      _internal_mutable_nnapi()->::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi::MergeFrom(from._internal_nnapi());
      break;
    }
    case kXnnpack: {
      _internal_mutable_xnnpack()->::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack::MergeFrom(from._internal_xnnpack());
      break;
    }
    case DELEGATE_NOT_SET: {
      break;
    }
  }
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Copy assignment in protobuf style: clear then merge. Self-copy is a no-op.
void InferenceCalculatorOptions_Delegate::CopyFrom(const InferenceCalculatorOptions_Delegate& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.Delegate)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// No required fields, so every instance is initialized. (Nested oneof
// submessages are not recursively checked here; the generated submessage
// types above all report true as well.)
bool InferenceCalculatorOptions_Delegate::IsInitialized() const {
  return true;
}

// O(1) swap: exchanges metadata, the raw oneof union word, and the active
// case discriminant. NOTE(review): this swaps raw union pointers, which
// presumably relies on protobuf's contract that InternalSwap is only called
// for messages on the same arena — confirm against the runtime's Swap().
void InferenceCalculatorOptions_Delegate::InternalSwap(InferenceCalculatorOptions_Delegate* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(delegate_, other->delegate_);
  swap(_oneof_case_[0], other->_oneof_case_[0]);
}

// Lazily resolves and returns reflection metadata — entry [4] of the
// file-level metadata table.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_Delegate::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[4]);
}
// ===================================================================
// No singular fields with presence, so the helper class is empty.
class InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_Internal {
 public:
};

// Arena-enabled constructor; the repeated int32 field is allocated on the
// same arena as the message.
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned),
  model_tensor_indices_(arena) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
}

// Copy constructor: deep-copies the repeated field and unknown fields.
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      model_tensor_indices_(from.model_tensor_indices_) {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
}

// Nothing beyond member initializers to set up.
inline void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::SharedCtor() {
}

// Destructor: arena-owned instances skip teardown; the arena frees them.
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::~InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}

// The RepeatedField member destroys itself; only assert non-arena here.
inline void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

// Arena destruction hook; nothing extra to clean up.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::ArenaDtor(void* object) {
  InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* _this = reinterpret_cast< InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* >(object);
  (void)_this;
}
// Intentionally empty: no arena destructor registration is needed.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the serialized-size cache used by serialization fast paths.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Resets the message: empties the repeated index list and discards any
// unknown fields.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  model_tensor_indices_.Clear();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser for the packed repeated int32 field. Accepts both the
// packed encoding (tag 10: field 1, length-delimited) and the legacy
// unpacked encoding (tag 8: field 1, varint) as the wire format requires.
const char* InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated int32 model_tensor_indices = 1 [packed = true];
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
          // Packed form: whole length-delimited run of varints at once.
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_model_tensor_indices(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<uint8_t>(tag) == 8) {
          // Unpacked form: one varint element per tag.
          _internal_add_model_tensor_indices(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag terminates this message's scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes the packed repeated int32 field using the payload byte size
// cached by ByteSizeLong() (the runtime guarantees ByteSizeLong runs before
// serialization), then appends any unknown fields.
uint8_t* InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated int32 model_tensor_indices = 1 [packed = true];
  {
    int byte_size = _model_tensor_indices_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt32Packed(
          1, _internal_model_tensor_indices(), byte_size, target);
    }
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
  return target;
}
// Computes the serialized size of the packed repeated field (tag + length
// prefix + varint payload) and stores the payload size in the atomic cache
// that _InternalSerialize reads.
size_t InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
  size_t total_size = 0;

  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated int32 model_tensor_indices = 1 [packed = true];
  {
    size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      Int32Size(this->model_tensor_indices_);
    if (data_size > 0) {
      // One byte of tag plus the varint-encoded payload length.
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
            static_cast<int32_t>(data_size));
    }
    int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size);
    _model_tensor_indices_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Class-data table for the generic Message runtime: size-checked copy plus
// this type's merge trampoline.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::MergeImpl
};
// Exposes the table above to the base Message class.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::GetClassData() const { return &_class_data_; }

// Type-erased merge entry point: downcasts and forwards to MergeFrom.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                      const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  static_cast<InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap *>(to)->MergeFrom(
      static_cast<const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap &>(from));
}

// Merges `from` into *this: repeated elements are appended (standard
// protobuf repeated-field merge semantics), unknown fields are appended.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::MergeFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
  GOOGLE_DCHECK_NE(&from, this);
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  model_tensor_indices_.MergeFrom(from.model_tensor_indices_);
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Copy assignment in protobuf style: clear then merge. Self-copy is a no-op.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::CopyFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// No required fields, so every instance is initialized.
bool InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::IsInitialized() const {
  return true;
}

// O(1) swap of metadata and the repeated field's internal storage.
void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::InternalSwap(InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  model_tensor_indices_.InternalSwap(&other->model_tensor_indices_);
}

// Lazily resolves and returns reflection metadata — entry [5] of the
// file-level metadata table.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[5]);
}
// ===================================================================
// Internal accessor scaffolding for TensorNamesMap. The message has no
// singular (has-bit) fields, so the generated helper class is empty.
class InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::_Internal {
 public:
};
// Arena constructor: places the repeated string field tensor_names on
// `arena`, then registers an arena destructor unless the message storage is
// owned by the arena itself (is_message_owned).
InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned),
  tensor_names_(arena) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
}
// Copy constructor: heap-allocated (no arena); copies tensor_names and
// merges `from`'s unknown fields.
InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      tensor_names_(from.tensor_names_) {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
}
// Shared constructor body: nothing to initialize beyond the member
// initializer lists (no singular fields).
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::SharedCtor() {
}
// Destructor: arena-allocated instances are destroyed by the arena, so only
// non-arena instances run SharedDtor and release unknown-field metadata.
InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::~InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Shared destructor body: must never run for arena-allocated messages
// (debug-checked); no owned resources to free here.
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
// Arena destruction hook; this message needs no extra cleanup, so the cast
// result is intentionally unused.
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::ArenaDtor(void* object) {
  InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* _this = reinterpret_cast< InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* >(object);
  (void)_this;
}
// No arena destructor registration required for this message type.
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Caches the serialized byte size computed by ByteSizeLong().
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Resets the message to its default state: empties tensor_names and drops
// any unknown fields.
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  tensor_names_.Clear();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser (tail-called tag dispatch). Recognizes field 1
// (repeated string tensor_names, wire tag 10); any other tag is routed to
// handle_unusual: tag 0 / group-end ends the message, everything else is
// preserved as an unknown field. CHK_ aborts to `failure` on a bad pointer.
const char* InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated string tensor_names = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
          ptr -= 1;
          do {
            ptr += 1;
            auto str = _internal_add_tensor_names();
            ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
            #ifndef NDEBUG
            // UTF-8 validity is only verified in debug builds (proto2 string).
            ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names");
            #endif  // !NDEBUG
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes the message into `target` using the EpsCopy fast path: each
// tensor_names element as field 1, followed by any preserved unknown fields.
uint8_t* InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  // repeated string tensor_names = 1;
  for (int i = 0, n = this->_internal_tensor_names_size(); i < n; i++) {
    const auto& s = this->_internal_tensor_names(i);
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
      s.data(), static_cast<int>(s.length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
      "mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names");
    target = stream->WriteString(1, s, target);
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
  return target;
}
// Computes the serialized size: one byte of tag overhead per tensor_names
// element plus each string's length-delimited size, plus unknown fields.
// The result is cached via SetCachedSize inside MaybeComputeUnknownFieldsSize.
size_t InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
  size_t total_size = 0;
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  // repeated string tensor_names = 1;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(tensor_names_.size());
  for (int i = 0, n = tensor_names_.size(); i < n; i++) {
    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
      tensor_names_.Get(i));
  }
  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Class-data table consumed by the generic Message machinery: copy and
// merge entry points for type-erased dispatch.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::MergeImpl
};
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::GetClassData() const { return &_class_data_; }
// Type-erased merge thunk: downcasts both sides and forwards to the typed
// MergeFrom. Callers guarantee the dynamic types match.
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                      const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  static_cast<InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap *>(to)->MergeFrom(
      static_cast<const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap &>(from));
}
// Merges `from` into this message: concatenates tensor_names and merges
// unknown fields.
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::MergeFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
  GOOGLE_DCHECK_NE(&from, this);  // self-merge is a caller bug (debug-checked)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  tensor_names_.MergeFrom(from.tensor_names_);
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Replaces contents with a copy of `from` (Clear + MergeFrom);
// self-assignment is a no-op.
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::CopyFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
// Always true: no required fields in this message type.
bool InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::IsInitialized() const {
  return true;
}
// Swaps internal representations (metadata + repeated string field) with
// `other`; preconditions are enforced by the public Swap() wrappers.
void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::InternalSwap(InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  tensor_names_.InternalSwap(&other->tensor_names_);
}
// Returns reflection metadata; slot 6 in the file-level metadata array.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[6]);
}
// ===================================================================
// Internal has-bit helpers for FeedbackTensorLink. Bit 0 tracks presence of
// from_output_tensor_name (field 1), bit 1 tracks to_input_tensor_name
// (field 2).
class InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_Internal {
 public:
  using HasBits = decltype(std::declval<InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink>()._has_bits_);
  static void set_has_from_output_tensor_name(HasBits* has_bits) {
    (*has_bits)[0] |= 1u;
  }
  static void set_has_to_input_tensor_name(HasBits* has_bits) {
    (*has_bits)[0] |= 2u;
  }
};
// Arena constructor: registers an arena destructor unless the message
// storage is owned by the arena itself.
InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
}
// Copy constructor: copies has-bits and each string field only when it is
// present in `from`; absent strings stay bound to the shared empty default.
InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _has_bits_(from._has_bits_) {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  from_output_tensor_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    // Debug mode that forces a heap copy even for default (empty) strings.
    from_output_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from._internal_has_from_output_tensor_name()) {
    from_output_tensor_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_from_output_tensor_name(),
      GetArenaForAllocation());
  }
  to_input_tensor_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    to_input_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from._internal_has_to_input_tensor_name()) {
    to_input_tensor_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_to_input_tensor_name(),
      GetArenaForAllocation());
  }
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
}
// Shared constructor body: binds both string fields to the shared empty
// default so no allocation happens until a value is set.
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::SharedCtor() {
from_output_tensor_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  from_output_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
to_input_tensor_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  to_input_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
// Destructor: arena-allocated instances are destroyed by the arena; only
// heap instances free their strings and unknown-field metadata.
InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::~InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Shared destructor body: releases both string fields; must never run for
// arena-allocated messages (debug-checked).
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  from_output_tensor_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  to_input_tensor_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
// Arena destruction hook; nothing extra to clean up for this message.
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::ArenaDtor(void* object) {
  InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* _this = reinterpret_cast< InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* >(object);
  (void)_this;
}
// No arena destructor registration required for this message type.
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Caches the serialized byte size computed by ByteSizeLong().
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Resets the message: clears whichever string fields are present (per the
// has-bit mask 0x3), then clears has-bits and unknown fields.
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x00000003u) {
    if (cached_has_bits & 0x00000001u) {
      from_output_tensor_name_.ClearNonDefaultToEmpty();
    }
    if (cached_has_bits & 0x00000002u) {
      to_input_tensor_name_.ClearNonDefaultToEmpty();
    }
  }
  _has_bits_.Clear();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser. Field 1 (tag 10) -> from_output_tensor_name, field 2
// (tag 18) -> to_input_tensor_name; unrecognized tags go to handle_unusual
// (end-of-message detection or unknown-field preservation). Locally
// accumulated has_bits are OR-ed into _has_bits_ at message_done.
const char* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  _Internal::HasBits has_bits{};
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // optional string from_output_tensor_name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_from_output_tensor_name();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          #ifndef NDEBUG
          // UTF-8 validity is only verified in debug builds (proto2 string).
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.from_output_tensor_name");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional string to_input_tensor_name = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_to_input_tensor_name();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          #ifndef NDEBUG
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.to_input_tensor_name");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  _has_bits_.Or(has_bits);
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes present fields (checked via cached has-bits) followed by any
// preserved unknown fields.
uint8_t* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  // optional string from_output_tensor_name = 1;
  if (cached_has_bits & 0x00000001u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
      this->_internal_from_output_tensor_name().data(), static_cast<int>(this->_internal_from_output_tensor_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
      "mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.from_output_tensor_name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_from_output_tensor_name(), target);
  }
  // optional string to_input_tensor_name = 2;
  if (cached_has_bits & 0x00000002u) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
      this->_internal_to_input_tensor_name().data(), static_cast<int>(this->_internal_to_input_tensor_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
      "mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.to_input_tensor_name");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_to_input_tensor_name(), target);
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
  return target;
}
// Computes the serialized size: 1 tag byte + length-delimited size for each
// present string field, plus unknown fields; result cached for serialization.
size_t InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
  size_t total_size = 0;
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x00000003u) {
    // optional string from_output_tensor_name = 1;
    if (cached_has_bits & 0x00000001u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_from_output_tensor_name());
    }
    // optional string to_input_tensor_name = 2;
    if (cached_has_bits & 0x00000002u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_to_input_tensor_name());
    }
  }
  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Class-data table for type-erased copy/merge dispatch.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::MergeImpl
};
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::GetClassData() const { return &_class_data_; }
// Type-erased merge thunk: downcasts and forwards to the typed MergeFrom.
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                      const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  static_cast<InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink *>(to)->MergeFrom(
      static_cast<const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink &>(from));
}
// Merges `from`: each present string field in `from` overwrites the
// corresponding field here; unknown fields are merged.
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::MergeFrom(const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
  GOOGLE_DCHECK_NE(&from, this);  // self-merge is a caller bug (debug-checked)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = from._has_bits_[0];
  if (cached_has_bits & 0x00000003u) {
    if (cached_has_bits & 0x00000001u) {
      _internal_set_from_output_tensor_name(from._internal_from_output_tensor_name());
    }
    if (cached_has_bits & 0x00000002u) {
      _internal_set_to_input_tensor_name(from._internal_to_input_tensor_name());
    }
  }
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Replaces contents with a copy of `from` (Clear + MergeFrom);
// self-assignment is a no-op.
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::CopyFrom(const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
// Always true: no required fields in this message type.
bool InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::IsInitialized() const {
  return true;
}
// Swaps metadata, has-bits, and both arena-aware string fields with
// `other`. ArenaStringPtr::InternalSwap handles the cross-arena cases.
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::InternalSwap(InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_has_bits_[0], other->_has_bits_[0]);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &from_output_tensor_name_, lhs_arena,
      &other->from_output_tensor_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &to_input_tensor_name_, lhs_arena,
      &other->to_input_tensor_name_, rhs_arena
  );
}
// Returns reflection metadata; slot 7 in the file-level metadata array.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[7]);
}
// ===================================================================
// Internal accessors for the two oneofs of InputOutputConfig:
// InputTensorMap (input_tensor_indices_map | input_tensor_names_map) and
// OutputTensorMap (output_tensor_indices_map | output_tensor_names_map).
class InferenceCalculatorOptions_InputOutputConfig::_Internal {
 public:
  static const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& input_tensor_indices_map(const InferenceCalculatorOptions_InputOutputConfig* msg);
  static const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& input_tensor_names_map(const InferenceCalculatorOptions_InputOutputConfig* msg);
  static const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& output_tensor_indices_map(const InferenceCalculatorOptions_InputOutputConfig* msg);
  static const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& output_tensor_names_map(const InferenceCalculatorOptions_InputOutputConfig* msg);
};
// The four accessors below dereference the corresponding oneof union member
// directly; callers must ensure that member is the active oneof case.
const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap&
InferenceCalculatorOptions_InputOutputConfig::_Internal::input_tensor_indices_map(const InferenceCalculatorOptions_InputOutputConfig* msg) {
  return *msg->InputTensorMap_.input_tensor_indices_map_;
}
const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap&
InferenceCalculatorOptions_InputOutputConfig::_Internal::input_tensor_names_map(const InferenceCalculatorOptions_InputOutputConfig* msg) {
  return *msg->InputTensorMap_.input_tensor_names_map_;
}
const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap&
InferenceCalculatorOptions_InputOutputConfig::_Internal::output_tensor_indices_map(const InferenceCalculatorOptions_InputOutputConfig* msg) {
  return *msg->OutputTensorMap_.output_tensor_indices_map_;
}
const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap&
InferenceCalculatorOptions_InputOutputConfig::_Internal::output_tensor_names_map(const InferenceCalculatorOptions_InputOutputConfig* msg) {
  return *msg->OutputTensorMap_.output_tensor_names_map_;
}
// Takes ownership of `input_tensor_indices_map` and installs it as the
// active InputTensorMap oneof case. The previous case is cleared first; if
// the submessage lives on a different arena, GetOwnedMessage copies it onto
// this message's arena. Passing nullptr just clears the oneof.
void InferenceCalculatorOptions_InputOutputConfig::set_allocated_input_tensor_indices_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* input_tensor_indices_map) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_InputTensorMap();
  if (input_tensor_indices_map) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap>::GetOwningArena(input_tensor_indices_map);
    if (message_arena != submessage_arena) {
      input_tensor_indices_map = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, input_tensor_indices_map, submessage_arena);
    }
    set_has_input_tensor_indices_map();
    InputTensorMap_.input_tensor_indices_map_ = input_tensor_indices_map;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_indices_map)
}
// Same ownership-transfer contract as above, for the input_tensor_names_map
// case of the InputTensorMap oneof.
void InferenceCalculatorOptions_InputOutputConfig::set_allocated_input_tensor_names_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* input_tensor_names_map) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_InputTensorMap();
  if (input_tensor_names_map) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap>::GetOwningArena(input_tensor_names_map);
    if (message_arena != submessage_arena) {
      input_tensor_names_map = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, input_tensor_names_map, submessage_arena);
    }
    set_has_input_tensor_names_map();
    InputTensorMap_.input_tensor_names_map_ = input_tensor_names_map;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_names_map)
}
// Same ownership-transfer contract, for the output_tensor_indices_map case
// of the OutputTensorMap oneof.
void InferenceCalculatorOptions_InputOutputConfig::set_allocated_output_tensor_indices_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* output_tensor_indices_map) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_OutputTensorMap();
  if (output_tensor_indices_map) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap>::GetOwningArena(output_tensor_indices_map);
    if (message_arena != submessage_arena) {
      output_tensor_indices_map = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, output_tensor_indices_map, submessage_arena);
    }
    set_has_output_tensor_indices_map();
    OutputTensorMap_.output_tensor_indices_map_ = output_tensor_indices_map;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_indices_map)
}
// Same ownership-transfer contract, for the output_tensor_names_map case of
// the OutputTensorMap oneof.
void InferenceCalculatorOptions_InputOutputConfig::set_allocated_output_tensor_names_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* output_tensor_names_map) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_OutputTensorMap();
  if (output_tensor_names_map) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap>::GetOwningArena(output_tensor_names_map);
    if (message_arena != submessage_arena) {
      output_tensor_names_map = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, output_tensor_names_map, submessage_arena);
    }
    set_has_output_tensor_names_map();
    OutputTensorMap_.output_tensor_names_map_ = output_tensor_names_map;
  }
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_names_map)
}
// Arena constructor: places the repeated feedback_tensor_links field on
// `arena` and registers an arena destructor unless the message storage is
// owned by the arena itself.
InferenceCalculatorOptions_InputOutputConfig::InferenceCalculatorOptions_InputOutputConfig(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned),
  feedback_tensor_links_(arena) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
}
// Copy constructor: copies the repeated feedback links, then deep-copies
// whichever case (if any) is active in each of the two oneofs by allocating
// the matching submessage and merging `from`'s value into it.
InferenceCalculatorOptions_InputOutputConfig::InferenceCalculatorOptions_InputOutputConfig(const InferenceCalculatorOptions_InputOutputConfig& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      feedback_tensor_links_(from.feedback_tensor_links_) {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  clear_has_InputTensorMap();
  switch (from.InputTensorMap_case()) {
    case kInputTensorIndicesMap: {
      _internal_mutable_input_tensor_indices_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::MergeFrom(from._internal_input_tensor_indices_map());
      break;
    }
    case kInputTensorNamesMap: {
      _internal_mutable_input_tensor_names_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::MergeFrom(from._internal_input_tensor_names_map());
      break;
    }
    case INPUTTENSORMAP_NOT_SET: {
      break;
    }
  }
  clear_has_OutputTensorMap();
  switch (from.OutputTensorMap_case()) {
    case kOutputTensorIndicesMap: {
      _internal_mutable_output_tensor_indices_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::MergeFrom(from._internal_output_tensor_indices_map());
      break;
    }
    case kOutputTensorNamesMap: {
      _internal_mutable_output_tensor_names_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::MergeFrom(from._internal_output_tensor_names_map());
      break;
    }
    case OUTPUTTENSORMAP_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
}
// Shared constructor body: marks both oneofs as not set.
inline void InferenceCalculatorOptions_InputOutputConfig::SharedCtor() {
clear_has_InputTensorMap();
clear_has_OutputTensorMap();
}
// Destructor: arena-allocated instances are destroyed by the arena; only
// heap instances run SharedDtor and release unknown-field metadata.
InferenceCalculatorOptions_InputOutputConfig::~InferenceCalculatorOptions_InputOutputConfig() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Shared destructor body: frees whichever oneof submessages are set; must
// never run for arena-allocated messages (debug-checked).
inline void InferenceCalculatorOptions_InputOutputConfig::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (has_InputTensorMap()) {
    clear_InputTensorMap();
  }
  if (has_OutputTensorMap()) {
    clear_OutputTensorMap();
  }
}
// Arena destruction hook; nothing extra to clean up for this message.
void InferenceCalculatorOptions_InputOutputConfig::ArenaDtor(void* object) {
  InferenceCalculatorOptions_InputOutputConfig* _this = reinterpret_cast< InferenceCalculatorOptions_InputOutputConfig* >(object);
  (void)_this;
}
// No arena destructor registration required for this message type.
void InferenceCalculatorOptions_InputOutputConfig::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Caches the serialized byte size computed by ByteSizeLong().
void InferenceCalculatorOptions_InputOutputConfig::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Clears the InputTensorMap oneof: deletes the active submessage only when
// this message is heap-allocated (arena-allocated submessages are freed by
// the arena), then resets the case discriminant (slot 0).
void InferenceCalculatorOptions_InputOutputConfig::clear_InputTensorMap() {
// @@protoc_insertion_point(one_of_clear_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  switch (InputTensorMap_case()) {
    case kInputTensorIndicesMap: {
      if (GetArenaForAllocation() == nullptr) {
        delete InputTensorMap_.input_tensor_indices_map_;
      }
      break;
    }
    case kInputTensorNamesMap: {
      if (GetArenaForAllocation() == nullptr) {
        delete InputTensorMap_.input_tensor_names_map_;
      }
      break;
    }
    case INPUTTENSORMAP_NOT_SET: {
      break;
    }
  }
  _oneof_case_[0] = INPUTTENSORMAP_NOT_SET;
}
// Clears the OutputTensorMap oneof; same contract as clear_InputTensorMap
// but for the second oneof (case discriminant slot 1).
void InferenceCalculatorOptions_InputOutputConfig::clear_OutputTensorMap() {
// @@protoc_insertion_point(one_of_clear_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  switch (OutputTensorMap_case()) {
    case kOutputTensorIndicesMap: {
      if (GetArenaForAllocation() == nullptr) {
        delete OutputTensorMap_.output_tensor_indices_map_;
      }
      break;
    }
    case kOutputTensorNamesMap: {
      if (GetArenaForAllocation() == nullptr) {
        delete OutputTensorMap_.output_tensor_names_map_;
      }
      break;
    }
    case OUTPUTTENSORMAP_NOT_SET: {
      break;
    }
  }
  _oneof_case_[1] = OUTPUTTENSORMAP_NOT_SET;
}
// Resets the message to its default state: clears the repeated feedback
// links, both oneofs, and any unknown fields.
void InferenceCalculatorOptions_InputOutputConfig::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  feedback_tensor_links_.Clear();
  clear_InputTensorMap();
  clear_OutputTensorMap();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser: reads tags one at a time and dispatches on the field
// number (tag >> 3). Unexpected wire types and unknown field numbers fall
// through to `handle_unusual`, which either treats the tag as a group
// end/zero tag or preserves the bytes in the UnknownFieldSet. On any
// malformed input CHK_ jumps to `failure` and nullptr is returned.
const char* InferenceCalculatorOptions_InputOutputConfig::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
while (!ctx->Done(&ptr)) {
uint32_t tag;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
switch (tag >> 3) {
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap input_tensor_indices_map = 1;
case 1:
// 10 == (field 1 << 3) | wiretype 2 (length-delimited).
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
ptr = ctx->ParseMessage(_internal_mutable_input_tensor_indices_map(), ptr);
CHK_(ptr);
} else
goto handle_unusual;
continue;
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap output_tensor_indices_map = 2;
case 2:
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 18)) {
ptr = ctx->ParseMessage(_internal_mutable_output_tensor_indices_map(), ptr);
CHK_(ptr);
} else
goto handle_unusual;
continue;
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap input_tensor_names_map = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 26)) {
ptr = ctx->ParseMessage(_internal_mutable_input_tensor_names_map(), ptr);
CHK_(ptr);
} else
goto handle_unusual;
continue;
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap output_tensor_names_map = 4;
case 4:
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 34)) {
ptr = ctx->ParseMessage(_internal_mutable_output_tensor_names_map(), ptr);
CHK_(ptr);
} else
goto handle_unusual;
continue;
// repeated .mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink feedback_tensor_links = 5;
case 5:
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 42)) {
// Greedily consume consecutive elements of the repeated field
// without re-entering the tag switch; the -1/+1 dance re-reads
// the already-consumed tag byte on each loop iteration.
ptr -= 1;
do {
ptr += 1;
ptr = ctx->ParseMessage(_internal_add_feedback_tensor_links(), ptr);
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<42>(ptr));
} else
goto handle_unusual;
continue;
default:
goto handle_unusual;
} // switch
handle_unusual:
// Tag 0 or an end-group tag (wiretype 4) terminates this message.
if ((tag == 0) || ((tag & 7) == 4)) {
CHK_(ptr);
ctx->SetLastTag(tag);
goto message_done;
}
// Unknown field: keep the raw bytes so they round-trip on serialize.
ptr = UnknownFieldParse(
tag,
_internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
ptr, ctx);
CHK_(ptr != nullptr);
} // while
message_done:
return ptr;
failure:
ptr = nullptr;
goto message_done;
#undef CHK_
}
// Serializes set fields, in field-number order, into `target` using the
// EpsCopyOutputStream fast path. Returns the write position after the
// last byte written. Unknown fields are appended last so the original
// bytes round-trip.
uint8_t* InferenceCalculatorOptions_InputOutputConfig::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap input_tensor_indices_map = 1;
  if (_internal_has_input_tensor_indices_map()) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(
        1, _Internal::input_tensor_indices_map(this), target, stream);
  }
  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap output_tensor_indices_map = 2;
  if (_internal_has_output_tensor_indices_map()) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(
        2, _Internal::output_tensor_indices_map(this), target, stream);
  }
  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap input_tensor_names_map = 3;
  if (_internal_has_input_tensor_names_map()) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(
        3, _Internal::input_tensor_names_map(this), target, stream);
  }
  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap output_tensor_names_map = 4;
  if (_internal_has_output_tensor_names_map()) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(
        4, _Internal::output_tensor_names_map(this), target, stream);
  }
  // repeated .mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink feedback_tensor_links = 5;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->_internal_feedback_tensor_links_size()); i < n; i++) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(5, this->_internal_feedback_tensor_links(i), target, stream);
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  return target;
}
// Computes the exact serialized size in bytes. Each "1 +" is the one-byte
// tag for fields 1-5; MessageSize adds the submessage length prefix plus
// payload. The result (plus unknown-field size) is cached via _cached_size_
// for later GetCachedSize() calls.
size_t InferenceCalculatorOptions_InputOutputConfig::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
size_t total_size = 0;
uint32_t cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// repeated .mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink feedback_tensor_links = 5;
total_size += 1UL * this->_internal_feedback_tensor_links_size();
for (const auto& msg : this->feedback_tensor_links_) {
total_size +=
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
}
// At most one member per oneof contributes to the size.
switch (InputTensorMap_case()) {
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap input_tensor_indices_map = 1;
case kInputTensorIndicesMap: {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
*InputTensorMap_.input_tensor_indices_map_);
break;
}
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap input_tensor_names_map = 3;
case kInputTensorNamesMap: {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
*InputTensorMap_.input_tensor_names_map_);
break;
}
case INPUTTENSORMAP_NOT_SET: {
break;
}
}
switch (OutputTensorMap_case()) {
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap output_tensor_indices_map = 2;
case kOutputTensorIndicesMap: {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
*OutputTensorMap_.output_tensor_indices_map_);
break;
}
// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap output_tensor_names_map = 4;
case kOutputTensorNamesMap: {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
*OutputTensorMap_.output_tensor_names_map_);
break;
}
case OUTPUTTENSORMAP_NOT_SET: {
break;
}
}
return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Type-erased operation table consumed by the generic Message machinery:
// a size-checked copy routine and this type's MergeImpl.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions_InputOutputConfig::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions_InputOutputConfig::MergeImpl
};
// Exposes the per-type operation table to the base Message class.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions_InputOutputConfig::GetClassData() const { return &_class_data_; }
// Type-erased merge entry point referenced from _class_data_: downcasts
// both messages and forwards to the strongly-typed MergeFrom().
void InferenceCalculatorOptions_InputOutputConfig::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                  const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  auto* typed_to =
      static_cast<InferenceCalculatorOptions_InputOutputConfig*>(to);
  const auto& typed_from =
      static_cast<const InferenceCalculatorOptions_InputOutputConfig&>(from);
  typed_to->MergeFrom(typed_from);
}
// Field-wise merge of `from` into `this`: appends repeated elements,
// recursively merges a set oneof member into the corresponding member here
// (allocating it first if needed), and merges unknown fields. `from` must
// not alias `this`.
void InferenceCalculatorOptions_InputOutputConfig::MergeFrom(const InferenceCalculatorOptions_InputOutputConfig& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  GOOGLE_DCHECK_NE(&from, this);
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  feedback_tensor_links_.MergeFrom(from.feedback_tensor_links_);
  switch (from.InputTensorMap_case()) {
    case kInputTensorIndicesMap: {
      _internal_mutable_input_tensor_indices_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::MergeFrom(from._internal_input_tensor_indices_map());
      break;
    }
    case kInputTensorNamesMap: {
      _internal_mutable_input_tensor_names_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::MergeFrom(from._internal_input_tensor_names_map());
      break;
    }
    case INPUTTENSORMAP_NOT_SET: {
      // `from` has no input map set; leave ours untouched.
      break;
    }
  }
  switch (from.OutputTensorMap_case()) {
    case kOutputTensorIndicesMap: {
      _internal_mutable_output_tensor_indices_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::MergeFrom(from._internal_output_tensor_indices_map());
      break;
    }
    case kOutputTensorNamesMap: {
      _internal_mutable_output_tensor_names_map()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::MergeFrom(from._internal_output_tensor_names_map());
      break;
    }
    case OUTPUTTENSORMAP_NOT_SET: {
      // `from` has no output map set; leave ours untouched.
      break;
    }
  }
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Replaces this message's contents with a copy of `from`.
// Implemented as clear-then-merge; self-copy is a no-op.
void InferenceCalculatorOptions_InputOutputConfig::CopyFrom(const InferenceCalculatorOptions_InputOutputConfig& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
  if (&from != this) {
    Clear();
    MergeFrom(from);
  }
}
// Generated as a constant true: the generator emitted no field checks here,
// so every instance is considered fully initialized.
bool InferenceCalculatorOptions_InputOutputConfig::IsInitialized() const {
  return true;
}
// Swaps the full contents of two messages. The oneof unions are swapped as
// raw storage together with their _oneof_case_ words so union contents and
// the "which member is active" bookkeeping stay consistent.
void InferenceCalculatorOptions_InputOutputConfig::InternalSwap(InferenceCalculatorOptions_InputOutputConfig* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  feedback_tensor_links_.InternalSwap(&other->feedback_tensor_links_);
  swap(InputTensorMap_, other->InputTensorMap_);
  swap(OutputTensorMap_, other->OutputTensorMap_);
  swap(_oneof_case_[0], other->_oneof_case_[0]);
  swap(_oneof_case_[1], other->_oneof_case_[1]);
}
// Returns reflection metadata, lazily resolving descriptors for this proto
// file on first use. Index 8 is this message's slot in the file-level
// metadata table.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions_InputOutputConfig::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[8]);
}
// ===================================================================
// Private helper granting the parser/serializer access to
// InferenceCalculatorOptions internals: has-bit setters (one bit per
// optional field) and const accessors for the submessage fields.
// Bit layout in _has_bits_[0]: 1=model_path, 2=delegate,
// 4=input_output_config, 8=try_mmap_model, 16=use_gpu, 32=use_nnapi,
// 64=cpu_num_thread.
class InferenceCalculatorOptions::_Internal {
 public:
  using HasBits = decltype(std::declval<InferenceCalculatorOptions>()._has_bits_);
  static void set_has_model_path(HasBits* has_bits) {
    (*has_bits)[0] |= 1u;
  }
  static void set_has_try_mmap_model(HasBits* has_bits) {
    (*has_bits)[0] |= 8u;
  }
  static void set_has_use_gpu(HasBits* has_bits) {
    (*has_bits)[0] |= 16u;
  }
  static void set_has_use_nnapi(HasBits* has_bits) {
    (*has_bits)[0] |= 32u;
  }
  static void set_has_cpu_num_thread(HasBits* has_bits) {
    (*has_bits)[0] |= 64u;
  }
  static const ::mediapipe::InferenceCalculatorOptions_Delegate& delegate(const InferenceCalculatorOptions* msg);
  static void set_has_delegate(HasBits* has_bits) {
    (*has_bits)[0] |= 2u;
  }
  static const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig& input_output_config(const InferenceCalculatorOptions* msg);
  static void set_has_input_output_config(HasBits* has_bits) {
    (*has_bits)[0] |= 4u;
  }
};
// Unchecked dereference of the delegate submessage; callers must have
// verified the field is present (has-bit set, pointer non-null).
const ::mediapipe::InferenceCalculatorOptions_Delegate&
InferenceCalculatorOptions::_Internal::delegate(const InferenceCalculatorOptions* msg) {
  return *msg->delegate_;
}
// Unchecked dereference of the input_output_config submessage; callers
// must have verified the field is present.
const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig&
InferenceCalculatorOptions::_Internal::input_output_config(const InferenceCalculatorOptions* msg) {
  return *msg->input_output_config_;
}
// Arena-aware constructor. Registers an arena destructor callback unless
// the message is "message owned" (its lifetime managed independently of
// the arena).
InferenceCalculatorOptions::InferenceCalculatorOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
  SharedCtor();
  if (!is_message_owned) {
    RegisterArenaDtor(arena);
  }
  // @@protoc_insertion_point(arena_constructor:mediapipe.InferenceCalculatorOptions)
}
// Copy constructor (never arena-allocated): deep-copies the string and
// submessage fields that are present in `from`, then block-copies the
// scalar fields with a single memcpy. The memcpy range relies on the
// declaration order try_mmap_model_ .. cpu_num_thread_ in the class layout.
InferenceCalculatorOptions::InferenceCalculatorOptions(const InferenceCalculatorOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _has_bits_(from._has_bits_) {
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
  model_path_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    model_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from._internal_has_model_path()) {
    model_path_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_model_path(),
      GetArenaForAllocation());
  }
  // Submessages are only allocated when present in the source.
  if (from._internal_has_delegate()) {
    delegate_ = new ::mediapipe::InferenceCalculatorOptions_Delegate(*from.delegate_);
  } else {
    delegate_ = nullptr;
  }
  if (from._internal_has_input_output_config()) {
    input_output_config_ = new ::mediapipe::InferenceCalculatorOptions_InputOutputConfig(*from.input_output_config_);
  } else {
    input_output_config_ = nullptr;
  }
  ::memcpy(&try_mmap_model_, &from.try_mmap_model_,
    static_cast<size_t>(reinterpret_cast<char*>(&cpu_num_thread_) -
    reinterpret_cast<char*>(&try_mmap_model_)) + sizeof(cpu_num_thread_));
  // @@protoc_insertion_point(copy_constructor:mediapipe.InferenceCalculatorOptions)
}
// Common constructor body: points the string field at the shared empty
// string, zero-fills the member range delegate_ .. use_nnapi_ in one
// memset (layout-order dependent), and applies the proto-declared
// default cpu_num_thread = -1.
inline void InferenceCalculatorOptions::SharedCtor() {
model_path_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  model_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
::memset(reinterpret_cast<char*>(this) + static_cast<size_t>(
    reinterpret_cast<char*>(&delegate_) - reinterpret_cast<char*>(this)),
    0, static_cast<size_t>(reinterpret_cast<char*>(&use_nnapi_) -
    reinterpret_cast<char*>(&delegate_)) + sizeof(use_nnapi_));
cpu_num_thread_ = -1;
}
// Destructor: arena-owned instances skip per-object cleanup entirely
// (the arena reclaims their memory in bulk).
InferenceCalculatorOptions::~InferenceCalculatorOptions() {
  // @@protoc_insertion_point(destructor:mediapipe.InferenceCalculatorOptions)
  if (GetArenaForAllocation() != nullptr) return;
  SharedDtor();
  _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Heap cleanup for non-arena instances. The default-instance guard avoids
// deleting submessage pointers of the global default singleton.
inline void InferenceCalculatorOptions::SharedDtor() {
GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
model_path_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
if (this != internal_default_instance()) delete delegate_;
if (this != internal_default_instance()) delete input_output_config_;
}
// Arena destruction hook; generated empty because no field of this
// message requires a destructor call when arena-allocated.
void InferenceCalculatorOptions::ArenaDtor(void* object) {
  InferenceCalculatorOptions* _this = reinterpret_cast< InferenceCalculatorOptions* >(object);
  (void)_this;
}
// No-op: ArenaDtor does nothing, so nothing is registered with the arena.
void InferenceCalculatorOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the byte size most recently computed by ByteSizeLong() so that
// GetCachedSize() can return it without recomputing the serialized size.
void InferenceCalculatorOptions::SetCachedSize(
    int size) const {
  _cached_size_.Set(size);
}
// Resets all fields to defaults. Only fields whose has-bit is set
// (mask 0x07: model_path / delegate / input_output_config) need
// individual clearing; the scalar range is zeroed wholesale and
// cpu_num_thread restored to its declared default of -1.
void InferenceCalculatorOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:mediapipe.InferenceCalculatorOptions)
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x00000007u) {
    if (cached_has_bits & 0x00000001u) {
      model_path_.ClearNonDefaultToEmpty();
    }
    if (cached_has_bits & 0x00000002u) {
      // Submessage objects are kept allocated and merely cleared.
      GOOGLE_DCHECK(delegate_ != nullptr);
      delegate_->Clear();
    }
    if (cached_has_bits & 0x00000004u) {
      GOOGLE_DCHECK(input_output_config_ != nullptr);
      input_output_config_->Clear();
    }
  }
  // Zero the contiguous bool range try_mmap_model_ .. use_nnapi_.
  ::memset(&try_mmap_model_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&use_nnapi_) -
      reinterpret_cast<char*>(&try_mmap_model_)) + sizeof(use_nnapi_));
  cpu_num_thread_ = -1;
  _has_bits_.Clear();
  _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
// Wire-format parser for InferenceCalculatorOptions. Has-bits for scalar
// fields are accumulated locally in `has_bits` and OR-ed into _has_bits_
// once at `message_done` (including on failure paths, which jump there).
// Unknown fields are preserved; malformed input yields nullptr.
const char* InferenceCalculatorOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  _Internal::HasBits has_bits{};
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // optional string model_path = 1;
      case 1:
        // 10 == (field 1 << 3) | wiretype 2 (length-delimited).
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_model_path();
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
          #ifndef NDEBUG
          // Debug-only UTF-8 validation; release builds skip the check.
          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "mediapipe.InferenceCalculatorOptions.model_path");
          #endif  // !NDEBUG
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional bool use_gpu = 2 [default = false, deprecated = true];
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 16)) {
          _Internal::set_has_use_gpu(&has_bits);
          use_gpu_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional bool use_nnapi = 3 [default = false, deprecated = true];
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 24)) {
          _Internal::set_has_use_nnapi(&has_bits);
          use_nnapi_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional int32 cpu_num_thread = 4 [default = -1];
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 32)) {
          _Internal::set_has_cpu_num_thread(&has_bits);
          cpu_num_thread_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional .mediapipe.InferenceCalculatorOptions.Delegate delegate = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 42)) {
          ptr = ctx->ParseMessage(_internal_mutable_delegate(), ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional bool try_mmap_model = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 56)) {
          _Internal::set_has_try_mmap_model(&has_bits);
          try_mmap_model_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      // optional .mediapipe.InferenceCalculatorOptions.InputOutputConfig input_output_config = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 66)) {
          ptr = ctx->ParseMessage(_internal_mutable_input_output_config(), ptr);
          CHK_(ptr);
        } else
          goto handle_unusual;
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag (wiretype 4) terminates this message.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Unknown field: keep the raw bytes so they round-trip on serialize.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  // Commit all has-bits gathered during the parse in a single OR.
  _has_bits_.Or(has_bits);
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
// Serializes the set fields in field-number order (1,2,3,4,5,7,8), gated
// by the cached has-bits. Unknown fields are appended last for round-trip
// fidelity. Returns the write position after the last byte written.
uint8_t* InferenceCalculatorOptions::_InternalSerialize(
    uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:mediapipe.InferenceCalculatorOptions)
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  // optional string model_path = 1;
  if (cached_has_bits & 0x00000001u) {
    // UTF-8 check logs/flags invalid data on the serialize path.
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
      this->_internal_model_path().data(), static_cast<int>(this->_internal_model_path().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
      "mediapipe.InferenceCalculatorOptions.model_path");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_model_path(), target);
  }
  // optional bool use_gpu = 2 [default = false, deprecated = true];
  if (cached_has_bits & 0x00000010u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(2, this->_internal_use_gpu(), target);
  }
  // optional bool use_nnapi = 3 [default = false, deprecated = true];
  if (cached_has_bits & 0x00000020u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->_internal_use_nnapi(), target);
  }
  // optional int32 cpu_num_thread = 4 [default = -1];
  if (cached_has_bits & 0x00000040u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(4, this->_internal_cpu_num_thread(), target);
  }
  // optional .mediapipe.InferenceCalculatorOptions.Delegate delegate = 5;
  if (cached_has_bits & 0x00000002u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(
        5, _Internal::delegate(this), target, stream);
  }
  // optional bool try_mmap_model = 7;
  if (cached_has_bits & 0x00000008u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(7, this->_internal_try_mmap_model(), target);
  }
  // optional .mediapipe.InferenceCalculatorOptions.InputOutputConfig input_output_config = 8;
  if (cached_has_bits & 0x00000004u) {
    target = stream->EnsureSpace(target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(
        8, _Internal::input_output_config(this), target, stream);
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
  }
  // @@protoc_insertion_point(serialize_to_array_end:mediapipe.InferenceCalculatorOptions)
  return target;
}
// Computes the exact serialized size, considering only fields whose
// has-bit is set (mask 0x7f covers all seven fields). Each "1 +" is the
// one-byte tag; bools add one payload byte; the varint size of
// cpu_num_thread is value-dependent. The result is cached for
// GetCachedSize().
size_t InferenceCalculatorOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:mediapipe.InferenceCalculatorOptions)
  size_t total_size = 0;
  uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;
  cached_has_bits = _has_bits_[0];
  if (cached_has_bits & 0x0000007fu) {
    // optional string model_path = 1;
    if (cached_has_bits & 0x00000001u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_model_path());
    }
    // optional .mediapipe.InferenceCalculatorOptions.Delegate delegate = 5;
    if (cached_has_bits & 0x00000002u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *delegate_);
    }
    // optional .mediapipe.InferenceCalculatorOptions.InputOutputConfig input_output_config = 8;
    if (cached_has_bits & 0x00000004u) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *input_output_config_);
    }
    // optional bool try_mmap_model = 7;
    if (cached_has_bits & 0x00000008u) {
      total_size += 1 + 1;
    }
    // optional bool use_gpu = 2 [default = false, deprecated = true];
    if (cached_has_bits & 0x00000010u) {
      total_size += 1 + 1;
    }
    // optional bool use_nnapi = 3 [default = false, deprecated = true];
    if (cached_has_bits & 0x00000020u) {
      total_size += 1 + 1;
    }
    // optional int32 cpu_num_thread = 4 [default = -1];
    if (cached_has_bits & 0x00000040u) {
      total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32SizePlusOne(this->_internal_cpu_num_thread());
    }
  }
  return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);
}
// Type-erased operation table consumed by the generic Message machinery:
// a size-checked copy routine and this type's MergeImpl.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData InferenceCalculatorOptions::_class_data_ = {
    ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck,
    InferenceCalculatorOptions::MergeImpl
};
// Exposes the per-type operation table to the base Message class.
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*InferenceCalculatorOptions::GetClassData() const { return &_class_data_; }
// Type-erased merge entry point referenced from _class_data_: downcasts
// both messages and forwards to the strongly-typed MergeFrom().
void InferenceCalculatorOptions::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to,
                  const ::PROTOBUF_NAMESPACE_ID::Message& from) {
  auto* typed_to = static_cast<InferenceCalculatorOptions*>(to);
  const auto& typed_from =
      static_cast<const InferenceCalculatorOptions&>(from);
  typed_to->MergeFrom(typed_from);
}
// Field-wise merge of `from` into `this`: set fields in `from` overwrite
// scalars and the string, submessages are merged recursively, and `from`'s
// has-bits are OR-ed into ours. `from` must not alias `this`.
void InferenceCalculatorOptions::MergeFrom(const InferenceCalculatorOptions& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:mediapipe.InferenceCalculatorOptions)
  GOOGLE_DCHECK_NE(&from, this);
  uint32_t cached_has_bits = 0;
  (void) cached_has_bits;
  cached_has_bits = from._has_bits_[0];
  if (cached_has_bits & 0x0000007fu) {
    if (cached_has_bits & 0x00000001u) {
      _internal_set_model_path(from._internal_model_path());
    }
    if (cached_has_bits & 0x00000002u) {
      _internal_mutable_delegate()->::mediapipe::InferenceCalculatorOptions_Delegate::MergeFrom(from._internal_delegate());
    }
    if (cached_has_bits & 0x00000004u) {
      _internal_mutable_input_output_config()->::mediapipe::InferenceCalculatorOptions_InputOutputConfig::MergeFrom(from._internal_input_output_config());
    }
    if (cached_has_bits & 0x00000008u) {
      try_mmap_model_ = from.try_mmap_model_;
    }
    if (cached_has_bits & 0x00000010u) {
      use_gpu_ = from.use_gpu_;
    }
    if (cached_has_bits & 0x00000020u) {
      use_nnapi_ = from.use_nnapi_;
    }
    if (cached_has_bits & 0x00000040u) {
      cpu_num_thread_ = from.cpu_num_thread_;
    }
    // Mark every field copied above as present here too.
    _has_bits_[0] |= cached_has_bits;
  }
  _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
// Replaces this message's contents with a copy of `from`.
// Implemented as clear-then-merge; self-copy is a no-op.
void InferenceCalculatorOptions::CopyFrom(const InferenceCalculatorOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:mediapipe.InferenceCalculatorOptions)
  if (&from != this) {
    Clear();
    MergeFrom(from);
  }
}
// Generated as a constant true: the generator emitted no field checks here,
// so every instance is considered fully initialized.
bool InferenceCalculatorOptions::IsInitialized() const {
  return true;
}
// Swaps the full contents of two messages. The string swap is arena-aware;
// the pointer/bool members delegate_ .. use_nnapi_ are exchanged as one
// raw byte range via memswap (depends on the class member layout), and
// cpu_num_thread_ is swapped separately because it has a non-zero default.
void InferenceCalculatorOptions::InternalSwap(InferenceCalculatorOptions* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_has_bits_[0], other->_has_bits_[0]);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
      &model_path_, lhs_arena,
      &other->model_path_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(InferenceCalculatorOptions, use_nnapi_)
      + sizeof(InferenceCalculatorOptions::use_nnapi_)
      - PROTOBUF_FIELD_OFFSET(InferenceCalculatorOptions, delegate_)>(
          reinterpret_cast<char*>(&delegate_),
          reinterpret_cast<char*>(&other->delegate_));
  swap(cpu_num_thread_, other->cpu_num_thread_);
}
// Returns reflection metadata, lazily resolving descriptors for this proto
// file on first use. Index 9 is this message's slot in the file-level
// metadata table.
::PROTOBUF_NAMESPACE_ID::Metadata InferenceCalculatorOptions::GetMetadata() const {
  return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(
      &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_getter, &descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto_once,
      file_level_metadata_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto[9]);
}
// Out-of-line definition of the extension field-number constant for
// compilers that require one (guarded for MSVC versions that reject it).
#if !defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912)
const int InferenceCalculatorOptions::kExtFieldNumber;
#endif
// Registers InferenceCalculatorOptions as an extension of
// mediapipe.CalculatorOptions (wiretype 11 = message, non-repeated).
PROTOBUF_ATTRIBUTE_INIT_PRIORITY ::PROTOBUF_NAMESPACE_ID::internal::ExtensionIdentifier< ::mediapipe::CalculatorOptions,
    ::PROTOBUF_NAMESPACE_ID::internal::MessageTypeTraits< ::mediapipe::InferenceCalculatorOptions >, 11, false >
  InferenceCalculatorOptions::ext(kExtFieldNumber, ::mediapipe::InferenceCalculatorOptions::default_instance());
// @@protoc_insertion_point(namespace_scope)
} // namespace mediapipe
// Explicit specializations of Arena::CreateMaybeMessage for every message
// type generated from this proto file, routing generic arena construction
// to the arena-aware internal factory.
PROTOBUF_NAMESPACE_OPEN
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_Delegate* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_Delegate >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig >(arena);
}
template<> PROTOBUF_NOINLINE ::mediapipe::InferenceCalculatorOptions* Arena::CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions >(Arena* arena) {
  return Arena::CreateMessageInternal< ::mediapipe::InferenceCalculatorOptions >(arena);
}
PROTOBUF_NAMESPACE_CLOSE
// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>