File inference_calculator.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: mediapipe/calculators/tensor/inference_calculator.proto

#ifndef GOOGLE_PROTOBUF_INCLUDED_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto
#define GOOGLE_PROTOBUF_INCLUDED_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto

#include <limits>
#include <string>

#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION < 3019000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3019001 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
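
// The two guards above pin this generated file to a compatible protobuf
// runtime: the first fails the build if the linked headers are older than
// the protoc that generated the file (3.19.x), the second if protoc itself
// was too old for the headers. A sketch of regenerating the file by hand,
// assuming a matching protoc on PATH and the repository root as the working
// directory (in the MediaPipe tree this is normally driven by the build
// system rather than invoked directly):
//
//   protoc --cpp_out=. \
//       mediapipe/calculators/tensor/inference_calculator.proto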

#include <google/protobuf/port_undef.inc>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_bases.h>
#include <google/protobuf/generated_message_table_driven.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>  // IWYU pragma: export
#include <google/protobuf/extension_set.h>  // IWYU pragma: export
#include <google/protobuf/generated_enum_reflection.h>
#include <google/protobuf/unknown_field_set.h>
#include "mediapipe/framework/calculator.pb.h"
#include "mediapipe/framework/calculator_options.pb.h"
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
#define PROTOBUF_INTERNAL_EXPORT_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto
PROTOBUF_NAMESPACE_OPEN
namespace internal {
class AnyMetadata;
}  // namespace internal
PROTOBUF_NAMESPACE_CLOSE

// Internal implementation detail -- do not use these members.
struct TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto {
  static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTableField entries[]
    PROTOBUF_SECTION_VARIABLE(protodesc_cold);
  static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[]
    PROTOBUF_SECTION_VARIABLE(protodesc_cold);
  static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[10]
    PROTOBUF_SECTION_VARIABLE(protodesc_cold);
  static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[];
  static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[];
  static const uint32_t offsets[];
};
extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
namespace mediapipe {
class InferenceCalculatorOptions;
struct InferenceCalculatorOptionsDefaultTypeInternal;
extern InferenceCalculatorOptionsDefaultTypeInternal _InferenceCalculatorOptions_default_instance_;
class InferenceCalculatorOptions_Delegate;
struct InferenceCalculatorOptions_DelegateDefaultTypeInternal;
extern InferenceCalculatorOptions_DelegateDefaultTypeInternal _InferenceCalculatorOptions_Delegate_default_instance_;
class InferenceCalculatorOptions_Delegate_Gpu;
struct InferenceCalculatorOptions_Delegate_GpuDefaultTypeInternal;
extern InferenceCalculatorOptions_Delegate_GpuDefaultTypeInternal _InferenceCalculatorOptions_Delegate_Gpu_default_instance_;
class InferenceCalculatorOptions_Delegate_Nnapi;
struct InferenceCalculatorOptions_Delegate_NnapiDefaultTypeInternal;
extern InferenceCalculatorOptions_Delegate_NnapiDefaultTypeInternal _InferenceCalculatorOptions_Delegate_Nnapi_default_instance_;
class InferenceCalculatorOptions_Delegate_TfLite;
struct InferenceCalculatorOptions_Delegate_TfLiteDefaultTypeInternal;
extern InferenceCalculatorOptions_Delegate_TfLiteDefaultTypeInternal _InferenceCalculatorOptions_Delegate_TfLite_default_instance_;
class InferenceCalculatorOptions_Delegate_Xnnpack;
struct InferenceCalculatorOptions_Delegate_XnnpackDefaultTypeInternal;
extern InferenceCalculatorOptions_Delegate_XnnpackDefaultTypeInternal _InferenceCalculatorOptions_Delegate_Xnnpack_default_instance_;
class InferenceCalculatorOptions_InputOutputConfig;
struct InferenceCalculatorOptions_InputOutputConfigDefaultTypeInternal;
extern InferenceCalculatorOptions_InputOutputConfigDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_default_instance_;
class InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink;
struct InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLinkDefaultTypeInternal;
extern InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLinkDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink_default_instance_;
class InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap;
struct InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMapDefaultTypeInternal;
extern InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMapDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap_default_instance_;
class InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap;
struct InferenceCalculatorOptions_InputOutputConfig_TensorNamesMapDefaultTypeInternal;
extern InferenceCalculatorOptions_InputOutputConfig_TensorNamesMapDefaultTypeInternal _InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap_default_instance_;
}  // namespace mediapipe
PROTOBUF_NAMESPACE_OPEN
template<> ::mediapipe::InferenceCalculatorOptions* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_Delegate* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_Delegate>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_Delegate_Gpu>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_Delegate_TfLite>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_InputOutputConfig>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap>(Arena*);
template<> ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* Arena::CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
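
// The specializations above are the internal hooks that allow these message
// types to be arena-allocated; the public entry point is Arena::CreateMessage.
// A usage sketch (illustrative; it needs the full class definitions below):
//
//   ::google::protobuf::Arena arena;
//   auto* options = ::google::protobuf::Arena::CreateMessage<
//       ::mediapipe::InferenceCalculatorOptions>(&arena);
//   // `options` lives on (and is owned by) `arena`; do not delete it.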
namespace mediapipe {

enum InferenceCalculatorOptions_Delegate_Gpu_Api : int {
  InferenceCalculatorOptions_Delegate_Gpu_Api_ANY = 0,
  InferenceCalculatorOptions_Delegate_Gpu_Api_OPENGL = 1,
  InferenceCalculatorOptions_Delegate_Gpu_Api_OPENCL = 2
};
bool InferenceCalculatorOptions_Delegate_Gpu_Api_IsValid(int value);
constexpr InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu_Api_Api_MIN = InferenceCalculatorOptions_Delegate_Gpu_Api_ANY;
constexpr InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu_Api_Api_MAX = InferenceCalculatorOptions_Delegate_Gpu_Api_OPENCL;
constexpr int InferenceCalculatorOptions_Delegate_Gpu_Api_Api_ARRAYSIZE = InferenceCalculatorOptions_Delegate_Gpu_Api_Api_MAX + 1;

const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* InferenceCalculatorOptions_Delegate_Gpu_Api_descriptor();
template<typename T>
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu_Api_Name(T enum_t_value) {
  static_assert(::std::is_same<T, InferenceCalculatorOptions_Delegate_Gpu_Api>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function InferenceCalculatorOptions_Delegate_Gpu_Api_Name.");
  return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum(
    InferenceCalculatorOptions_Delegate_Gpu_Api_descriptor(), enum_t_value);
}
inline bool InferenceCalculatorOptions_Delegate_Gpu_Api_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, InferenceCalculatorOptions_Delegate_Gpu_Api* value) {
  return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum<InferenceCalculatorOptions_Delegate_Gpu_Api>(
    InferenceCalculatorOptions_Delegate_Gpu_Api_descriptor(), name, value);
}
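
// Usage sketch for the generated enum helpers; the same Name/Parse/IsValid
// triple is emitted for CacheWritingBehavior and InferenceUsage below:
//
//   const std::string& name = ::mediapipe::
//       InferenceCalculatorOptions_Delegate_Gpu_Api_Name(
//           ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api_OPENCL);
//   // name == "OPENCL"
//
//   ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api api;
//   bool ok = ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api_Parse(
//       "OPENGL", &api);
//   // ok == true, api == InferenceCalculatorOptions_Delegate_Gpu_Api_OPENGL
//
//   ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api_IsValid(3);
//   // false: the valid values are 0 (ANY), 1 (OPENGL), and 2 (OPENCL)
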
enum InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior : int {
  InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_NO_WRITE = 0,
  InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_TRY_WRITE = 1,
  InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_WRITE_OR_ERROR = 2
};
bool InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_IsValid(int value);
constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_CacheWritingBehavior_MIN = InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_NO_WRITE;
constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_CacheWritingBehavior_MAX = InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_WRITE_OR_ERROR;
constexpr int InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_CacheWritingBehavior_ARRAYSIZE = InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_CacheWritingBehavior_MAX + 1;

const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_descriptor();
template<typename T>
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_Name(T enum_t_value) {
  static_assert(::std::is_same<T, InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_Name.");
  return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum(
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_descriptor(), enum_t_value);
}
inline bool InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior* value) {
  return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum<InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior>(
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_descriptor(), name, value);
}
enum InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage : int {
  InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_UNSPECIFIED = 0,
  InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_FAST_SINGLE_ANSWER = 1,
  InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_SUSTAINED_SPEED = 2
};
bool InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_IsValid(int value);
constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_InferenceUsage_MIN = InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_UNSPECIFIED;
constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_InferenceUsage_MAX = InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_SUSTAINED_SPEED;
constexpr int InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_InferenceUsage_ARRAYSIZE = InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_InferenceUsage_MAX + 1;

const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_descriptor();
template<typename T>
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_Name(T enum_t_value) {
  static_assert(::std::is_same<T, InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_Name.");
  return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum(
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_descriptor(), enum_t_value);
}
inline bool InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage* value) {
  return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum<InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage>(
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_descriptor(), name, value);
}
// ===================================================================

class InferenceCalculatorOptions_Delegate_TfLite final :
    public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.Delegate.TfLite) */ {
 public:
  inline InferenceCalculatorOptions_Delegate_TfLite() : InferenceCalculatorOptions_Delegate_TfLite(nullptr) {}
  explicit constexpr InferenceCalculatorOptions_Delegate_TfLite(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_Delegate_TfLite(const InferenceCalculatorOptions_Delegate_TfLite& from);
  InferenceCalculatorOptions_Delegate_TfLite(InferenceCalculatorOptions_Delegate_TfLite&& from) noexcept
    : InferenceCalculatorOptions_Delegate_TfLite() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_Delegate_TfLite& operator=(const InferenceCalculatorOptions_Delegate_TfLite& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_Delegate_TfLite& operator=(InferenceCalculatorOptions_Delegate_TfLite&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_Delegate_TfLite& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions_Delegate_TfLite* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_Delegate_TfLite*>(
               &_InferenceCalculatorOptions_Delegate_TfLite_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    0;

  friend void swap(InferenceCalculatorOptions_Delegate_TfLite& a, InferenceCalculatorOptions_Delegate_TfLite& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_Delegate_TfLite* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_Delegate_TfLite* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_Delegate_TfLite* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_Delegate_TfLite>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom;
  inline void CopyFrom(const InferenceCalculatorOptions_Delegate_TfLite& from) {
    ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(this, from);
  }
  using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_Delegate_TfLite& from) {
    ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(this, from);
  }
  public:

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.Delegate.TfLite";
  }
  protected:
  explicit InferenceCalculatorOptions_Delegate_TfLite(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.TfLite)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
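
// InferenceCalculatorOptions_Delegate_TfLite carries no fields (it derives
// from ZeroFieldsBase); its only job is to act as a marker inside the
// Delegate oneof defined further down. A selection sketch:
//
//   ::mediapipe::InferenceCalculatorOptions_Delegate delegate;
//   delegate.mutable_tflite();  // chooses the TfLite delegate case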
// -------------------------------------------------------------------

class InferenceCalculatorOptions_Delegate_Gpu final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.Delegate.Gpu) */ {
 public:
  inline InferenceCalculatorOptions_Delegate_Gpu() : InferenceCalculatorOptions_Delegate_Gpu(nullptr) {}
  ~InferenceCalculatorOptions_Delegate_Gpu() override;
  explicit constexpr InferenceCalculatorOptions_Delegate_Gpu(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_Delegate_Gpu(const InferenceCalculatorOptions_Delegate_Gpu& from);
  InferenceCalculatorOptions_Delegate_Gpu(InferenceCalculatorOptions_Delegate_Gpu&& from) noexcept
    : InferenceCalculatorOptions_Delegate_Gpu() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_Delegate_Gpu& operator=(const InferenceCalculatorOptions_Delegate_Gpu& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_Delegate_Gpu& operator=(InferenceCalculatorOptions_Delegate_Gpu&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_Delegate_Gpu& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions_Delegate_Gpu* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_Delegate_Gpu*>(
               &_InferenceCalculatorOptions_Delegate_Gpu_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    1;

  friend void swap(InferenceCalculatorOptions_Delegate_Gpu& a, InferenceCalculatorOptions_Delegate_Gpu& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_Delegate_Gpu* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_Delegate_Gpu* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_Delegate_Gpu* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_Delegate_Gpu>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_Delegate_Gpu& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_Delegate_Gpu& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_Delegate_Gpu* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.Delegate.Gpu";
  }
  protected:
  explicit InferenceCalculatorOptions_Delegate_Gpu(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  typedef InferenceCalculatorOptions_Delegate_Gpu_Api Api;
  static constexpr Api ANY =
    InferenceCalculatorOptions_Delegate_Gpu_Api_ANY;
  static constexpr Api OPENGL =
    InferenceCalculatorOptions_Delegate_Gpu_Api_OPENGL;
  static constexpr Api OPENCL =
    InferenceCalculatorOptions_Delegate_Gpu_Api_OPENCL;
  static inline bool Api_IsValid(int value) {
    return InferenceCalculatorOptions_Delegate_Gpu_Api_IsValid(value);
  }
  static constexpr Api Api_MIN =
    InferenceCalculatorOptions_Delegate_Gpu_Api_Api_MIN;
  static constexpr Api Api_MAX =
    InferenceCalculatorOptions_Delegate_Gpu_Api_Api_MAX;
  static constexpr int Api_ARRAYSIZE =
    InferenceCalculatorOptions_Delegate_Gpu_Api_Api_ARRAYSIZE;
  static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor*
  Api_descriptor() {
    return InferenceCalculatorOptions_Delegate_Gpu_Api_descriptor();
  }
  template<typename T>
  static inline const std::string& Api_Name(T enum_t_value) {
    static_assert(::std::is_same<T, Api>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function Api_Name.");
    return InferenceCalculatorOptions_Delegate_Gpu_Api_Name(enum_t_value);
  }
  static inline bool Api_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      Api* value) {
    return InferenceCalculatorOptions_Delegate_Gpu_Api_Parse(name, value);
  }

  typedef InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior CacheWritingBehavior;
  static constexpr CacheWritingBehavior NO_WRITE =
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_NO_WRITE;
  static constexpr CacheWritingBehavior TRY_WRITE =
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_TRY_WRITE;
  static constexpr CacheWritingBehavior WRITE_OR_ERROR =
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_WRITE_OR_ERROR;
  static inline bool CacheWritingBehavior_IsValid(int value) {
    return InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_IsValid(value);
  }
  static constexpr CacheWritingBehavior CacheWritingBehavior_MIN =
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_CacheWritingBehavior_MIN;
  static constexpr CacheWritingBehavior CacheWritingBehavior_MAX =
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_CacheWritingBehavior_MAX;
  static constexpr int CacheWritingBehavior_ARRAYSIZE =
    InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_CacheWritingBehavior_ARRAYSIZE;
  static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor*
  CacheWritingBehavior_descriptor() {
    return InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_descriptor();
  }
  template<typename T>
  static inline const std::string& CacheWritingBehavior_Name(T enum_t_value) {
    static_assert(::std::is_same<T, CacheWritingBehavior>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function CacheWritingBehavior_Name.");
    return InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_Name(enum_t_value);
  }
  static inline bool CacheWritingBehavior_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      CacheWritingBehavior* value) {
    return InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_Parse(name, value);
  }

  typedef InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceUsage;
  static constexpr InferenceUsage UNSPECIFIED =
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_UNSPECIFIED;
  static constexpr InferenceUsage FAST_SINGLE_ANSWER =
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_FAST_SINGLE_ANSWER;
  static constexpr InferenceUsage SUSTAINED_SPEED =
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_SUSTAINED_SPEED;
  static inline bool InferenceUsage_IsValid(int value) {
    return InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_IsValid(value);
  }
  static constexpr InferenceUsage InferenceUsage_MIN =
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_InferenceUsage_MIN;
  static constexpr InferenceUsage InferenceUsage_MAX =
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_InferenceUsage_MAX;
  static constexpr int InferenceUsage_ARRAYSIZE =
    InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_InferenceUsage_ARRAYSIZE;
  static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor*
  InferenceUsage_descriptor() {
    return InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_descriptor();
  }
  template<typename T>
  static inline const std::string& InferenceUsage_Name(T enum_t_value) {
    static_assert(::std::is_same<T, InferenceUsage>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function InferenceUsage_Name.");
    return InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_Name(enum_t_value);
  }
  static inline bool InferenceUsage_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      InferenceUsage* value) {
    return InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_Parse(name, value);
  }
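
  // The class-scoped aliases above re-export the flat enum helpers, so call
  // sites can use the shorter spelling, e.g.:
  //
  //   InferenceCalculatorOptions_Delegate_Gpu::Api_Name(
  //       InferenceCalculatorOptions_Delegate_Gpu::OPENCL);  // "OPENCL"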

  // accessors -------------------------------------------------------

  enum : int {
    kCachedKernelPathFieldNumber = 2,
    kSerializedModelDirFieldNumber = 7,
    kModelTokenFieldNumber = 8,
    kUseAdvancedGpuApiFieldNumber = 1,
    kApiFieldNumber = 4,
    kCacheWritingBehaviorFieldNumber = 10,
    kAllowPrecisionLossFieldNumber = 3,
    kUsageFieldNumber = 5,
  };
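
  // The constants above expose each proto field number, which pairs with
  // the reflection API. A sketch (illustrative):
  //
  //   const auto* field =
  //       InferenceCalculatorOptions_Delegate_Gpu::descriptor()
  //           ->FindFieldByNumber(
  //               InferenceCalculatorOptions_Delegate_Gpu::kApiFieldNumber);
  //   // field->name() == "api"
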
  // optional string cached_kernel_path = 2;
  bool has_cached_kernel_path() const;
  private:
  bool _internal_has_cached_kernel_path() const;
  public:
  void clear_cached_kernel_path();
  const std::string& cached_kernel_path() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_cached_kernel_path(ArgT0&& arg0, ArgT... args);
  std::string* mutable_cached_kernel_path();
  PROTOBUF_NODISCARD std::string* release_cached_kernel_path();
  void set_allocated_cached_kernel_path(std::string* cached_kernel_path);
  private:
  const std::string& _internal_cached_kernel_path() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_cached_kernel_path(const std::string& value);
  std::string* _internal_mutable_cached_kernel_path();
  public:

  // optional string serialized_model_dir = 7;
  bool has_serialized_model_dir() const;
  private:
  bool _internal_has_serialized_model_dir() const;
  public:
  void clear_serialized_model_dir();
  const std::string& serialized_model_dir() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_serialized_model_dir(ArgT0&& arg0, ArgT... args);
  std::string* mutable_serialized_model_dir();
  PROTOBUF_NODISCARD std::string* release_serialized_model_dir();
  void set_allocated_serialized_model_dir(std::string* serialized_model_dir);
  private:
  const std::string& _internal_serialized_model_dir() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_serialized_model_dir(const std::string& value);
  std::string* _internal_mutable_serialized_model_dir();
  public:

  // optional string model_token = 8;
  bool has_model_token() const;
  private:
  bool _internal_has_model_token() const;
  public:
  void clear_model_token();
  const std::string& model_token() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_model_token(ArgT0&& arg0, ArgT... args);
  std::string* mutable_model_token();
  PROTOBUF_NODISCARD std::string* release_model_token();
  void set_allocated_model_token(std::string* model_token);
  private:
  const std::string& _internal_model_token() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_model_token(const std::string& value);
  std::string* _internal_mutable_model_token();
  public:

  // optional bool use_advanced_gpu_api = 1 [default = false];
  bool has_use_advanced_gpu_api() const;
  private:
  bool _internal_has_use_advanced_gpu_api() const;
  public:
  void clear_use_advanced_gpu_api();
  bool use_advanced_gpu_api() const;
  void set_use_advanced_gpu_api(bool value);
  private:
  bool _internal_use_advanced_gpu_api() const;
  void _internal_set_use_advanced_gpu_api(bool value);
  public:

  // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.Api api = 4 [default = ANY];
  bool has_api() const;
  private:
  bool _internal_has_api() const;
  public:
  void clear_api();
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api api() const;
  void set_api(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api value);
  private:
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api _internal_api() const;
  void _internal_set_api(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api value);
  public:

  // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.CacheWritingBehavior cache_writing_behavior = 10 [default = WRITE_OR_ERROR];
  bool has_cache_writing_behavior() const;
  private:
  bool _internal_has_cache_writing_behavior() const;
  public:
  void clear_cache_writing_behavior();
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior cache_writing_behavior() const;
  void set_cache_writing_behavior(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior value);
  private:
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior _internal_cache_writing_behavior() const;
  void _internal_set_cache_writing_behavior(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior value);
  public:

  // optional bool allow_precision_loss = 3 [default = true];
  bool has_allow_precision_loss() const;
  private:
  bool _internal_has_allow_precision_loss() const;
  public:
  void clear_allow_precision_loss();
  bool allow_precision_loss() const;
  void set_allow_precision_loss(bool value);
  private:
  bool _internal_allow_precision_loss() const;
  void _internal_set_allow_precision_loss(bool value);
  public:

  // optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.InferenceUsage usage = 5 [default = SUSTAINED_SPEED];
  bool has_usage() const;
  private:
  bool _internal_has_usage() const;
  public:
  void clear_usage();
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage usage() const;
  void set_usage(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage value);
  private:
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage _internal_usage() const;
  void _internal_set_usage(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage value);
  public:

  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr cached_kernel_path_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr serialized_model_dir_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr model_token_;
  bool use_advanced_gpu_api_;
  int api_;
  int cache_writing_behavior_;
  bool allow_precision_loss_;
  int usage_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
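
// A configuration sketch for the GPU delegate; the cache path below is a
// made-up example value:
//
//   ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu gpu;
//   gpu.set_use_advanced_gpu_api(true);
//   gpu.set_api(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu::OPENCL);
//   gpu.set_cached_kernel_path("/tmp/kernel_cache");  // hypothetical path
//   gpu.set_usage(
//       ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu::FAST_SINGLE_ANSWER);
//   // Unset fields keep their proto defaults: allow_precision_loss() stays
//   // true and cache_writing_behavior() stays WRITE_OR_ERROR.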
// -------------------------------------------------------------------

class InferenceCalculatorOptions_Delegate_Nnapi final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi) */ {
 public:
  inline InferenceCalculatorOptions_Delegate_Nnapi() : InferenceCalculatorOptions_Delegate_Nnapi(nullptr) {}
  ~InferenceCalculatorOptions_Delegate_Nnapi() override;
  explicit constexpr InferenceCalculatorOptions_Delegate_Nnapi(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_Delegate_Nnapi(const InferenceCalculatorOptions_Delegate_Nnapi& from);
  InferenceCalculatorOptions_Delegate_Nnapi(InferenceCalculatorOptions_Delegate_Nnapi&& from) noexcept
    : InferenceCalculatorOptions_Delegate_Nnapi() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_Delegate_Nnapi& operator=(const InferenceCalculatorOptions_Delegate_Nnapi& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_Delegate_Nnapi& operator=(InferenceCalculatorOptions_Delegate_Nnapi&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_Delegate_Nnapi& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions_Delegate_Nnapi* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_Delegate_Nnapi*>(
               &_InferenceCalculatorOptions_Delegate_Nnapi_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    2;

  friend void swap(InferenceCalculatorOptions_Delegate_Nnapi& a, InferenceCalculatorOptions_Delegate_Nnapi& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_Delegate_Nnapi* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_Delegate_Nnapi* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_Delegate_Nnapi* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_Delegate_Nnapi>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_Delegate_Nnapi& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_Delegate_Nnapi& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_Delegate_Nnapi* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.Delegate.Nnapi";
  }
  protected:
  explicit InferenceCalculatorOptions_Delegate_Nnapi(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kCacheDirFieldNumber = 1,
    kModelTokenFieldNumber = 2,
    kAcceleratorNameFieldNumber = 3,
  };
  // optional string cache_dir = 1;
  bool has_cache_dir() const;
  private:
  bool _internal_has_cache_dir() const;
  public:
  void clear_cache_dir();
  const std::string& cache_dir() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_cache_dir(ArgT0&& arg0, ArgT... args);
  std::string* mutable_cache_dir();
  PROTOBUF_NODISCARD std::string* release_cache_dir();
  void set_allocated_cache_dir(std::string* cache_dir);
  private:
  const std::string& _internal_cache_dir() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_cache_dir(const std::string& value);
  std::string* _internal_mutable_cache_dir();
  public:

  // optional string model_token = 2;
  bool has_model_token() const;
  private:
  bool _internal_has_model_token() const;
  public:
  void clear_model_token();
  const std::string& model_token() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_model_token(ArgT0&& arg0, ArgT... args);
  std::string* mutable_model_token();
  PROTOBUF_NODISCARD std::string* release_model_token();
  void set_allocated_model_token(std::string* model_token);
  private:
  const std::string& _internal_model_token() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_model_token(const std::string& value);
  std::string* _internal_mutable_model_token();
  public:

  // optional string accelerator_name = 3;
  bool has_accelerator_name() const;
  private:
  bool _internal_has_accelerator_name() const;
  public:
  void clear_accelerator_name();
  const std::string& accelerator_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_accelerator_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_accelerator_name();
  PROTOBUF_NODISCARD std::string* release_accelerator_name();
  void set_allocated_accelerator_name(std::string* accelerator_name);
  private:
  const std::string& _internal_accelerator_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_accelerator_name(const std::string& value);
  std::string* _internal_mutable_accelerator_name();
  public:

  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr cache_dir_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr model_token_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr accelerator_name_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
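
// A configuration sketch for the NNAPI delegate; every value shown is
// illustrative only:
//
//   ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi nnapi;
//   nnapi.set_cache_dir("/data/local/tmp/nnapi_cache");  // hypothetical
//   nnapi.set_model_token("my_model_v1");                // hypothetical
//   nnapi.set_accelerator_name("example-accelerator");   // hypothetical
//   // has_accelerator_name() is now true; clear_accelerator_name() would
//   // reset both the value and its has-bit.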
// -------------------------------------------------------------------

class InferenceCalculatorOptions_Delegate_Xnnpack final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack) */ {
 public:
  inline InferenceCalculatorOptions_Delegate_Xnnpack() : InferenceCalculatorOptions_Delegate_Xnnpack(nullptr) {}
  ~InferenceCalculatorOptions_Delegate_Xnnpack() override;
  explicit constexpr InferenceCalculatorOptions_Delegate_Xnnpack(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_Delegate_Xnnpack(const InferenceCalculatorOptions_Delegate_Xnnpack& from);
  InferenceCalculatorOptions_Delegate_Xnnpack(InferenceCalculatorOptions_Delegate_Xnnpack&& from) noexcept
    : InferenceCalculatorOptions_Delegate_Xnnpack() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_Delegate_Xnnpack& operator=(const InferenceCalculatorOptions_Delegate_Xnnpack& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_Delegate_Xnnpack& operator=(InferenceCalculatorOptions_Delegate_Xnnpack&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_Delegate_Xnnpack& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions_Delegate_Xnnpack* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_Delegate_Xnnpack*>(
               &_InferenceCalculatorOptions_Delegate_Xnnpack_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    3;

  friend void swap(InferenceCalculatorOptions_Delegate_Xnnpack& a, InferenceCalculatorOptions_Delegate_Xnnpack& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_Delegate_Xnnpack* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_Delegate_Xnnpack* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_Delegate_Xnnpack* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_Delegate_Xnnpack>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_Delegate_Xnnpack& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_Delegate_Xnnpack& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_Delegate_Xnnpack* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack";
  }
  protected:
  explicit InferenceCalculatorOptions_Delegate_Xnnpack(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kNumThreadsFieldNumber = 1,
  };
  // optional int32 num_threads = 1 [default = -1];
  bool has_num_threads() const;
  private:
  bool _internal_has_num_threads() const;
  public:
  void clear_num_threads();
  int32_t num_threads() const;
  void set_num_threads(int32_t value);
  private:
  int32_t _internal_num_threads() const;
  void _internal_set_num_threads(int32_t value);
  public:

  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  int32_t num_threads_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
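
// A sketch for the XNNPACK delegate: num_threads defaults to -1 (typically
// read as "let the delegate choose"), and setting it overrides that:
//
//   ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack xnnpack;
//   xnnpack.set_num_threads(4);
//   // xnnpack.has_num_threads() == true, xnnpack.num_threads() == 4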
// -------------------------------------------------------------------

class InferenceCalculatorOptions_Delegate final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.Delegate) */ {
 public:
  inline InferenceCalculatorOptions_Delegate() : InferenceCalculatorOptions_Delegate(nullptr) {}
  ~InferenceCalculatorOptions_Delegate() override;
  explicit constexpr InferenceCalculatorOptions_Delegate(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_Delegate(const InferenceCalculatorOptions_Delegate& from);
  InferenceCalculatorOptions_Delegate(InferenceCalculatorOptions_Delegate&& from) noexcept
    : InferenceCalculatorOptions_Delegate() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_Delegate& operator=(const InferenceCalculatorOptions_Delegate& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_Delegate& operator=(InferenceCalculatorOptions_Delegate&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_Delegate& default_instance() {
    return *internal_default_instance();
  }
  enum DelegateCase {
    kTflite = 1,
    kGpu = 2,
    kNnapi = 3,
    kXnnpack = 4,
    DELEGATE_NOT_SET = 0,
  };
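
  // The oneof case should be inspected before reading a member accessor.
  // A dispatch sketch:
  //
  //   using Delegate = ::mediapipe::InferenceCalculatorOptions_Delegate;
  //   switch (delegate.delegate_case()) {
  //     case Delegate::kTflite:  /* delegate.tflite() is set */  break;
  //     case Delegate::kGpu:     /* delegate.gpu() is set */     break;
  //     case Delegate::kNnapi:   /* delegate.nnapi() is set */   break;
  //     case Delegate::kXnnpack: /* delegate.xnnpack() is set */ break;
  //     case Delegate::DELEGATE_NOT_SET: break;
  //   }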

  static inline const InferenceCalculatorOptions_Delegate* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_Delegate*>(
               &_InferenceCalculatorOptions_Delegate_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    4;

  friend void swap(InferenceCalculatorOptions_Delegate& a, InferenceCalculatorOptions_Delegate& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_Delegate* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_Delegate* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_Delegate* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_Delegate>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_Delegate& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_Delegate& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_Delegate* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.Delegate";
  }
  protected:
  explicit InferenceCalculatorOptions_Delegate(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  typedef InferenceCalculatorOptions_Delegate_TfLite TfLite;
  typedef InferenceCalculatorOptions_Delegate_Gpu Gpu;
  typedef InferenceCalculatorOptions_Delegate_Nnapi Nnapi;
  typedef InferenceCalculatorOptions_Delegate_Xnnpack Xnnpack;

  // accessors -------------------------------------------------------

  enum : int {
    kTfliteFieldNumber = 1,
    kGpuFieldNumber = 2,
    kNnapiFieldNumber = 3,
    kXnnpackFieldNumber = 4,
  };
  // .mediapipe.InferenceCalculatorOptions.Delegate.TfLite tflite = 1;
  bool has_tflite() const;
  private:
  bool _internal_has_tflite() const;
  public:
  void clear_tflite();
  const ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite& tflite() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* release_tflite();
  ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* mutable_tflite();
  void set_allocated_tflite(::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* tflite);
  private:
  const ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite& _internal_tflite() const;
  ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* _internal_mutable_tflite();
  public:
  void unsafe_arena_set_allocated_tflite(
      ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* tflite);
  ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* unsafe_arena_release_tflite();

  // .mediapipe.InferenceCalculatorOptions.Delegate.Gpu gpu = 2;
  bool has_gpu() const;
  private:
  bool _internal_has_gpu() const;
  public:
  void clear_gpu();
  const ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu& gpu() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* release_gpu();
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* mutable_gpu();
  void set_allocated_gpu(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* gpu);
  private:
  const ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu& _internal_gpu() const;
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* _internal_mutable_gpu();
  public:
  void unsafe_arena_set_allocated_gpu(
      ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* gpu);
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* unsafe_arena_release_gpu();

  // .mediapipe.InferenceCalculatorOptions.Delegate.Nnapi nnapi = 3;
  bool has_nnapi() const;
  private:
  bool _internal_has_nnapi() const;
  public:
  void clear_nnapi();
  const ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi& nnapi() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* release_nnapi();
  ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* mutable_nnapi();
  void set_allocated_nnapi(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* nnapi);
  private:
  const ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi& _internal_nnapi() const;
  ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* _internal_mutable_nnapi();
  public:
  void unsafe_arena_set_allocated_nnapi(
      ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* nnapi);
  ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* unsafe_arena_release_nnapi();

  // .mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack xnnpack = 4;
  bool has_xnnpack() const;
  private:
  bool _internal_has_xnnpack() const;
  public:
  void clear_xnnpack();
  const ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack& xnnpack() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* release_xnnpack();
  ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* mutable_xnnpack();
  void set_allocated_xnnpack(::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* xnnpack);
  private:
  const ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack& _internal_xnnpack() const;
  ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* _internal_mutable_xnnpack();
  public:
  void unsafe_arena_set_allocated_xnnpack(
      ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* xnnpack);
  ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* unsafe_arena_release_xnnpack();

  void clear_delegate();
  DelegateCase delegate_case() const;
  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate)
 private:
  class _Internal;
  void set_has_tflite();
  void set_has_gpu();
  void set_has_nnapi();
  void set_has_xnnpack();

  inline bool has_delegate() const;
  inline void clear_has_delegate();

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  union DelegateUnion {
    constexpr DelegateUnion() : _constinit_{} {}
    ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
    ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* tflite_;
    ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* gpu_;
    ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* nnapi_;
    ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* xnnpack_;
  } delegate_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  uint32_t _oneof_case_[1];

  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
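
// Usage sketch (illustrative, not part of the generated header). The four
// delegate sub-messages live in a single oneof, so selecting one case
// releases whichever case was previously set:
//
//   mediapipe::InferenceCalculatorOptions_Delegate delegate;
//   delegate.mutable_gpu();             // selects the gpu case
//   bool gpu_set = delegate.has_gpu();  // true
//   delegate.mutable_xnnpack();         // switches cases; the gpu message is freed
//   gpu_set = delegate.has_gpu();       // now false
//   delegate.clear_delegate();          // back to DELEGATE_NOT_SET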
// -------------------------------------------------------------------

class InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap) */ {
 public:
  inline InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap() : InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(nullptr) {}
  ~InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap() override;
  explicit constexpr InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& from);
  InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap&& from) noexcept
    : InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& operator=(const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& operator=(InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap*>(
               &_InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    5;

  friend void swap(InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& a, InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap";
  }
  protected:
  explicit InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kModelTensorIndicesFieldNumber = 1,
  };
  // repeated int32 model_tensor_indices = 1 [packed = true];
  int model_tensor_indices_size() const;
  private:
  int _internal_model_tensor_indices_size() const;
  public:
  void clear_model_tensor_indices();
  private:
  int32_t _internal_model_tensor_indices(int index) const;
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >&
      _internal_model_tensor_indices() const;
  void _internal_add_model_tensor_indices(int32_t value);
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >*
      _internal_mutable_model_tensor_indices();
  public:
  int32_t model_tensor_indices(int index) const;
  void set_model_tensor_indices(int index, int32_t value);
  void add_model_tensor_indices(int32_t value);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >&
      model_tensor_indices() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >*
      mutable_model_tensor_indices();

  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t > model_tensor_indices_;
  mutable std::atomic<int> _model_tensor_indices_cached_byte_size_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
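
// Usage sketch (illustrative, not emitted by protoc). The message wraps a
// single packed `repeated int32 model_tensor_indices` field, so the usual
// repeated-field accessors apply:
//
//   mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap map;
//   map.add_model_tensor_indices(2);
//   map.add_model_tensor_indices(0);
//   int count = map.model_tensor_indices_size();  // 2
//   int32_t first = map.model_tensor_indices(0);  // 2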
// -------------------------------------------------------------------

class InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap) */ {
 public:
  inline InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap() : InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(nullptr) {}
  ~InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap() override;
  explicit constexpr InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& from);
  InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap&& from) noexcept
    : InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& operator=(const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& operator=(InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap*>(
               &_InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    6;

  friend void swap(InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& a, InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap";
  }
  protected:
  explicit InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kTensorNamesFieldNumber = 1,
  };
  // repeated string tensor_names = 1;
  int tensor_names_size() const;
  private:
  int _internal_tensor_names_size() const;
  public:
  void clear_tensor_names();
  const std::string& tensor_names(int index) const;
  std::string* mutable_tensor_names(int index);
  void set_tensor_names(int index, const std::string& value);
  void set_tensor_names(int index, std::string&& value);
  void set_tensor_names(int index, const char* value);
  void set_tensor_names(int index, const char* value, size_t size);
  std::string* add_tensor_names();
  void add_tensor_names(const std::string& value);
  void add_tensor_names(std::string&& value);
  void add_tensor_names(const char* value);
  void add_tensor_names(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& tensor_names() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_tensor_names();
  private:
  const std::string& _internal_tensor_names(int index) const;
  std::string* _internal_add_tensor_names();
  public:

  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> tensor_names_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
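
// Usage sketch (illustrative; the tensor names below are made up). The
// message wraps a single `repeated string tensor_names` field:
//
//   mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap map;
//   map.add_tensor_names("input_ids");
//   map.add_tensor_names("attention_mask");
//   const std::string& name = map.tensor_names(0);  // "input_ids"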
// -------------------------------------------------------------------

class InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink) */ {
 public:
  inline InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink() : InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(nullptr) {}
  ~InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink() override;
  explicit constexpr InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& from);
  InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink&& from) noexcept
    : InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& operator=(const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& operator=(InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink*>(
               &_InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    7;

  friend void swap(InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& a, InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink";
  }
  protected:
  explicit InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kFromOutputTensorNameFieldNumber = 1,
    kToInputTensorNameFieldNumber = 2,
  };
  // optional string from_output_tensor_name = 1;
  bool has_from_output_tensor_name() const;
  private:
  bool _internal_has_from_output_tensor_name() const;
  public:
  void clear_from_output_tensor_name();
  const std::string& from_output_tensor_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_from_output_tensor_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_from_output_tensor_name();
  PROTOBUF_NODISCARD std::string* release_from_output_tensor_name();
  void set_allocated_from_output_tensor_name(std::string* from_output_tensor_name);
  private:
  const std::string& _internal_from_output_tensor_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_from_output_tensor_name(const std::string& value);
  std::string* _internal_mutable_from_output_tensor_name();
  public:

  // optional string to_input_tensor_name = 2;
  bool has_to_input_tensor_name() const;
  private:
  bool _internal_has_to_input_tensor_name() const;
  public:
  void clear_to_input_tensor_name();
  const std::string& to_input_tensor_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_to_input_tensor_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_to_input_tensor_name();
  PROTOBUF_NODISCARD std::string* release_to_input_tensor_name();
  void set_allocated_to_input_tensor_name(std::string* to_input_tensor_name);
  private:
  const std::string& _internal_to_input_tensor_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_to_input_tensor_name(const std::string& value);
  std::string* _internal_mutable_to_input_tensor_name();
  public:

  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr from_output_tensor_name_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr to_input_tensor_name_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
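
// Usage sketch (illustrative; the tensor names are made up). A link pairs an
// output tensor with the input tensor it should feed back into, which is how
// recurrent state is carried between inference runs:
//
//   mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink link;
//   link.set_from_output_tensor_name("recurrent_state_out");
//   link.set_to_input_tensor_name("recurrent_state_in");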
// -------------------------------------------------------------------

class InferenceCalculatorOptions_InputOutputConfig final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions.InputOutputConfig) */ {
 public:
  inline InferenceCalculatorOptions_InputOutputConfig() : InferenceCalculatorOptions_InputOutputConfig(nullptr) {}
  ~InferenceCalculatorOptions_InputOutputConfig() override;
  explicit constexpr InferenceCalculatorOptions_InputOutputConfig(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions_InputOutputConfig(const InferenceCalculatorOptions_InputOutputConfig& from);
  InferenceCalculatorOptions_InputOutputConfig(InferenceCalculatorOptions_InputOutputConfig&& from) noexcept
    : InferenceCalculatorOptions_InputOutputConfig() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions_InputOutputConfig& operator=(const InferenceCalculatorOptions_InputOutputConfig& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions_InputOutputConfig& operator=(InferenceCalculatorOptions_InputOutputConfig&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions_InputOutputConfig& default_instance() {
    return *internal_default_instance();
  }
  enum InputTensorMapCase {
    kInputTensorIndicesMap = 1,
    kInputTensorNamesMap = 3,
    INPUTTENSORMAP_NOT_SET = 0,
  };

  enum OutputTensorMapCase {
    kOutputTensorIndicesMap = 2,
    kOutputTensorNamesMap = 4,
    OUTPUTTENSORMAP_NOT_SET = 0,
  };

  static inline const InferenceCalculatorOptions_InputOutputConfig* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions_InputOutputConfig*>(
               &_InferenceCalculatorOptions_InputOutputConfig_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    8;

  friend void swap(InferenceCalculatorOptions_InputOutputConfig& a, InferenceCalculatorOptions_InputOutputConfig& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions_InputOutputConfig* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions_InputOutputConfig* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions_InputOutputConfig* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions_InputOutputConfig>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions_InputOutputConfig& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions_InputOutputConfig& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions_InputOutputConfig* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions.InputOutputConfig";
  }
  protected:
  explicit InferenceCalculatorOptions_InputOutputConfig(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  typedef InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap TensorIndicesMap;
  typedef InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap TensorNamesMap;
  typedef InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink FeedbackTensorLink;

  // accessors -------------------------------------------------------

  enum : int {
    kFeedbackTensorLinksFieldNumber = 5,
    kInputTensorIndicesMapFieldNumber = 1,
    kInputTensorNamesMapFieldNumber = 3,
    kOutputTensorIndicesMapFieldNumber = 2,
    kOutputTensorNamesMapFieldNumber = 4,
  };
  // repeated .mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink feedback_tensor_links = 5;
  int feedback_tensor_links_size() const;
  private:
  int _internal_feedback_tensor_links_size() const;
  public:
  void clear_feedback_tensor_links();
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* mutable_feedback_tensor_links(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink >*
      mutable_feedback_tensor_links();
  private:
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& _internal_feedback_tensor_links(int index) const;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* _internal_add_feedback_tensor_links();
  public:
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& feedback_tensor_links(int index) const;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* add_feedback_tensor_links();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink >&
      feedback_tensor_links() const;

  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap input_tensor_indices_map = 1;
  bool has_input_tensor_indices_map() const;
  private:
  bool _internal_has_input_tensor_indices_map() const;
  public:
  void clear_input_tensor_indices_map();
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& input_tensor_indices_map() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* release_input_tensor_indices_map();
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* mutable_input_tensor_indices_map();
  void set_allocated_input_tensor_indices_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* input_tensor_indices_map);
  private:
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& _internal_input_tensor_indices_map() const;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* _internal_mutable_input_tensor_indices_map();
  public:
  void unsafe_arena_set_allocated_input_tensor_indices_map(
      ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* input_tensor_indices_map);
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* unsafe_arena_release_input_tensor_indices_map();

  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap input_tensor_names_map = 3;
  bool has_input_tensor_names_map() const;
  private:
  bool _internal_has_input_tensor_names_map() const;
  public:
  void clear_input_tensor_names_map();
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& input_tensor_names_map() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* release_input_tensor_names_map();
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* mutable_input_tensor_names_map();
  void set_allocated_input_tensor_names_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* input_tensor_names_map);
  private:
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& _internal_input_tensor_names_map() const;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* _internal_mutable_input_tensor_names_map();
  public:
  void unsafe_arena_set_allocated_input_tensor_names_map(
      ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* input_tensor_names_map);
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* unsafe_arena_release_input_tensor_names_map();

  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap output_tensor_indices_map = 2;
  bool has_output_tensor_indices_map() const;
  private:
  bool _internal_has_output_tensor_indices_map() const;
  public:
  void clear_output_tensor_indices_map();
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& output_tensor_indices_map() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* release_output_tensor_indices_map();
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* mutable_output_tensor_indices_map();
  void set_allocated_output_tensor_indices_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* output_tensor_indices_map);
  private:
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& _internal_output_tensor_indices_map() const;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* _internal_mutable_output_tensor_indices_map();
  public:
  void unsafe_arena_set_allocated_output_tensor_indices_map(
      ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* output_tensor_indices_map);
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* unsafe_arena_release_output_tensor_indices_map();

  // .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap output_tensor_names_map = 4;
  bool has_output_tensor_names_map() const;
  private:
  bool _internal_has_output_tensor_names_map() const;
  public:
  void clear_output_tensor_names_map();
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& output_tensor_names_map() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* release_output_tensor_names_map();
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* mutable_output_tensor_names_map();
  void set_allocated_output_tensor_names_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* output_tensor_names_map);
  private:
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& _internal_output_tensor_names_map() const;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* _internal_mutable_output_tensor_names_map();
  public:
  void unsafe_arena_set_allocated_output_tensor_names_map(
      ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* output_tensor_names_map);
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* unsafe_arena_release_output_tensor_names_map();

  void clear_InputTensorMap();
  InputTensorMapCase InputTensorMap_case() const;
  void clear_OutputTensorMap();
  OutputTensorMapCase OutputTensorMap_case() const;
  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
 private:
  class _Internal;
  void set_has_input_tensor_indices_map();
  void set_has_input_tensor_names_map();
  void set_has_output_tensor_indices_map();
  void set_has_output_tensor_names_map();

  inline bool has_InputTensorMap() const;
  inline void clear_has_InputTensorMap();

  inline bool has_OutputTensorMap() const;
  inline void clear_has_OutputTensorMap();

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink > feedback_tensor_links_;
  union InputTensorMapUnion {
    constexpr InputTensorMapUnion() : _constinit_{} {}
    ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* input_tensor_indices_map_;
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* input_tensor_names_map_;
  } InputTensorMap_;
  union OutputTensorMapUnion {
    constexpr OutputTensorMapUnion() : _constinit_{} {}
    ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* output_tensor_indices_map_;
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* output_tensor_names_map_;
  } OutputTensorMap_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  uint32_t _oneof_case_[2];

  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
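
// Usage sketch (illustrative; names are made up). InputTensorMap and
// OutputTensorMap are independent oneofs, so each side may be keyed either
// by tensor indices or by tensor names:
//
//   mediapipe::InferenceCalculatorOptions_InputOutputConfig cfg;
//   cfg.mutable_input_tensor_names_map()->add_tensor_names("input_ids");
//   cfg.mutable_output_tensor_indices_map()->add_model_tensor_indices(0);
//   auto* link = cfg.add_feedback_tensor_links();
//   link->set_from_output_tensor_name("state_out");
//   link->set_to_input_tensor_name("state_in");
//   // cfg.InputTensorMap_case()  == kInputTensorNamesMap
//   // cfg.OutputTensorMap_case() == kOutputTensorIndicesMap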
// -------------------------------------------------------------------

class InferenceCalculatorOptions final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:mediapipe.InferenceCalculatorOptions) */ {
 public:
  inline InferenceCalculatorOptions() : InferenceCalculatorOptions(nullptr) {}
  ~InferenceCalculatorOptions() override;
  explicit constexpr InferenceCalculatorOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  InferenceCalculatorOptions(const InferenceCalculatorOptions& from);
  InferenceCalculatorOptions(InferenceCalculatorOptions&& from) noexcept
    : InferenceCalculatorOptions() {
    *this = ::std::move(from);
  }

  inline InferenceCalculatorOptions& operator=(const InferenceCalculatorOptions& from) {
    CopyFrom(from);
    return *this;
  }
  inline InferenceCalculatorOptions& operator=(InferenceCalculatorOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const {
    return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance);
  }
  inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() {
    return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
  }

  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
    return GetDescriptor();
  }
  static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
    return default_instance().GetMetadata().descriptor;
  }
  static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
    return default_instance().GetMetadata().reflection;
  }
  static const InferenceCalculatorOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const InferenceCalculatorOptions* internal_default_instance() {
    return reinterpret_cast<const InferenceCalculatorOptions*>(
               &_InferenceCalculatorOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    9;

  friend void swap(InferenceCalculatorOptions& a, InferenceCalculatorOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(InferenceCalculatorOptions* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(InferenceCalculatorOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  InferenceCalculatorOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
    return CreateMaybeMessage<InferenceCalculatorOptions>(arena);
  }
  using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
  void CopyFrom(const InferenceCalculatorOptions& from);
  using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
  void MergeFrom(const InferenceCalculatorOptions& from);
  private:
  static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from);
  public:
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  uint8_t* _InternalSerialize(
      uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _cached_size_.Get(); }

  private:
  void SharedCtor();
  void SharedDtor();
  void SetCachedSize(int size) const final;
  void InternalSwap(InferenceCalculatorOptions* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "mediapipe.InferenceCalculatorOptions";
  }
  protected:
  explicit InferenceCalculatorOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  private:
  static void ArenaDtor(void* object);
  inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  public:

  static const ClassData _class_data_;
  const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final;

  ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;

  // nested types ----------------------------------------------------

  typedef InferenceCalculatorOptions_Delegate Delegate;
  typedef InferenceCalculatorOptions_InputOutputConfig InputOutputConfig;

  // accessors -------------------------------------------------------

  enum : int {
    kModelPathFieldNumber = 1,
    kDelegateFieldNumber = 5,
    kInputOutputConfigFieldNumber = 8,
    kTryMmapModelFieldNumber = 7,
    kUseGpuFieldNumber = 2,
    kUseNnapiFieldNumber = 3,
    kCpuNumThreadFieldNumber = 4,
  };
  // optional string model_path = 1;
  bool has_model_path() const;
  private:
  bool _internal_has_model_path() const;
  public:
  void clear_model_path();
  const std::string& model_path() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_model_path(ArgT0&& arg0, ArgT... args);
  std::string* mutable_model_path();
  PROTOBUF_NODISCARD std::string* release_model_path();
  void set_allocated_model_path(std::string* model_path);
  private:
  const std::string& _internal_model_path() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_model_path(const std::string& value);
  std::string* _internal_mutable_model_path();
  public:

  // optional .mediapipe.InferenceCalculatorOptions.Delegate delegate = 5;
  bool has_delegate() const;
  private:
  bool _internal_has_delegate() const;
  public:
  void clear_delegate();
  const ::mediapipe::InferenceCalculatorOptions_Delegate& delegate() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_Delegate* release_delegate();
  ::mediapipe::InferenceCalculatorOptions_Delegate* mutable_delegate();
  void set_allocated_delegate(::mediapipe::InferenceCalculatorOptions_Delegate* delegate);
  private:
  const ::mediapipe::InferenceCalculatorOptions_Delegate& _internal_delegate() const;
  ::mediapipe::InferenceCalculatorOptions_Delegate* _internal_mutable_delegate();
  public:
  void unsafe_arena_set_allocated_delegate(
      ::mediapipe::InferenceCalculatorOptions_Delegate* delegate);
  ::mediapipe::InferenceCalculatorOptions_Delegate* unsafe_arena_release_delegate();

  // optional .mediapipe.InferenceCalculatorOptions.InputOutputConfig input_output_config = 8;
  bool has_input_output_config() const;
  private:
  bool _internal_has_input_output_config() const;
  public:
  void clear_input_output_config();
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig& input_output_config() const;
  PROTOBUF_NODISCARD ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* release_input_output_config();
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* mutable_input_output_config();
  void set_allocated_input_output_config(::mediapipe::InferenceCalculatorOptions_InputOutputConfig* input_output_config);
  private:
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig& _internal_input_output_config() const;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* _internal_mutable_input_output_config();
  public:
  void unsafe_arena_set_allocated_input_output_config(
      ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* input_output_config);
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* unsafe_arena_release_input_output_config();

  // optional bool try_mmap_model = 7;
  bool has_try_mmap_model() const;
  private:
  bool _internal_has_try_mmap_model() const;
  public:
  void clear_try_mmap_model();
  bool try_mmap_model() const;
  void set_try_mmap_model(bool value);
  private:
  bool _internal_try_mmap_model() const;
  void _internal_set_try_mmap_model(bool value);
  public:

  // optional bool use_gpu = 2 [default = false, deprecated = true];
  PROTOBUF_DEPRECATED bool has_use_gpu() const;
  private:
  bool _internal_has_use_gpu() const;
  public:
  PROTOBUF_DEPRECATED void clear_use_gpu();
  PROTOBUF_DEPRECATED bool use_gpu() const;
  PROTOBUF_DEPRECATED void set_use_gpu(bool value);
  private:
  bool _internal_use_gpu() const;
  void _internal_set_use_gpu(bool value);
  public:

  // optional bool use_nnapi = 3 [default = false, deprecated = true];
  PROTOBUF_DEPRECATED bool has_use_nnapi() const;
  private:
  bool _internal_has_use_nnapi() const;
  public:
  PROTOBUF_DEPRECATED void clear_use_nnapi();
  PROTOBUF_DEPRECATED bool use_nnapi() const;
  PROTOBUF_DEPRECATED void set_use_nnapi(bool value);
  private:
  bool _internal_use_nnapi() const;
  void _internal_set_use_nnapi(bool value);
  public:

  // optional int32 cpu_num_thread = 4 [default = -1];
  bool has_cpu_num_thread() const;
  private:
  bool _internal_has_cpu_num_thread() const;
  public:
  void clear_cpu_num_thread();
  int32_t cpu_num_thread() const;
  void set_cpu_num_thread(int32_t value);
  private:
  int32_t _internal_cpu_num_thread() const;
  void _internal_set_cpu_num_thread(int32_t value);
  public:

  static const int kExtFieldNumber = 336783863;
  static ::PROTOBUF_NAMESPACE_ID::internal::ExtensionIdentifier< ::mediapipe::CalculatorOptions,
      ::PROTOBUF_NAMESPACE_ID::internal::MessageTypeTraits< ::mediapipe::InferenceCalculatorOptions >, 11, false >
    ext;
  // @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_;
  mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr model_path_;
  ::mediapipe::InferenceCalculatorOptions_Delegate* delegate_;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* input_output_config_;
  bool try_mmap_model_;
  bool use_gpu_;
  bool use_nnapi_;
  int32_t cpu_num_thread_;
  friend struct ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto;
};
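
// Usage sketch (illustrative; the model path is made up). The options are
// registered as extension 336783863 of mediapipe.CalculatorOptions via the
// `ext` identifier declared above, so they are reached through the standard
// proto2 extension API:
//
//   mediapipe::CalculatorOptions node_options;
//   auto* opts = node_options.MutableExtension(
//       mediapipe::InferenceCalculatorOptions::ext);
//   opts->set_model_path("mediapipe/models/example.tflite");
//   opts->set_cpu_num_thread(4);
//   opts->mutable_delegate()->mutable_xnnpack();
//   bool mmap = opts->try_mmap_model();  // false until explicitly set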
// ===================================================================


// ===================================================================

#ifdef __GNUC__
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif  // __GNUC__
// InferenceCalculatorOptions_Delegate_TfLite

// -------------------------------------------------------------------

// InferenceCalculatorOptions_Delegate_Gpu

// optional bool use_advanced_gpu_api = 1 [default = false];
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_use_advanced_gpu_api() const {
  bool value = (_has_bits_[0] & 0x00000008u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_use_advanced_gpu_api() const {
  return _internal_has_use_advanced_gpu_api();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_use_advanced_gpu_api() {
  use_advanced_gpu_api_ = false;
  _has_bits_[0] &= ~0x00000008u;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_use_advanced_gpu_api() const {
  return use_advanced_gpu_api_;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::use_advanced_gpu_api() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.use_advanced_gpu_api)
  return _internal_use_advanced_gpu_api();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_use_advanced_gpu_api(bool value) {
  _has_bits_[0] |= 0x00000008u;
  use_advanced_gpu_api_ = value;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_use_advanced_gpu_api(bool value) {
  _internal_set_use_advanced_gpu_api(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.use_advanced_gpu_api)
}
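
// Sketch of the presence semantics implemented above (illustrative): set_*
// raises has-bit 0x00000008u alongside the value, and clear_* lowers it and
// restores the proto default:
//
//   mediapipe::InferenceCalculatorOptions_Delegate_Gpu gpu;
//   gpu.set_use_advanced_gpu_api(true);
//   bool present = gpu.has_use_advanced_gpu_api();  // true
//   gpu.clear_use_advanced_gpu_api();               // value back to false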

// optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.Api api = 4 [default = ANY];
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_api() const {
  bool value = (_has_bits_[0] & 0x00000010u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_api() const {
  return _internal_has_api();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_api() {
  api_ = 0;
  _has_bits_[0] &= ~0x00000010u;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu::_internal_api() const {
  return static_cast< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api >(api_);
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api InferenceCalculatorOptions_Delegate_Gpu::api() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.api)
  return _internal_api();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_api(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api value) {
  assert(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api_IsValid(value));
  _has_bits_[0] |= 0x00000010u;
  api_ = value;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_api(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api value) {
  _internal_set_api(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.api)
}
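
// Editor's note (illustrative sketch, not generated code): enum fields are
// stored as int and surfaced as the typed enum; set_api() debug-asserts that
// the value is a valid Api enumerator. OPENGL below is an assumed enumerator
// name following the generated Api constants declared earlier in this file;
// ANY is the declared default.
inline void Example_SelectGpuApi(
    InferenceCalculatorOptions_Delegate_Gpu* gpu) {
  gpu->set_api(InferenceCalculatorOptions_Delegate_Gpu_Api_OPENGL);
  assert(gpu->has_api());
  gpu->clear_api();  // back to the [default = ANY] value
  assert(gpu->api() == InferenceCalculatorOptions_Delegate_Gpu_Api_ANY);
}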

// optional bool allow_precision_loss = 3 [default = true];
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_allow_precision_loss() const {
  bool value = (_has_bits_[0] & 0x00000040u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_allow_precision_loss() const {
  return _internal_has_allow_precision_loss();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_allow_precision_loss() {
  allow_precision_loss_ = true;
  _has_bits_[0] &= ~0x00000040u;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_allow_precision_loss() const {
  return allow_precision_loss_;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::allow_precision_loss() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.allow_precision_loss)
  return _internal_allow_precision_loss();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_allow_precision_loss(bool value) {
  _has_bits_[0] |= 0x00000040u;
  allow_precision_loss_ = value;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_allow_precision_loss(bool value) {
  _internal_set_allow_precision_loss(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.allow_precision_loss)
}

// optional string cached_kernel_path = 2;
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_cached_kernel_path() const {
  bool value = (_has_bits_[0] & 0x00000001u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_cached_kernel_path() const {
  return _internal_has_cached_kernel_path();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_cached_kernel_path() {
  cached_kernel_path_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000001u;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu::cached_kernel_path() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cached_kernel_path)
  return _internal_cached_kernel_path();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_Delegate_Gpu::set_cached_kernel_path(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000001u;
  cached_kernel_path_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cached_kernel_path)
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::mutable_cached_kernel_path() {
  std::string* _s = _internal_mutable_cached_kernel_path();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cached_kernel_path)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu::_internal_cached_kernel_path() const {
  return cached_kernel_path_.Get();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_cached_kernel_path(const std::string& value) {
  _has_bits_[0] |= 0x00000001u;
  cached_kernel_path_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::_internal_mutable_cached_kernel_path() {
  _has_bits_[0] |= 0x00000001u;
  return cached_kernel_path_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::release_cached_kernel_path() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cached_kernel_path)
  if (!_internal_has_cached_kernel_path()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000001u;
  auto* p = cached_kernel_path_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (cached_kernel_path_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    cached_kernel_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_allocated_cached_kernel_path(std::string* cached_kernel_path) {
  if (cached_kernel_path != nullptr) {
    _has_bits_[0] |= 0x00000001u;
  } else {
    _has_bits_[0] &= ~0x00000001u;
  }
  cached_kernel_path_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), cached_kernel_path,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (cached_kernel_path_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    cached_kernel_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cached_kernel_path)
}
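
// Editor's note (illustrative sketch, not generated code): ownership rules
// for the string accessors above. set_* copies, mutable_* exposes the
// backing std::string, release_* hands ownership to the caller (nullptr when
// unset; a heap copy when the message lives on an arena, so `delete` stays
// safe), and set_allocated_* adopts a heap string (nullptr clears the field).
inline void Example_CachedKernelPath(
    InferenceCalculatorOptions_Delegate_Gpu* gpu) {
  gpu->set_cached_kernel_path("/tmp/kernel_cache/");  // copy in
  gpu->mutable_cached_kernel_path()->append("gpu/");  // edit in place
  std::string* owned = gpu->release_cached_kernel_path();
  assert(owned != nullptr && !gpu->has_cached_kernel_path());
  gpu->set_allocated_cached_kernel_path(owned);       // ownership handed back
}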

// optional string serialized_model_dir = 7;
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_serialized_model_dir() const {
  bool value = (_has_bits_[0] & 0x00000002u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_serialized_model_dir() const {
  return _internal_has_serialized_model_dir();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_serialized_model_dir() {
  serialized_model_dir_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000002u;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu::serialized_model_dir() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.serialized_model_dir)
  return _internal_serialized_model_dir();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_Delegate_Gpu::set_serialized_model_dir(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000002u;
  serialized_model_dir_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.serialized_model_dir)
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::mutable_serialized_model_dir() {
  std::string* _s = _internal_mutable_serialized_model_dir();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.serialized_model_dir)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu::_internal_serialized_model_dir() const {
  return serialized_model_dir_.Get();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_serialized_model_dir(const std::string& value) {
  _has_bits_[0] |= 0x00000002u;
  serialized_model_dir_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::_internal_mutable_serialized_model_dir() {
  _has_bits_[0] |= 0x00000002u;
  return serialized_model_dir_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::release_serialized_model_dir() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.serialized_model_dir)
  if (!_internal_has_serialized_model_dir()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000002u;
  auto* p = serialized_model_dir_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (serialized_model_dir_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    serialized_model_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_allocated_serialized_model_dir(std::string* serialized_model_dir) {
  if (serialized_model_dir != nullptr) {
    _has_bits_[0] |= 0x00000002u;
  } else {
    _has_bits_[0] &= ~0x00000002u;
  }
  serialized_model_dir_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), serialized_model_dir,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (serialized_model_dir_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    serialized_model_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.serialized_model_dir)
}

// optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.CacheWritingBehavior cache_writing_behavior = 10 [default = WRITE_OR_ERROR];
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_cache_writing_behavior() const {
  bool value = (_has_bits_[0] & 0x00000020u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_cache_writing_behavior() const {
  return _internal_has_cache_writing_behavior();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_cache_writing_behavior() {
  cache_writing_behavior_ = 2;
  _has_bits_[0] &= ~0x00000020u;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu::_internal_cache_writing_behavior() const {
  return static_cast< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior >(cache_writing_behavior_);
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior InferenceCalculatorOptions_Delegate_Gpu::cache_writing_behavior() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cache_writing_behavior)
  return _internal_cache_writing_behavior();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_cache_writing_behavior(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior value) {
  assert(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_IsValid(value));
  _has_bits_[0] |= 0x00000020u;
  cache_writing_behavior_ = value;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_cache_writing_behavior(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior value) {
  _internal_set_cache_writing_behavior(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.cache_writing_behavior)
}

// optional string model_token = 8;
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_model_token() const {
  bool value = (_has_bits_[0] & 0x00000004u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_model_token() const {
  return _internal_has_model_token();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_model_token() {
  model_token_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000004u;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu::model_token() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.model_token)
  return _internal_model_token();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_Delegate_Gpu::set_model_token(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000004u;
  model_token_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.model_token)
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::mutable_model_token() {
  std::string* _s = _internal_mutable_model_token();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.model_token)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Gpu::_internal_model_token() const {
  return model_token_.Get();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_model_token(const std::string& value) {
  _has_bits_[0] |= 0x00000004u;
  model_token_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::_internal_mutable_model_token() {
  _has_bits_[0] |= 0x00000004u;
  return model_token_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Gpu::release_model_token() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.model_token)
  if (!_internal_has_model_token()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000004u;
  auto* p = model_token_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (model_token_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_allocated_model_token(std::string* model_token) {
  if (model_token != nullptr) {
    _has_bits_[0] |= 0x00000004u;
  } else {
    _has_bits_[0] &= ~0x00000004u;
  }
  model_token_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), model_token,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (model_token_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.model_token)
}

// optional .mediapipe.InferenceCalculatorOptions.Delegate.Gpu.InferenceUsage usage = 5 [default = SUSTAINED_SPEED];
inline bool InferenceCalculatorOptions_Delegate_Gpu::_internal_has_usage() const {
  bool value = (_has_bits_[0] & 0x00000080u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Gpu::has_usage() const {
  return _internal_has_usage();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::clear_usage() {
  usage_ = 2;
  _has_bits_[0] &= ~0x00000080u;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu::_internal_usage() const {
  return static_cast< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage >(usage_);
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceCalculatorOptions_Delegate_Gpu::usage() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.usage)
  return _internal_usage();
}
inline void InferenceCalculatorOptions_Delegate_Gpu::_internal_set_usage(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage value) {
  assert(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_IsValid(value));
  _has_bits_[0] |= 0x00000080u;
  usage_ = value;
}
inline void InferenceCalculatorOptions_Delegate_Gpu::set_usage(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage value) {
  _internal_set_usage(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Gpu.usage)
}
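
// Editor's note (illustrative sketch, not generated code): the Gpu accessors
// combined into one configuration. The paths and model token are
// hypothetical, and the TRY_WRITE / FAST_SINGLE_ANSWER enumerator names are
// assumed from the generated constants declared earlier in this file.
inline void Example_ConfigureGpuDelegate(
    InferenceCalculatorOptions_Delegate_Gpu* gpu) {
  gpu->set_use_advanced_gpu_api(true);
  gpu->set_allow_precision_loss(true);  // matches the declared default
  gpu->set_cached_kernel_path("/data/local/tmp/");    // hypothetical path
  gpu->set_serialized_model_dir("/data/local/tmp/");  // hypothetical path
  gpu->set_model_token("my_model_v1");                // hypothetical token
  gpu->set_cache_writing_behavior(
      InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_TRY_WRITE);
  gpu->set_usage(
      InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_FAST_SINGLE_ANSWER);
}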

// -------------------------------------------------------------------

// InferenceCalculatorOptions_Delegate_Nnapi

// optional string cache_dir = 1;
inline bool InferenceCalculatorOptions_Delegate_Nnapi::_internal_has_cache_dir() const {
  bool value = (_has_bits_[0] & 0x00000001u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Nnapi::has_cache_dir() const {
  return _internal_has_cache_dir();
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::clear_cache_dir() {
  cache_dir_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000001u;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Nnapi::cache_dir() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.cache_dir)
  return _internal_cache_dir();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_Delegate_Nnapi::set_cache_dir(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000001u;
  cache_dir_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.cache_dir)
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::mutable_cache_dir() {
  std::string* _s = _internal_mutable_cache_dir();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.cache_dir)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Nnapi::_internal_cache_dir() const {
  return cache_dir_.Get();
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::_internal_set_cache_dir(const std::string& value) {
  _has_bits_[0] |= 0x00000001u;
  cache_dir_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::_internal_mutable_cache_dir() {
  _has_bits_[0] |= 0x00000001u;
  return cache_dir_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::release_cache_dir() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.cache_dir)
  if (!_internal_has_cache_dir()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000001u;
  auto* p = cache_dir_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (cache_dir_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    cache_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::set_allocated_cache_dir(std::string* cache_dir) {
  if (cache_dir != nullptr) {
    _has_bits_[0] |= 0x00000001u;
  } else {
    _has_bits_[0] &= ~0x00000001u;
  }
  cache_dir_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), cache_dir,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (cache_dir_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    cache_dir_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.cache_dir)
}

// optional string model_token = 2;
inline bool InferenceCalculatorOptions_Delegate_Nnapi::_internal_has_model_token() const {
  bool value = (_has_bits_[0] & 0x00000002u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Nnapi::has_model_token() const {
  return _internal_has_model_token();
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::clear_model_token() {
  model_token_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000002u;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Nnapi::model_token() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.model_token)
  return _internal_model_token();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_Delegate_Nnapi::set_model_token(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000002u;
  model_token_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.model_token)
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::mutable_model_token() {
  std::string* _s = _internal_mutable_model_token();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.model_token)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Nnapi::_internal_model_token() const {
  return model_token_.Get();
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::_internal_set_model_token(const std::string& value) {
  _has_bits_[0] |= 0x00000002u;
  model_token_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::_internal_mutable_model_token() {
  _has_bits_[0] |= 0x00000002u;
  return model_token_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::release_model_token() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.model_token)
  if (!_internal_has_model_token()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000002u;
  auto* p = model_token_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (model_token_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::set_allocated_model_token(std::string* model_token) {
  if (model_token != nullptr) {
    _has_bits_[0] |= 0x00000002u;
  } else {
    _has_bits_[0] &= ~0x00000002u;
  }
  model_token_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), model_token,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (model_token_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    model_token_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.model_token)
}

// optional string accelerator_name = 3;
inline bool InferenceCalculatorOptions_Delegate_Nnapi::_internal_has_accelerator_name() const {
  bool value = (_has_bits_[0] & 0x00000004u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Nnapi::has_accelerator_name() const {
  return _internal_has_accelerator_name();
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::clear_accelerator_name() {
  accelerator_name_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000004u;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Nnapi::accelerator_name() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.accelerator_name)
  return _internal_accelerator_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_Delegate_Nnapi::set_accelerator_name(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000004u;
  accelerator_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.accelerator_name)
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::mutable_accelerator_name() {
  std::string* _s = _internal_mutable_accelerator_name();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.accelerator_name)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_Delegate_Nnapi::_internal_accelerator_name() const {
  return accelerator_name_.Get();
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::_internal_set_accelerator_name(const std::string& value) {
  _has_bits_[0] |= 0x00000004u;
  accelerator_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::_internal_mutable_accelerator_name() {
  _has_bits_[0] |= 0x00000004u;
  return accelerator_name_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_Delegate_Nnapi::release_accelerator_name() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.accelerator_name)
  if (!_internal_has_accelerator_name()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000004u;
  auto* p = accelerator_name_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (accelerator_name_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    accelerator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_Delegate_Nnapi::set_allocated_accelerator_name(std::string* accelerator_name) {
  if (accelerator_name != nullptr) {
    _has_bits_[0] |= 0x00000004u;
  } else {
    _has_bits_[0] &= ~0x00000004u;
  }
  accelerator_name_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), accelerator_name,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (accelerator_name_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    accelerator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi.accelerator_name)
}
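
// Editor's note (illustrative sketch, not generated code): the Nnapi fields
// follow the same string pattern shown above. Per the TFLite NNAPI delegate
// contract, cache_dir plus model_token enable compilation caching, and
// accelerator_name pins a specific device; the values here are hypothetical.
inline void Example_ConfigureNnapiDelegate(
    InferenceCalculatorOptions_Delegate_Nnapi* nnapi) {
  nnapi->set_cache_dir("/data/local/tmp/nnapi");  // hypothetical path
  nnapi->set_model_token("my_model_v1");          // hypothetical token
  nnapi->set_accelerator_name("example-dsp");     // hypothetical device
}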

// -------------------------------------------------------------------

// InferenceCalculatorOptions_Delegate_Xnnpack

// optional int32 num_threads = 1 [default = -1];
inline bool InferenceCalculatorOptions_Delegate_Xnnpack::_internal_has_num_threads() const {
  bool value = (_has_bits_[0] & 0x00000001u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_Delegate_Xnnpack::has_num_threads() const {
  return _internal_has_num_threads();
}
inline void InferenceCalculatorOptions_Delegate_Xnnpack::clear_num_threads() {
  num_threads_ = -1;
  _has_bits_[0] &= ~0x00000001u;
}
inline int32_t InferenceCalculatorOptions_Delegate_Xnnpack::_internal_num_threads() const {
  return num_threads_;
}
inline int32_t InferenceCalculatorOptions_Delegate_Xnnpack::num_threads() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack.num_threads)
  return _internal_num_threads();
}
inline void InferenceCalculatorOptions_Delegate_Xnnpack::_internal_set_num_threads(int32_t value) {
  _has_bits_[0] |= 0x00000001u;
  num_threads_ = value;
}
inline void InferenceCalculatorOptions_Delegate_Xnnpack::set_num_threads(int32_t value) {
  _internal_set_num_threads(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack.num_threads)
}
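
// Editor's note (illustrative sketch, not generated code): num_threads uses
// the same has-bit pattern with a non-zero default: unset reads as -1
// (commonly "let the delegate decide"), and clear_* restores that default.
inline void Example_XnnpackThreads(
    InferenceCalculatorOptions_Delegate_Xnnpack* xnnpack) {
  // Assumes `xnnpack` is freshly constructed, so the field starts unset.
  assert(xnnpack->num_threads() == -1);
  xnnpack->set_num_threads(4);   // pin the thread count
  xnnpack->clear_num_threads();  // back to -1
}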

// -------------------------------------------------------------------

// InferenceCalculatorOptions_Delegate

// .mediapipe.InferenceCalculatorOptions.Delegate.TfLite tflite = 1;
inline bool InferenceCalculatorOptions_Delegate::_internal_has_tflite() const {
  return delegate_case() == kTflite;
}
inline bool InferenceCalculatorOptions_Delegate::has_tflite() const {
  return _internal_has_tflite();
}
inline void InferenceCalculatorOptions_Delegate::set_has_tflite() {
  _oneof_case_[0] = kTflite;
}
inline void InferenceCalculatorOptions_Delegate::clear_tflite() {
  if (_internal_has_tflite()) {
    if (GetArenaForAllocation() == nullptr) {
      delete delegate_.tflite_;
    }
    clear_has_delegate();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* InferenceCalculatorOptions_Delegate::release_tflite() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.tflite)
  if (_internal_has_tflite()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* temp = delegate_.tflite_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    delegate_.tflite_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite& InferenceCalculatorOptions_Delegate::_internal_tflite() const {
  return _internal_has_tflite()
      ? *delegate_.tflite_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite&>(::mediapipe::_InferenceCalculatorOptions_Delegate_TfLite_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite& InferenceCalculatorOptions_Delegate::tflite() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.tflite)
  return _internal_tflite();
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* InferenceCalculatorOptions_Delegate::unsafe_arena_release_tflite() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.Delegate.tflite)
  if (_internal_has_tflite()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* temp = delegate_.tflite_;
    delegate_.tflite_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_Delegate::unsafe_arena_set_allocated_tflite(::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* tflite) {
  clear_delegate();
  if (tflite) {
    set_has_tflite();
    delegate_.tflite_ = tflite;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.tflite)
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* InferenceCalculatorOptions_Delegate::_internal_mutable_tflite() {
  if (!_internal_has_tflite()) {
    clear_delegate();
    set_has_tflite();
    delegate_.tflite_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite >(GetArenaForAllocation());
  }
  return delegate_.tflite_;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* InferenceCalculatorOptions_Delegate::mutable_tflite() {
  ::mediapipe::InferenceCalculatorOptions_Delegate_TfLite* _msg = _internal_mutable_tflite();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.tflite)
  return _msg;
}

// .mediapipe.InferenceCalculatorOptions.Delegate.Gpu gpu = 2;
inline bool InferenceCalculatorOptions_Delegate::_internal_has_gpu() const {
  return delegate_case() == kGpu;
}
inline bool InferenceCalculatorOptions_Delegate::has_gpu() const {
  return _internal_has_gpu();
}
inline void InferenceCalculatorOptions_Delegate::set_has_gpu() {
  _oneof_case_[0] = kGpu;
}
inline void InferenceCalculatorOptions_Delegate::clear_gpu() {
  if (_internal_has_gpu()) {
    if (GetArenaForAllocation() == nullptr) {
      delete delegate_.gpu_;
    }
    clear_has_delegate();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* InferenceCalculatorOptions_Delegate::release_gpu() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.gpu)
  if (_internal_has_gpu()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* temp = delegate_.gpu_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    delegate_.gpu_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu& InferenceCalculatorOptions_Delegate::_internal_gpu() const {
  return _internal_has_gpu()
      ? *delegate_.gpu_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu&>(::mediapipe::_InferenceCalculatorOptions_Delegate_Gpu_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu& InferenceCalculatorOptions_Delegate::gpu() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.gpu)
  return _internal_gpu();
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* InferenceCalculatorOptions_Delegate::unsafe_arena_release_gpu() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.Delegate.gpu)
  if (_internal_has_gpu()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* temp = delegate_.gpu_;
    delegate_.gpu_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_Delegate::unsafe_arena_set_allocated_gpu(::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* gpu) {
  clear_delegate();
  if (gpu) {
    set_has_gpu();
    delegate_.gpu_ = gpu;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.gpu)
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* InferenceCalculatorOptions_Delegate::_internal_mutable_gpu() {
  if (!_internal_has_gpu()) {
    clear_delegate();
    set_has_gpu();
    delegate_.gpu_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu >(GetArenaForAllocation());
  }
  return delegate_.gpu_;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* InferenceCalculatorOptions_Delegate::mutable_gpu() {
  ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu* _msg = _internal_mutable_gpu();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.gpu)
  return _msg;
}

// .mediapipe.InferenceCalculatorOptions.Delegate.Nnapi nnapi = 3;
inline bool InferenceCalculatorOptions_Delegate::_internal_has_nnapi() const {
  return delegate_case() == kNnapi;
}
inline bool InferenceCalculatorOptions_Delegate::has_nnapi() const {
  return _internal_has_nnapi();
}
inline void InferenceCalculatorOptions_Delegate::set_has_nnapi() {
  _oneof_case_[0] = kNnapi;
}
inline void InferenceCalculatorOptions_Delegate::clear_nnapi() {
  if (_internal_has_nnapi()) {
    if (GetArenaForAllocation() == nullptr) {
      delete delegate_.nnapi_;
    }
    clear_has_delegate();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* InferenceCalculatorOptions_Delegate::release_nnapi() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.nnapi)
  if (_internal_has_nnapi()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* temp = delegate_.nnapi_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    delegate_.nnapi_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi& InferenceCalculatorOptions_Delegate::_internal_nnapi() const {
  return _internal_has_nnapi()
      ? *delegate_.nnapi_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi&>(::mediapipe::_InferenceCalculatorOptions_Delegate_Nnapi_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi& InferenceCalculatorOptions_Delegate::nnapi() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.nnapi)
  return _internal_nnapi();
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* InferenceCalculatorOptions_Delegate::unsafe_arena_release_nnapi() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.Delegate.nnapi)
  if (_internal_has_nnapi()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* temp = delegate_.nnapi_;
    delegate_.nnapi_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_Delegate::unsafe_arena_set_allocated_nnapi(::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* nnapi) {
  clear_delegate();
  if (nnapi) {
    set_has_nnapi();
    delegate_.nnapi_ = nnapi;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.nnapi)
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* InferenceCalculatorOptions_Delegate::_internal_mutable_nnapi() {
  if (!_internal_has_nnapi()) {
    clear_delegate();
    set_has_nnapi();
    delegate_.nnapi_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi >(GetArenaForAllocation());
  }
  return delegate_.nnapi_;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* InferenceCalculatorOptions_Delegate::mutable_nnapi() {
  ::mediapipe::InferenceCalculatorOptions_Delegate_Nnapi* _msg = _internal_mutable_nnapi();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.nnapi)
  return _msg;
}

// .mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack xnnpack = 4;
inline bool InferenceCalculatorOptions_Delegate::_internal_has_xnnpack() const {
  return delegate_case() == kXnnpack;
}
inline bool InferenceCalculatorOptions_Delegate::has_xnnpack() const {
  return _internal_has_xnnpack();
}
inline void InferenceCalculatorOptions_Delegate::set_has_xnnpack() {
  _oneof_case_[0] = kXnnpack;
}
inline void InferenceCalculatorOptions_Delegate::clear_xnnpack() {
  if (_internal_has_xnnpack()) {
    if (GetArenaForAllocation() == nullptr) {
      delete delegate_.xnnpack_;
    }
    clear_has_delegate();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* InferenceCalculatorOptions_Delegate::release_xnnpack() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.Delegate.xnnpack)
  if (_internal_has_xnnpack()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* temp = delegate_.xnnpack_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    delegate_.xnnpack_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack& InferenceCalculatorOptions_Delegate::_internal_xnnpack() const {
  return _internal_has_xnnpack()
      ? *delegate_.xnnpack_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack&>(::mediapipe::_InferenceCalculatorOptions_Delegate_Xnnpack_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack& InferenceCalculatorOptions_Delegate::xnnpack() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.Delegate.xnnpack)
  return _internal_xnnpack();
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* InferenceCalculatorOptions_Delegate::unsafe_arena_release_xnnpack() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.Delegate.xnnpack)
  if (_internal_has_xnnpack()) {
    clear_has_delegate();
    ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* temp = delegate_.xnnpack_;
    delegate_.xnnpack_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_Delegate::unsafe_arena_set_allocated_xnnpack(::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* xnnpack) {
  clear_delegate();
  if (xnnpack) {
    set_has_xnnpack();
    delegate_.xnnpack_ = xnnpack;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.Delegate.xnnpack)
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* InferenceCalculatorOptions_Delegate::_internal_mutable_xnnpack() {
  if (!_internal_has_xnnpack()) {
    clear_delegate();
    set_has_xnnpack();
    delegate_.xnnpack_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack >(GetArenaForAllocation());
  }
  return delegate_.xnnpack_;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* InferenceCalculatorOptions_Delegate::mutable_xnnpack() {
  ::mediapipe::InferenceCalculatorOptions_Delegate_Xnnpack* _msg = _internal_mutable_xnnpack();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.Delegate.xnnpack)
  return _msg;
}

inline bool InferenceCalculatorOptions_Delegate::has_delegate() const {
  return delegate_case() != DELEGATE_NOT_SET;
}
inline void InferenceCalculatorOptions_Delegate::clear_has_delegate() {
  _oneof_case_[0] = DELEGATE_NOT_SET;
}
inline InferenceCalculatorOptions_Delegate::DelegateCase InferenceCalculatorOptions_Delegate::delegate_case() const {
  return InferenceCalculatorOptions_Delegate::DelegateCase(_oneof_case_[0]);
}
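
// Editor's note (illustrative sketch, not generated code): the delegate
// oneof in action. mutable_*() clears whichever member was previously
// active (via clear_delegate() inside _internal_mutable_*), so at most one
// case is ever set, and delegate_case() reports which.
inline void Example_PickDelegate(InferenceCalculatorOptions_Delegate* d) {
  d->mutable_gpu()->set_allow_precision_loss(true);
  assert(d->delegate_case() == InferenceCalculatorOptions_Delegate::kGpu);
  d->mutable_xnnpack()->set_num_threads(2);  // frees/replaces the Gpu member
  assert(!d->has_gpu() && d->has_xnnpack());
  d->clear_delegate();
  assert(d->delegate_case() ==
         InferenceCalculatorOptions_Delegate::DELEGATE_NOT_SET);
}
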
// -------------------------------------------------------------------

// InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap

// repeated int32 model_tensor_indices = 1 [packed = true];
inline int InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_internal_model_tensor_indices_size() const {
  return model_tensor_indices_.size();
}
inline int InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::model_tensor_indices_size() const {
  return _internal_model_tensor_indices_size();
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::clear_model_tensor_indices() {
  model_tensor_indices_.Clear();
}
inline int32_t InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_internal_model_tensor_indices(int index) const {
  return model_tensor_indices_.Get(index);
}
inline int32_t InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::model_tensor_indices(int index) const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap.model_tensor_indices)
  return _internal_model_tensor_indices(index);
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::set_model_tensor_indices(int index, int32_t value) {
  model_tensor_indices_.Set(index, value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap.model_tensor_indices)
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_internal_add_model_tensor_indices(int32_t value) {
  model_tensor_indices_.Add(value);
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::add_model_tensor_indices(int32_t value) {
  _internal_add_model_tensor_indices(value);
  // @@protoc_insertion_point(field_add:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap.model_tensor_indices)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >&
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_internal_model_tensor_indices() const {
  return model_tensor_indices_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >&
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::model_tensor_indices() const {
  // @@protoc_insertion_point(field_list:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap.model_tensor_indices)
  return _internal_model_tensor_indices();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >*
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::_internal_mutable_model_tensor_indices() {
  return &model_tensor_indices_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t >*
InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap::mutable_model_tensor_indices() {
  // @@protoc_insertion_point(field_mutable_list:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap.model_tensor_indices)
  return _internal_mutable_model_tensor_indices();
}
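
// Editor's note (illustrative sketch, not generated code): the packed
// repeated int32 field is a plain RepeatedField<int32_t>; add/size/Get and
// the indexed setter cover the common cases.
inline void Example_TensorIndices(
    InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* map) {
  map->add_model_tensor_indices(0);
  map->add_model_tensor_indices(2);
  assert(map->model_tensor_indices_size() == 2);
  assert(map->model_tensor_indices(1) == 2);
  map->set_model_tensor_indices(0, 1);  // overwrite an existing entry
}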

// -------------------------------------------------------------------

// InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap

// repeated string tensor_names = 1;
inline int InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::_internal_tensor_names_size() const {
  return tensor_names_.size();
}
inline int InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::tensor_names_size() const {
  return _internal_tensor_names_size();
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::clear_tensor_names() {
  tensor_names_.Clear();
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::add_tensor_names() {
  std::string* _s = _internal_add_tensor_names();
  // @@protoc_insertion_point(field_add_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::_internal_tensor_names(int index) const {
  return tensor_names_.Get(index);
}
inline const std::string& InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::tensor_names(int index) const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
  return _internal_tensor_names(index);
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::mutable_tensor_names(int index) {
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
  return tensor_names_.Mutable(index);
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::set_tensor_names(int index, const std::string& value) {
  tensor_names_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::set_tensor_names(int index, std::string&& value) {
  tensor_names_.Mutable(index)->assign(std::move(value));
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::set_tensor_names(int index, const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  tensor_names_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set_char:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::set_tensor_names(int index, const char* value, size_t size) {
  tensor_names_.Mutable(index)->assign(
    reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_set_pointer:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::_internal_add_tensor_names() {
  return tensor_names_.Add();
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::add_tensor_names(const std::string& value) {
  tensor_names_.Add()->assign(value);
  // @@protoc_insertion_point(field_add:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::add_tensor_names(std::string&& value) {
  tensor_names_.Add(std::move(value));
  // @@protoc_insertion_point(field_add:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::add_tensor_names(const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  tensor_names_.Add()->assign(value);
  // @@protoc_insertion_point(field_add_char:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline void InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::add_tensor_names(const char* value, size_t size) {
  tensor_names_.Add()->assign(reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_add_pointer:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::tensor_names() const {
  // @@protoc_insertion_point(field_list:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
  return tensor_names_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap::mutable_tensor_names() {
  // @@protoc_insertion_point(field_mutable_list:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap.tensor_names)
  return &tensor_names_;
}
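
// Editor's note (illustrative sketch, not generated code): the repeated
// string field offers copy-in overloads and a mutable-slot form, and
// tensor_names() returns a RepeatedPtrField that supports range-for.
inline void Example_TensorNames(
    InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* map) {
  map->add_tensor_names("input_0");      // copy a value in
  *map->add_tensor_names() = "input_1";  // or fill a freshly added slot
  for (const std::string& name : map->tensor_names()) {
    assert(!name.empty());
  }
}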

// -------------------------------------------------------------------

// InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink

// optional string from_output_tensor_name = 1;
inline bool InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_has_from_output_tensor_name() const {
  bool value = (_has_bits_[0] & 0x00000001u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::has_from_output_tensor_name() const {
  return _internal_has_from_output_tensor_name();
}
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::clear_from_output_tensor_name() {
  from_output_tensor_name_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000001u;
}
inline const std::string& InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::from_output_tensor_name() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.from_output_tensor_name)
  return _internal_from_output_tensor_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::set_from_output_tensor_name(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000001u;
  from_output_tensor_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.from_output_tensor_name)
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::mutable_from_output_tensor_name() {
  std::string* _s = _internal_mutable_from_output_tensor_name();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.from_output_tensor_name)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_from_output_tensor_name() const {
  return from_output_tensor_name_.Get();
}
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_set_from_output_tensor_name(const std::string& value) {
  _has_bits_[0] |= 0x00000001u;
  from_output_tensor_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_mutable_from_output_tensor_name() {
  _has_bits_[0] |= 0x00000001u;
  return from_output_tensor_name_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::release_from_output_tensor_name() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.from_output_tensor_name)
  if (!_internal_has_from_output_tensor_name()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000001u;
  auto* p = from_output_tensor_name_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from_output_tensor_name_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    from_output_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::set_allocated_from_output_tensor_name(std::string* from_output_tensor_name) {
  if (from_output_tensor_name != nullptr) {
    _has_bits_[0] |= 0x00000001u;
  } else {
    _has_bits_[0] &= ~0x00000001u;
  }
  from_output_tensor_name_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from_output_tensor_name,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (from_output_tensor_name_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    from_output_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.from_output_tensor_name)
}

// optional string to_input_tensor_name = 2;
inline bool InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_has_to_input_tensor_name() const {
  bool value = (_has_bits_[0] & 0x00000002u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::has_to_input_tensor_name() const {
  return _internal_has_to_input_tensor_name();
}
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::clear_to_input_tensor_name() {
  to_input_tensor_name_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000002u;
}
inline const std::string& InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::to_input_tensor_name() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.to_input_tensor_name)
  return _internal_to_input_tensor_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::set_to_input_tensor_name(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000002u;
  to_input_tensor_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.to_input_tensor_name)
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::mutable_to_input_tensor_name() {
  std::string* _s = _internal_mutable_to_input_tensor_name();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.to_input_tensor_name)
  return _s;
}
inline const std::string& InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_to_input_tensor_name() const {
  return to_input_tensor_name_.Get();
}
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_set_to_input_tensor_name(const std::string& value) {
  _has_bits_[0] |= 0x00000002u;
  to_input_tensor_name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::_internal_mutable_to_input_tensor_name() {
  _has_bits_[0] |= 0x00000002u;
  return to_input_tensor_name_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::release_to_input_tensor_name() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.to_input_tensor_name)
  if (!_internal_has_to_input_tensor_name()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000002u;
  auto* p = to_input_tensor_name_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (to_input_tensor_name_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    to_input_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink::set_allocated_to_input_tensor_name(std::string* to_input_tensor_name) {
  if (to_input_tensor_name != nullptr) {
    _has_bits_[0] |= 0x00000002u;
  } else {
    _has_bits_[0] &= ~0x00000002u;
  }
  to_input_tensor_name_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), to_input_tensor_name,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (to_input_tensor_name_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    to_input_tensor_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink.to_input_tensor_name)
}
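
// Usage sketch for the string accessors above (illustrative only, not part
// of the generated API; variable and tensor names are hypothetical). set_*
// copies the value and records presence in the has-bits; release_* clears
// presence and transfers ownership to the caller (when the message lives on
// an arena, the returned string is a heap copy the caller must delete);
// set_allocated_* hands ownership back, and passing nullptr clears the field:
//
//   mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink link;
//   link.set_to_input_tensor_name("recurrent_state_in");
//   if (link.has_to_input_tensor_name()) {
//     std::string* owned = link.release_to_input_tensor_name();  // caller owns
//     link.set_allocated_to_input_tensor_name(owned);            // message owns again
//   }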

// -------------------------------------------------------------------

// InferenceCalculatorOptions_InputOutputConfig

// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap input_tensor_indices_map = 1;
inline bool InferenceCalculatorOptions_InputOutputConfig::_internal_has_input_tensor_indices_map() const {
  return InputTensorMap_case() == kInputTensorIndicesMap;
}
inline bool InferenceCalculatorOptions_InputOutputConfig::has_input_tensor_indices_map() const {
  return _internal_has_input_tensor_indices_map();
}
inline void InferenceCalculatorOptions_InputOutputConfig::set_has_input_tensor_indices_map() {
  _oneof_case_[0] = kInputTensorIndicesMap;
}
inline void InferenceCalculatorOptions_InputOutputConfig::clear_input_tensor_indices_map() {
  if (_internal_has_input_tensor_indices_map()) {
    if (GetArenaForAllocation() == nullptr) {
      delete InputTensorMap_.input_tensor_indices_map_;
    }
    clear_has_InputTensorMap();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::release_input_tensor_indices_map() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_indices_map)
  if (_internal_has_input_tensor_indices_map()) {
    clear_has_InputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* temp = InputTensorMap_.input_tensor_indices_map_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    InputTensorMap_.input_tensor_indices_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& InferenceCalculatorOptions_InputOutputConfig::_internal_input_tensor_indices_map() const {
  return _internal_has_input_tensor_indices_map()
      ? *InputTensorMap_.input_tensor_indices_map_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap&>(::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& InferenceCalculatorOptions_InputOutputConfig::input_tensor_indices_map() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_indices_map)
  return _internal_input_tensor_indices_map();
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_release_input_tensor_indices_map() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_indices_map)
  if (_internal_has_input_tensor_indices_map()) {
    clear_has_InputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* temp = InputTensorMap_.input_tensor_indices_map_;
    InputTensorMap_.input_tensor_indices_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_set_allocated_input_tensor_indices_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* input_tensor_indices_map) {
  clear_InputTensorMap();
  if (input_tensor_indices_map) {
    set_has_input_tensor_indices_map();
    InputTensorMap_.input_tensor_indices_map_ = input_tensor_indices_map;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_indices_map)
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::_internal_mutable_input_tensor_indices_map() {
  if (!_internal_has_input_tensor_indices_map()) {
    clear_InputTensorMap();
    set_has_input_tensor_indices_map();
    InputTensorMap_.input_tensor_indices_map_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap >(GetArenaForAllocation());
  }
  return InputTensorMap_.input_tensor_indices_map_;
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::mutable_input_tensor_indices_map() {
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* _msg = _internal_mutable_input_tensor_indices_map();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_indices_map)
  return _msg;
}
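
// Usage sketch for the oneof member above (illustrative only, not part of
// the generated API). mutable_* switches the InputTensorMap oneof to this
// member, allocating the submessage on demand; selecting a different member
// later destroys this one, so pointers obtained from mutable_* must not be
// used across such a switch:
//
//   mediapipe::InferenceCalculatorOptions_InputOutputConfig config;
//   auto* indices = config.mutable_input_tensor_indices_map();  // case -> kInputTensorIndicesMap
//   config.clear_input_tensor_indices_map();  // back to INPUTTENSORMAP_NOT_SET; 'indices' now dangles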

// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap input_tensor_names_map = 3;
inline bool InferenceCalculatorOptions_InputOutputConfig::_internal_has_input_tensor_names_map() const {
  return InputTensorMap_case() == kInputTensorNamesMap;
}
inline bool InferenceCalculatorOptions_InputOutputConfig::has_input_tensor_names_map() const {
  return _internal_has_input_tensor_names_map();
}
inline void InferenceCalculatorOptions_InputOutputConfig::set_has_input_tensor_names_map() {
  _oneof_case_[0] = kInputTensorNamesMap;
}
inline void InferenceCalculatorOptions_InputOutputConfig::clear_input_tensor_names_map() {
  if (_internal_has_input_tensor_names_map()) {
    if (GetArenaForAllocation() == nullptr) {
      delete InputTensorMap_.input_tensor_names_map_;
    }
    clear_has_InputTensorMap();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::release_input_tensor_names_map() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_names_map)
  if (_internal_has_input_tensor_names_map()) {
    clear_has_InputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* temp = InputTensorMap_.input_tensor_names_map_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    InputTensorMap_.input_tensor_names_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& InferenceCalculatorOptions_InputOutputConfig::_internal_input_tensor_names_map() const {
  return _internal_has_input_tensor_names_map()
      ? *InputTensorMap_.input_tensor_names_map_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap&>(::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& InferenceCalculatorOptions_InputOutputConfig::input_tensor_names_map() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_names_map)
  return _internal_input_tensor_names_map();
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_release_input_tensor_names_map() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_names_map)
  if (_internal_has_input_tensor_names_map()) {
    clear_has_InputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* temp = InputTensorMap_.input_tensor_names_map_;
    InputTensorMap_.input_tensor_names_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_set_allocated_input_tensor_names_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* input_tensor_names_map) {
  clear_InputTensorMap();
  if (input_tensor_names_map) {
    set_has_input_tensor_names_map();
    InputTensorMap_.input_tensor_names_map_ = input_tensor_names_map;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_names_map)
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::_internal_mutable_input_tensor_names_map() {
  if (!_internal_has_input_tensor_names_map()) {
    clear_InputTensorMap();
    set_has_input_tensor_names_map();
    InputTensorMap_.input_tensor_names_map_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap >(GetArenaForAllocation());
  }
  return InputTensorMap_.input_tensor_names_map_;
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::mutable_input_tensor_names_map() {
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* _msg = _internal_mutable_input_tensor_names_map();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.input_tensor_names_map)
  return _msg;
}

// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap output_tensor_indices_map = 2;
inline bool InferenceCalculatorOptions_InputOutputConfig::_internal_has_output_tensor_indices_map() const {
  return OutputTensorMap_case() == kOutputTensorIndicesMap;
}
inline bool InferenceCalculatorOptions_InputOutputConfig::has_output_tensor_indices_map() const {
  return _internal_has_output_tensor_indices_map();
}
inline void InferenceCalculatorOptions_InputOutputConfig::set_has_output_tensor_indices_map() {
  _oneof_case_[1] = kOutputTensorIndicesMap;
}
inline void InferenceCalculatorOptions_InputOutputConfig::clear_output_tensor_indices_map() {
  if (_internal_has_output_tensor_indices_map()) {
    if (GetArenaForAllocation() == nullptr) {
      delete OutputTensorMap_.output_tensor_indices_map_;
    }
    clear_has_OutputTensorMap();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::release_output_tensor_indices_map() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_indices_map)
  if (_internal_has_output_tensor_indices_map()) {
    clear_has_OutputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* temp = OutputTensorMap_.output_tensor_indices_map_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    OutputTensorMap_.output_tensor_indices_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& InferenceCalculatorOptions_InputOutputConfig::_internal_output_tensor_indices_map() const {
  return _internal_has_output_tensor_indices_map()
      ? *OutputTensorMap_.output_tensor_indices_map_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap&>(::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap& InferenceCalculatorOptions_InputOutputConfig::output_tensor_indices_map() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_indices_map)
  return _internal_output_tensor_indices_map();
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_release_output_tensor_indices_map() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_indices_map)
  if (_internal_has_output_tensor_indices_map()) {
    clear_has_OutputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* temp = OutputTensorMap_.output_tensor_indices_map_;
    OutputTensorMap_.output_tensor_indices_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_set_allocated_output_tensor_indices_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* output_tensor_indices_map) {
  clear_OutputTensorMap();
  if (output_tensor_indices_map) {
    set_has_output_tensor_indices_map();
    OutputTensorMap_.output_tensor_indices_map_ = output_tensor_indices_map;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_indices_map)
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::_internal_mutable_output_tensor_indices_map() {
  if (!_internal_has_output_tensor_indices_map()) {
    clear_OutputTensorMap();
    set_has_output_tensor_indices_map();
    OutputTensorMap_.output_tensor_indices_map_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap >(GetArenaForAllocation());
  }
  return OutputTensorMap_.output_tensor_indices_map_;
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* InferenceCalculatorOptions_InputOutputConfig::mutable_output_tensor_indices_map() {
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorIndicesMap* _msg = _internal_mutable_output_tensor_indices_map();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_indices_map)
  return _msg;
}

// .mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap output_tensor_names_map = 4;
inline bool InferenceCalculatorOptions_InputOutputConfig::_internal_has_output_tensor_names_map() const {
  return OutputTensorMap_case() == kOutputTensorNamesMap;
}
inline bool InferenceCalculatorOptions_InputOutputConfig::has_output_tensor_names_map() const {
  return _internal_has_output_tensor_names_map();
}
inline void InferenceCalculatorOptions_InputOutputConfig::set_has_output_tensor_names_map() {
  _oneof_case_[1] = kOutputTensorNamesMap;
}
inline void InferenceCalculatorOptions_InputOutputConfig::clear_output_tensor_names_map() {
  if (_internal_has_output_tensor_names_map()) {
    if (GetArenaForAllocation() == nullptr) {
      delete OutputTensorMap_.output_tensor_names_map_;
    }
    clear_has_OutputTensorMap();
  }
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::release_output_tensor_names_map() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_names_map)
  if (_internal_has_output_tensor_names_map()) {
    clear_has_OutputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* temp = OutputTensorMap_.output_tensor_names_map_;
    if (GetArenaForAllocation() != nullptr) {
      temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
    }
    OutputTensorMap_.output_tensor_names_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& InferenceCalculatorOptions_InputOutputConfig::_internal_output_tensor_names_map() const {
  return _internal_has_output_tensor_names_map()
      ? *OutputTensorMap_.output_tensor_names_map_
      : reinterpret_cast< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap&>(::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap& InferenceCalculatorOptions_InputOutputConfig::output_tensor_names_map() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_names_map)
  return _internal_output_tensor_names_map();
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_release_output_tensor_names_map() {
  // @@protoc_insertion_point(field_unsafe_arena_release:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_names_map)
  if (_internal_has_output_tensor_names_map()) {
    clear_has_OutputTensorMap();
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* temp = OutputTensorMap_.output_tensor_names_map_;
    OutputTensorMap_.output_tensor_names_map_ = nullptr;
    return temp;
  } else {
    return nullptr;
  }
}
inline void InferenceCalculatorOptions_InputOutputConfig::unsafe_arena_set_allocated_output_tensor_names_map(::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* output_tensor_names_map) {
  clear_OutputTensorMap();
  if (output_tensor_names_map) {
    set_has_output_tensor_names_map();
    OutputTensorMap_.output_tensor_names_map_ = output_tensor_names_map;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_names_map)
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::_internal_mutable_output_tensor_names_map() {
  if (!_internal_has_output_tensor_names_map()) {
    clear_OutputTensorMap();
    set_has_output_tensor_names_map();
    OutputTensorMap_.output_tensor_names_map_ = CreateMaybeMessage< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap >(GetArenaForAllocation());
  }
  return OutputTensorMap_.output_tensor_names_map_;
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* InferenceCalculatorOptions_InputOutputConfig::mutable_output_tensor_names_map() {
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_TensorNamesMap* _msg = _internal_mutable_output_tensor_names_map();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.output_tensor_names_map)
  return _msg;
}

// repeated .mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink feedback_tensor_links = 5;
inline int InferenceCalculatorOptions_InputOutputConfig::_internal_feedback_tensor_links_size() const {
  return feedback_tensor_links_.size();
}
inline int InferenceCalculatorOptions_InputOutputConfig::feedback_tensor_links_size() const {
  return _internal_feedback_tensor_links_size();
}
inline void InferenceCalculatorOptions_InputOutputConfig::clear_feedback_tensor_links() {
  feedback_tensor_links_.Clear();
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* InferenceCalculatorOptions_InputOutputConfig::mutable_feedback_tensor_links(int index) {
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.InputOutputConfig.feedback_tensor_links)
  return feedback_tensor_links_.Mutable(index);
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink >*
InferenceCalculatorOptions_InputOutputConfig::mutable_feedback_tensor_links() {
  // @@protoc_insertion_point(field_mutable_list:mediapipe.InferenceCalculatorOptions.InputOutputConfig.feedback_tensor_links)
  return &feedback_tensor_links_;
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& InferenceCalculatorOptions_InputOutputConfig::_internal_feedback_tensor_links(int index) const {
  return feedback_tensor_links_.Get(index);
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink& InferenceCalculatorOptions_InputOutputConfig::feedback_tensor_links(int index) const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.InputOutputConfig.feedback_tensor_links)
  return _internal_feedback_tensor_links(index);
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* InferenceCalculatorOptions_InputOutputConfig::_internal_add_feedback_tensor_links() {
  return feedback_tensor_links_.Add();
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* InferenceCalculatorOptions_InputOutputConfig::add_feedback_tensor_links() {
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink* _add = _internal_add_feedback_tensor_links();
  // @@protoc_insertion_point(field_add:mediapipe.InferenceCalculatorOptions.InputOutputConfig.feedback_tensor_links)
  return _add;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::mediapipe::InferenceCalculatorOptions_InputOutputConfig_FeedbackTensorLink >&
InferenceCalculatorOptions_InputOutputConfig::feedback_tensor_links() const {
  // @@protoc_insertion_point(field_list:mediapipe.InferenceCalculatorOptions.InputOutputConfig.feedback_tensor_links)
  return feedback_tensor_links_;
}
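
// Usage sketch for the repeated field above (illustrative only, not part of
// the generated API; tensor names are hypothetical). add_* appends a new
// element and returns a mutable pointer to it; the const accessor returns
// the underlying RepeatedPtrField, which supports range-based iteration:
//
//   auto* link = config.add_feedback_tensor_links();
//   link->set_from_output_tensor_name("state_out");
//   link->set_to_input_tensor_name("state_in");
//   for (const auto& l : config.feedback_tensor_links()) {
//     // inspect l.from_output_tensor_name(), l.to_input_tensor_name()
//   }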

inline bool InferenceCalculatorOptions_InputOutputConfig::has_InputTensorMap() const {
  return InputTensorMap_case() != INPUTTENSORMAP_NOT_SET;
}
inline void InferenceCalculatorOptions_InputOutputConfig::clear_has_InputTensorMap() {
  _oneof_case_[0] = INPUTTENSORMAP_NOT_SET;
}
inline bool InferenceCalculatorOptions_InputOutputConfig::has_OutputTensorMap() const {
  return OutputTensorMap_case() != OUTPUTTENSORMAP_NOT_SET;
}
inline void InferenceCalculatorOptions_InputOutputConfig::clear_has_OutputTensorMap() {
  _oneof_case_[1] = OUTPUTTENSORMAP_NOT_SET;
}
inline InferenceCalculatorOptions_InputOutputConfig::InputTensorMapCase InferenceCalculatorOptions_InputOutputConfig::InputTensorMap_case() const {
  return InferenceCalculatorOptions_InputOutputConfig::InputTensorMapCase(_oneof_case_[0]);
}
inline InferenceCalculatorOptions_InputOutputConfig::OutputTensorMapCase InferenceCalculatorOptions_InputOutputConfig::OutputTensorMap_case() const {
  return InferenceCalculatorOptions_InputOutputConfig::OutputTensorMapCase(_oneof_case_[1]);
}
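
// Usage sketch for the oneof case accessors above (illustrative only, not
// part of the generated API). The *_case() methods report which member of
// each oneof is currently set and are the usual way to dispatch:
//
//   using Config = mediapipe::InferenceCalculatorOptions_InputOutputConfig;
//   switch (config.InputTensorMap_case()) {
//     case Config::kInputTensorIndicesMap:
//       // use config.input_tensor_indices_map()
//       break;
//     case Config::kInputTensorNamesMap:
//       // use config.input_tensor_names_map()
//       break;
//     case Config::INPUTTENSORMAP_NOT_SET:
//       break;
//   }
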
// -------------------------------------------------------------------

// InferenceCalculatorOptions

// optional string model_path = 1;
inline bool InferenceCalculatorOptions::_internal_has_model_path() const {
  bool value = (_has_bits_[0] & 0x00000001u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions::has_model_path() const {
  return _internal_has_model_path();
}
inline void InferenceCalculatorOptions::clear_model_path() {
  model_path_.ClearToEmpty();
  _has_bits_[0] &= ~0x00000001u;
}
inline const std::string& InferenceCalculatorOptions::model_path() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.model_path)
  return _internal_model_path();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void InferenceCalculatorOptions::set_model_path(ArgT0&& arg0, ArgT... args) {
  _has_bits_[0] |= 0x00000001u;
  model_path_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.model_path)
}
inline std::string* InferenceCalculatorOptions::mutable_model_path() {
  std::string* _s = _internal_mutable_model_path();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.model_path)
  return _s;
}
inline const std::string& InferenceCalculatorOptions::_internal_model_path() const {
  return model_path_.Get();
}
inline void InferenceCalculatorOptions::_internal_set_model_path(const std::string& value) {
  _has_bits_[0] |= 0x00000001u;
  model_path_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions::_internal_mutable_model_path() {
  _has_bits_[0] |= 0x00000001u;
  return model_path_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation());
}
inline std::string* InferenceCalculatorOptions::release_model_path() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.model_path)
  if (!_internal_has_model_path()) {
    return nullptr;
  }
  _has_bits_[0] &= ~0x00000001u;
  auto* p = model_path_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (model_path_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    model_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  return p;
}
inline void InferenceCalculatorOptions::set_allocated_model_path(std::string* model_path) {
  if (model_path != nullptr) {
    _has_bits_[0] |= 0x00000001u;
  } else {
    _has_bits_[0] &= ~0x00000001u;
  }
  model_path_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), model_path,
      GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (model_path_.IsDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited())) {
    model_path_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), "", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.model_path)
}

// optional bool try_mmap_model = 7;
inline bool InferenceCalculatorOptions::_internal_has_try_mmap_model() const {
  bool value = (_has_bits_[0] & 0x00000008u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions::has_try_mmap_model() const {
  return _internal_has_try_mmap_model();
}
inline void InferenceCalculatorOptions::clear_try_mmap_model() {
  try_mmap_model_ = false;
  _has_bits_[0] &= ~0x00000008u;
}
inline bool InferenceCalculatorOptions::_internal_try_mmap_model() const {
  return try_mmap_model_;
}
inline bool InferenceCalculatorOptions::try_mmap_model() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.try_mmap_model)
  return _internal_try_mmap_model();
}
inline void InferenceCalculatorOptions::_internal_set_try_mmap_model(bool value) {
  _has_bits_[0] |= 0x00000008u;
  try_mmap_model_ = value;
}
inline void InferenceCalculatorOptions::set_try_mmap_model(bool value) {
  _internal_set_try_mmap_model(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.try_mmap_model)
}

// optional bool use_gpu = 2 [default = false, deprecated = true];
inline bool InferenceCalculatorOptions::_internal_has_use_gpu() const {
  bool value = (_has_bits_[0] & 0x00000010u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions::has_use_gpu() const {
  return _internal_has_use_gpu();
}
inline void InferenceCalculatorOptions::clear_use_gpu() {
  use_gpu_ = false;
  _has_bits_[0] &= ~0x00000010u;
}
inline bool InferenceCalculatorOptions::_internal_use_gpu() const {
  return use_gpu_;
}
inline bool InferenceCalculatorOptions::use_gpu() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.use_gpu)
  return _internal_use_gpu();
}
inline void InferenceCalculatorOptions::_internal_set_use_gpu(bool value) {
  _has_bits_[0] |= 0x00000010u;
  use_gpu_ = value;
}
inline void InferenceCalculatorOptions::set_use_gpu(bool value) {
  _internal_set_use_gpu(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.use_gpu)
}

// optional bool use_nnapi = 3 [default = false, deprecated = true];
inline bool InferenceCalculatorOptions::_internal_has_use_nnapi() const {
  bool value = (_has_bits_[0] & 0x00000020u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions::has_use_nnapi() const {
  return _internal_has_use_nnapi();
}
inline void InferenceCalculatorOptions::clear_use_nnapi() {
  use_nnapi_ = false;
  _has_bits_[0] &= ~0x00000020u;
}
inline bool InferenceCalculatorOptions::_internal_use_nnapi() const {
  return use_nnapi_;
}
inline bool InferenceCalculatorOptions::use_nnapi() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.use_nnapi)
  return _internal_use_nnapi();
}
inline void InferenceCalculatorOptions::_internal_set_use_nnapi(bool value) {
  _has_bits_[0] |= 0x00000020u;
  use_nnapi_ = value;
}
inline void InferenceCalculatorOptions::set_use_nnapi(bool value) {
  _internal_set_use_nnapi(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.use_nnapi)
}

// optional int32 cpu_num_thread = 4 [default = -1];
inline bool InferenceCalculatorOptions::_internal_has_cpu_num_thread() const {
  bool value = (_has_bits_[0] & 0x00000040u) != 0;
  return value;
}
inline bool InferenceCalculatorOptions::has_cpu_num_thread() const {
  return _internal_has_cpu_num_thread();
}
inline void InferenceCalculatorOptions::clear_cpu_num_thread() {
  cpu_num_thread_ = -1;
  _has_bits_[0] &= ~0x00000040u;
}
inline int32_t InferenceCalculatorOptions::_internal_cpu_num_thread() const {
  return cpu_num_thread_;
}
inline int32_t InferenceCalculatorOptions::cpu_num_thread() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.cpu_num_thread)
  return _internal_cpu_num_thread();
}
inline void InferenceCalculatorOptions::_internal_set_cpu_num_thread(int32_t value) {
  _has_bits_[0] |= 0x00000040u;
  cpu_num_thread_ = value;
}
inline void InferenceCalculatorOptions::set_cpu_num_thread(int32_t value) {
  _internal_set_cpu_num_thread(value);
  // @@protoc_insertion_point(field_set:mediapipe.InferenceCalculatorOptions.cpu_num_thread)
}
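
// Usage sketch for the scalar accessors above (illustrative only, not part
// of the generated API). Explicit defaults come from the .proto definition:
// an unset cpu_num_thread reads as -1, clear_cpu_num_thread() restores -1,
// and has_cpu_num_thread() distinguishes "unset" from an explicit value:
//
//   mediapipe::InferenceCalculatorOptions options;
//   int32_t n = options.cpu_num_thread();  // -1, the declared default
//   options.set_cpu_num_thread(4);
//   if (options.has_cpu_num_thread()) { /* explicitly configured */ }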

// optional .mediapipe.InferenceCalculatorOptions.Delegate delegate = 5;
inline bool InferenceCalculatorOptions::_internal_has_delegate() const {
  bool value = (_has_bits_[0] & 0x00000002u) != 0;
  PROTOBUF_ASSUME(!value || delegate_ != nullptr);
  return value;
}
inline bool InferenceCalculatorOptions::has_delegate() const {
  return _internal_has_delegate();
}
inline void InferenceCalculatorOptions::clear_delegate() {
  if (delegate_ != nullptr) delegate_->Clear();
  _has_bits_[0] &= ~0x00000002u;
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate& InferenceCalculatorOptions::_internal_delegate() const {
  const ::mediapipe::InferenceCalculatorOptions_Delegate* p = delegate_;
  return p != nullptr ? *p : reinterpret_cast<const ::mediapipe::InferenceCalculatorOptions_Delegate&>(
      ::mediapipe::_InferenceCalculatorOptions_Delegate_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_Delegate& InferenceCalculatorOptions::delegate() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.delegate)
  return _internal_delegate();
}
inline void InferenceCalculatorOptions::unsafe_arena_set_allocated_delegate(
    ::mediapipe::InferenceCalculatorOptions_Delegate* delegate) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(delegate_);
  }
  delegate_ = delegate;
  if (delegate) {
    _has_bits_[0] |= 0x00000002u;
  } else {
    _has_bits_[0] &= ~0x00000002u;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.delegate)
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate* InferenceCalculatorOptions::release_delegate() {
  _has_bits_[0] &= ~0x00000002u;
  ::mediapipe::InferenceCalculatorOptions_Delegate* temp = delegate_;
  delegate_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate* InferenceCalculatorOptions::unsafe_arena_release_delegate() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.delegate)
  _has_bits_[0] &= ~0x00000002u;
  ::mediapipe::InferenceCalculatorOptions_Delegate* temp = delegate_;
  delegate_ = nullptr;
  return temp;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate* InferenceCalculatorOptions::_internal_mutable_delegate() {
  _has_bits_[0] |= 0x00000002u;
  if (delegate_ == nullptr) {
    auto* p = CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_Delegate>(GetArenaForAllocation());
    delegate_ = p;
  }
  return delegate_;
}
inline ::mediapipe::InferenceCalculatorOptions_Delegate* InferenceCalculatorOptions::mutable_delegate() {
  ::mediapipe::InferenceCalculatorOptions_Delegate* _msg = _internal_mutable_delegate();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.delegate)
  return _msg;
}
inline void InferenceCalculatorOptions::set_allocated_delegate(::mediapipe::InferenceCalculatorOptions_Delegate* delegate) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete delegate_;
  }
  if (delegate) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_Delegate>::GetOwningArena(delegate);
    if (message_arena != submessage_arena) {
      delegate = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, delegate, submessage_arena);
    }
    _has_bits_[0] |= 0x00000002u;
  } else {
    _has_bits_[0] &= ~0x00000002u;
  }
  delegate_ = delegate;
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.delegate)
}
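
// Usage sketch for the message-typed field above (illustrative only, not
// part of the generated API; mutable_gpu() is assumed from the Delegate
// definition rather than shown in this excerpt). mutable_delegate() lazily
// allocates the submessage; set_allocated_delegate() takes ownership,
// copying when the submessage lives on a different arena (see
// GetOwnedMessage above); release_delegate() hands ownership to the caller,
// returning a heap copy when the parent lives on an arena:
//
//   mediapipe::InferenceCalculatorOptions options;
//   options.mutable_delegate()->mutable_gpu();  // select the GPU delegate
//   delete options.release_delegate();          // heap-owned after release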

// optional .mediapipe.InferenceCalculatorOptions.InputOutputConfig input_output_config = 8;
inline bool InferenceCalculatorOptions::_internal_has_input_output_config() const {
  bool value = (_has_bits_[0] & 0x00000004u) != 0;
  PROTOBUF_ASSUME(!value || input_output_config_ != nullptr);
  return value;
}
inline bool InferenceCalculatorOptions::has_input_output_config() const {
  return _internal_has_input_output_config();
}
inline void InferenceCalculatorOptions::clear_input_output_config() {
  if (input_output_config_ != nullptr) input_output_config_->Clear();
  _has_bits_[0] &= ~0x00000004u;
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig& InferenceCalculatorOptions::_internal_input_output_config() const {
  const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* p = input_output_config_;
  return p != nullptr ? *p : reinterpret_cast<const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig&>(
      ::mediapipe::_InferenceCalculatorOptions_InputOutputConfig_default_instance_);
}
inline const ::mediapipe::InferenceCalculatorOptions_InputOutputConfig& InferenceCalculatorOptions::input_output_config() const {
  // @@protoc_insertion_point(field_get:mediapipe.InferenceCalculatorOptions.input_output_config)
  return _internal_input_output_config();
}
inline void InferenceCalculatorOptions::unsafe_arena_set_allocated_input_output_config(
    ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* input_output_config) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(input_output_config_);
  }
  input_output_config_ = input_output_config;
  if (input_output_config) {
    _has_bits_[0] |= 0x00000004u;
  } else {
    _has_bits_[0] &= ~0x00000004u;
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:mediapipe.InferenceCalculatorOptions.input_output_config)
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* InferenceCalculatorOptions::release_input_output_config() {
  _has_bits_[0] &= ~0x00000004u;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* temp = input_output_config_;
  input_output_config_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* InferenceCalculatorOptions::unsafe_arena_release_input_output_config() {
  // @@protoc_insertion_point(field_release:mediapipe.InferenceCalculatorOptions.input_output_config)
  _has_bits_[0] &= ~0x00000004u;
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* temp = input_output_config_;
  input_output_config_ = nullptr;
  return temp;
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* InferenceCalculatorOptions::_internal_mutable_input_output_config() {
  _has_bits_[0] |= 0x00000004u;
  if (input_output_config_ == nullptr) {
    auto* p = CreateMaybeMessage<::mediapipe::InferenceCalculatorOptions_InputOutputConfig>(GetArenaForAllocation());
    input_output_config_ = p;
  }
  return input_output_config_;
}
inline ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* InferenceCalculatorOptions::mutable_input_output_config() {
  ::mediapipe::InferenceCalculatorOptions_InputOutputConfig* _msg = _internal_mutable_input_output_config();
  // @@protoc_insertion_point(field_mutable:mediapipe.InferenceCalculatorOptions.input_output_config)
  return _msg;
}
inline void InferenceCalculatorOptions::set_allocated_input_output_config(::mediapipe::InferenceCalculatorOptions_InputOutputConfig* input_output_config) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete input_output_config_;
  }
  if (input_output_config) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::mediapipe::InferenceCalculatorOptions_InputOutputConfig>::GetOwningArena(input_output_config);
    if (message_arena != submessage_arena) {
      input_output_config = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, input_output_config, submessage_arena);
    }
    _has_bits_[0] |= 0x00000004u;
  } else {
    _has_bits_[0] &= ~0x00000004u;
  }
  input_output_config_ = input_output_config;
  // @@protoc_insertion_point(field_set_allocated:mediapipe.InferenceCalculatorOptions.input_output_config)
}
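
// End-to-end sketch combining the accessors above (illustrative only, not
// part of the generated API; the model path and tensor names are
// hypothetical):
//
//   mediapipe::InferenceCalculatorOptions options;
//   options.set_model_path("model.tflite");
//   auto* io = options.mutable_input_output_config();
//   io->mutable_input_tensor_names_map();  // choose the names-map oneof member
//   auto* fb = io->add_feedback_tensor_links();
//   fb->set_from_output_tensor_name("state_out");
//   fb->set_to_input_tensor_name("state_in");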

#ifdef __GNUC__
  #pragma GCC diagnostic pop
#endif  // __GNUC__
// -------------------------------------------------------------------

// @@protoc_insertion_point(namespace_scope)

}  // namespace mediapipe

PROTOBUF_NAMESPACE_OPEN

template <> struct is_proto_enum< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api> : ::std::true_type {};
template <>
inline const EnumDescriptor* GetEnumDescriptor< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api>() {
  return ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api_descriptor();
}
template <> struct is_proto_enum< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior> : ::std::true_type {};
template <>
inline const EnumDescriptor* GetEnumDescriptor< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior>() {
  return ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior_descriptor();
}
template <> struct is_proto_enum< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage> : ::std::true_type {};
template <>
inline const EnumDescriptor* GetEnumDescriptor< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage>() {
  return ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage_descriptor();
}
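
// Usage sketch for the enum traits and descriptor accessors above
// (illustrative only, not part of the generated API). is_proto_enum lets
// generic code constrain templates to protobuf enum types, and
// GetEnumDescriptor exposes descriptor-based reflection, e.g. mapping a
// value to its declared name:
//
//   static_assert(is_proto_enum< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api>::value, "");
//   const EnumDescriptor* d =
//       GetEnumDescriptor< ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api>();
//   // d->FindValueByNumber(0)->name() returns the declared name of value 0.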

PROTOBUF_NAMESPACE_CLOSE

// @@protoc_insertion_point(global_scope)

#include <google/protobuf/port_undef.inc>
#endif  // GOOGLE_PROTOBUF_INCLUDED_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto