Skip to content

Class mediapipe::InferenceCalculatorOptions_Delegate_Gpu

ClassList > mediapipe > InferenceCalculatorOptions_Delegate_Gpu

Inherits the following classes: PROTOBUF_NAMESPACE_ID::Message

Classes

Type Name
class _Internal

Public Types

Type Name
typedef InferenceCalculatorOptions_Delegate_Gpu_Api Api
typedef InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior CacheWritingBehavior
typedef InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage InferenceUsage
enum InferenceCalculatorOptions_Delegate_Gpu (unnamed enum of int field-number constants; rendered under the class name)

Public Static Attributes

Type Name
constexpr Api ANY = /* multi line expression */
constexpr int Api_ARRAYSIZE = /* multi line expression */
constexpr Api Api_MAX = /* multi line expression */
constexpr Api Api_MIN = /* multi line expression */
constexpr int CacheWritingBehavior_ARRAYSIZE = /* multi line expression */
constexpr CacheWritingBehavior CacheWritingBehavior_MAX = /* multi line expression */
constexpr CacheWritingBehavior CacheWritingBehavior_MIN = /* multi line expression */
constexpr InferenceUsage FAST_SINGLE_ANSWER = /* multi line expression */
constexpr int InferenceUsage_ARRAYSIZE = /* multi line expression */
constexpr InferenceUsage InferenceUsage_MAX = /* multi line expression */
constexpr InferenceUsage InferenceUsage_MIN = /* multi line expression */
constexpr CacheWritingBehavior NO_WRITE = /* multi line expression */
constexpr Api OPENCL = /* multi line expression */
constexpr Api OPENGL = /* multi line expression */
constexpr InferenceUsage SUSTAINED_SPEED = /* multi line expression */
constexpr CacheWritingBehavior TRY_WRITE = /* multi line expression */
constexpr InferenceUsage UNSPECIFIED = /* multi line expression */
constexpr CacheWritingBehavior WRITE_OR_ERROR = /* multi line expression */
const ClassData _class_data_ = /* multi line expression */
constexpr int kIndexInFileMessages = /* multi line expression */

Public Functions

Type Name
size_t ByteSizeLong () const
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear ()
void CopyFrom (const InferenceCalculatorOptions_Delegate_Gpu & from)
int GetCachedSize () const
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData * GetClassData () const
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata () const
InferenceCalculatorOptions_Delegate_Gpu ()
constexpr InferenceCalculatorOptions_Delegate_Gpu (::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized)
InferenceCalculatorOptions_Delegate_Gpu (const InferenceCalculatorOptions_Delegate_Gpu & from)
InferenceCalculatorOptions_Delegate_Gpu (InferenceCalculatorOptions_Delegate_Gpu && from) noexcept
bool IsInitialized () const
void MergeFrom (const InferenceCalculatorOptions_Delegate_Gpu & from)
InferenceCalculatorOptions_Delegate_Gpu * New (::PROTOBUF_NAMESPACE_ID::Arena * arena=nullptr) const
void Swap (InferenceCalculatorOptions_Delegate_Gpu * other)
void UnsafeArenaSwap (InferenceCalculatorOptions_Delegate_Gpu * other)
const char * _InternalParse (const char * ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext * ctx)
uint8_t * _InternalSerialize (uint8_t * target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const
bool allow_precision_loss () const
::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api api () const
::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior cache_writing_behavior () const
const std::string & cached_kernel_path () const
void clear_allow_precision_loss ()
void clear_api ()
void clear_cache_writing_behavior ()
void clear_cached_kernel_path ()
void clear_model_token ()
void clear_serialized_model_dir ()
void clear_usage ()
void clear_use_advanced_gpu_api ()
bool has_allow_precision_loss () const
bool has_api () const
bool has_cache_writing_behavior () const
bool has_cached_kernel_path () const
bool has_model_token () const
bool has_serialized_model_dir () const
bool has_usage () const
bool has_use_advanced_gpu_api () const
const std::string & model_token () const
std::string * mutable_cached_kernel_path ()
std::string * mutable_model_token ()
std::string * mutable_serialized_model_dir ()
inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet * mutable_unknown_fields ()
InferenceCalculatorOptions_Delegate_Gpu & operator= (const InferenceCalculatorOptions_Delegate_Gpu & from)
InferenceCalculatorOptions_Delegate_Gpu & operator= (InferenceCalculatorOptions_Delegate_Gpu && from) noexcept
PROTOBUF_NODISCARD std::string * release_cached_kernel_path ()
PROTOBUF_NODISCARD std::string * release_model_token ()
PROTOBUF_NODISCARD std::string * release_serialized_model_dir ()
const std::string & serialized_model_dir () const
void set_allocated_cached_kernel_path (std::string * cached_kernel_path)
void set_allocated_model_token (std::string * model_token)
void set_allocated_serialized_model_dir (std::string * serialized_model_dir)
void set_allow_precision_loss (bool value)
void set_api (::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api value)
void set_cache_writing_behavior (::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior value)
void set_cached_kernel_path (ArgT0 && arg0, ArgT... args)
PROTOBUF_ALWAYS_INLINE void set_cached_kernel_path (ArgT0 && arg0, ArgT... args)
void set_model_token (ArgT0 && arg0, ArgT... args)
PROTOBUF_ALWAYS_INLINE void set_model_token (ArgT0 && arg0, ArgT... args)
void set_serialized_model_dir (ArgT0 && arg0, ArgT... args)
PROTOBUF_ALWAYS_INLINE void set_serialized_model_dir (ArgT0 && arg0, ArgT... args)
void set_usage (::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage value)
void set_use_advanced_gpu_api (bool value)
const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet & unknown_fields () const
::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage usage () const
bool use_advanced_gpu_api () const
~InferenceCalculatorOptions_Delegate_Gpu () override

Public Static Functions

Type Name
bool Api_IsValid (int value)
const std::string & Api_Name (T enum_t_value)
bool Api_Parse (::PROTOBUF_NAMESPACE_ID::ConstStringParam name, Api * value)
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * Api_descriptor ()
bool CacheWritingBehavior_IsValid (int value)
const std::string & CacheWritingBehavior_Name (T enum_t_value)
bool CacheWritingBehavior_Parse (::PROTOBUF_NAMESPACE_ID::ConstStringParam name, CacheWritingBehavior * value)
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * CacheWritingBehavior_descriptor ()
const ::PROTOBUF_NAMESPACE_ID::Descriptor * GetDescriptor ()
const ::PROTOBUF_NAMESPACE_ID::Reflection * GetReflection ()
bool InferenceUsage_IsValid (int value)
const std::string & InferenceUsage_Name (T enum_t_value)
bool InferenceUsage_Parse (::PROTOBUF_NAMESPACE_ID::ConstStringParam name, InferenceUsage * value)
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * InferenceUsage_descriptor ()
const InferenceCalculatorOptions_Delegate_Gpu & default_instance ()
const ::PROTOBUF_NAMESPACE_ID::Descriptor * descriptor ()
const InferenceCalculatorOptions_Delegate_Gpu * internal_default_instance ()

Protected Functions

Type Name
InferenceCalculatorOptions_Delegate_Gpu (::PROTOBUF_NAMESPACE_ID::Arena * arena, bool is_message_owned=false)

Public Types Documentation

typedef Api

typedef InferenceCalculatorOptions_Delegate_Gpu_Api mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api;

typedef CacheWritingBehavior

typedef InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior;

typedef InferenceUsage

typedef InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage;

enum InferenceCalculatorOptions_Delegate_Gpu

enum mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu {
    kCachedKernelPathFieldNumber = 2,
    kSerializedModelDirFieldNumber = 7,
    kModelTokenFieldNumber = 8,
    kUseAdvancedGpuApiFieldNumber = 1,
    kApiFieldNumber = 4,
    kCacheWritingBehaviorFieldNumber = 10,
    kAllowPrecisionLossFieldNumber = 3,
    kUsageFieldNumber = 5
};

Public Static Attributes Documentation

variable ANY

constexpr InferenceCalculatorOptions_Delegate_Gpu_Api mediapipe::InferenceCalculatorOptions_Delegate_Gpu::ANY;

variable Api_ARRAYSIZE

constexpr int mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api_ARRAYSIZE;

variable Api_MAX

constexpr InferenceCalculatorOptions_Delegate_Gpu_Api mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api_MAX;

variable Api_MIN

constexpr InferenceCalculatorOptions_Delegate_Gpu_Api mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api_MIN;

variable CacheWritingBehavior_ARRAYSIZE

constexpr int mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_ARRAYSIZE;

variable CacheWritingBehavior_MAX

constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_MAX;

variable CacheWritingBehavior_MIN

constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_MIN;

variable FAST_SINGLE_ANSWER

constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage mediapipe::InferenceCalculatorOptions_Delegate_Gpu::FAST_SINGLE_ANSWER;

variable InferenceUsage_ARRAYSIZE

constexpr int mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_ARRAYSIZE;

variable InferenceUsage_MAX

constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_MAX;

variable InferenceUsage_MIN

constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_MIN;

variable NO_WRITE

constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior mediapipe::InferenceCalculatorOptions_Delegate_Gpu::NO_WRITE;

variable OPENCL

constexpr InferenceCalculatorOptions_Delegate_Gpu_Api mediapipe::InferenceCalculatorOptions_Delegate_Gpu::OPENCL;

variable OPENGL

constexpr InferenceCalculatorOptions_Delegate_Gpu_Api mediapipe::InferenceCalculatorOptions_Delegate_Gpu::OPENGL;

variable SUSTAINED_SPEED

constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage mediapipe::InferenceCalculatorOptions_Delegate_Gpu::SUSTAINED_SPEED;

variable TRY_WRITE

constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior mediapipe::InferenceCalculatorOptions_Delegate_Gpu::TRY_WRITE;

variable UNSPECIFIED

constexpr InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage mediapipe::InferenceCalculatorOptions_Delegate_Gpu::UNSPECIFIED;

variable WRITE_OR_ERROR

constexpr InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior mediapipe::InferenceCalculatorOptions_Delegate_Gpu::WRITE_OR_ERROR;

variable _class_data_

const ::PROTOBUF_NAMESPACE_ID::Message::ClassData mediapipe::InferenceCalculatorOptions_Delegate_Gpu::_class_data_;

variable kIndexInFileMessages

constexpr int mediapipe::InferenceCalculatorOptions_Delegate_Gpu::kIndexInFileMessages;

Public Functions Documentation

function ByteSizeLong

size_t mediapipe::InferenceCalculatorOptions_Delegate_Gpu::ByteSizeLong () const

function Clear

PROTOBUF_ATTRIBUTE_REINITIALIZES void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Clear () 

function CopyFrom

void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CopyFrom (
    const InferenceCalculatorOptions_Delegate_Gpu & from
) 

function GetCachedSize

inline int mediapipe::InferenceCalculatorOptions_Delegate_Gpu::GetCachedSize () const

function GetClassData

const ::PROTOBUF_NAMESPACE_ID::Message::ClassData * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::GetClassData () const

function GetMetadata

::PROTOBUF_NAMESPACE_ID::Metadata mediapipe::InferenceCalculatorOptions_Delegate_Gpu::GetMetadata () const

function InferenceCalculatorOptions_Delegate_Gpu [1/5]

inline mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu () 

function InferenceCalculatorOptions_Delegate_Gpu [2/5]

explicit constexpr mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu (
    ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized
) 

function InferenceCalculatorOptions_Delegate_Gpu [3/5]

mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu (
    const InferenceCalculatorOptions_Delegate_Gpu & from
) 

function InferenceCalculatorOptions_Delegate_Gpu [4/5]

inline mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu (
    InferenceCalculatorOptions_Delegate_Gpu && from
) noexcept

function IsInitialized

bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::IsInitialized () const

function MergeFrom

void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::MergeFrom (
    const InferenceCalculatorOptions_Delegate_Gpu & from
) 

function New

inline InferenceCalculatorOptions_Delegate_Gpu * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::New (
    ::PROTOBUF_NAMESPACE_ID::Arena * arena=nullptr
) const

function Swap

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Swap (
    InferenceCalculatorOptions_Delegate_Gpu * other
) 

function UnsafeArenaSwap

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::UnsafeArenaSwap (
    InferenceCalculatorOptions_Delegate_Gpu * other
) 

function _InternalParse

const char * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::_InternalParse (
    const char * ptr,
    ::PROTOBUF_NAMESPACE_ID::internal::ParseContext * ctx
) 

function _InternalSerialize

uint8_t * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::_InternalSerialize (
    uint8_t * target,
    ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream
) const

function allow_precision_loss

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::allow_precision_loss () const

function api

inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api mediapipe::InferenceCalculatorOptions_Delegate_Gpu::api () const

function cache_writing_behavior

inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior mediapipe::InferenceCalculatorOptions_Delegate_Gpu::cache_writing_behavior () const

function cached_kernel_path

inline const std::string & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::cached_kernel_path () const

function clear_allow_precision_loss

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_allow_precision_loss () 

function clear_api

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_api () 

function clear_cache_writing_behavior

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_cache_writing_behavior () 

function clear_cached_kernel_path

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_cached_kernel_path () 

function clear_model_token

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_model_token () 

function clear_serialized_model_dir

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_serialized_model_dir () 

function clear_usage

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_usage () 

function clear_use_advanced_gpu_api

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::clear_use_advanced_gpu_api () 

function has_allow_precision_loss

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_allow_precision_loss () const

function has_api

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_api () const

function has_cache_writing_behavior

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_cache_writing_behavior () const

function has_cached_kernel_path

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_cached_kernel_path () const

function has_model_token

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_model_token () const

function has_serialized_model_dir

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_serialized_model_dir () const

function has_usage

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_usage () const

function has_use_advanced_gpu_api

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::has_use_advanced_gpu_api () const

function model_token

inline const std::string & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::model_token () const

function mutable_cached_kernel_path

inline std::string * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::mutable_cached_kernel_path () 

function mutable_model_token

inline std::string * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::mutable_model_token () 

function mutable_serialized_model_dir

inline std::string * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::mutable_serialized_model_dir () 

function mutable_unknown_fields

inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::mutable_unknown_fields () 

function operator=

inline InferenceCalculatorOptions_Delegate_Gpu & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::operator= (
    const InferenceCalculatorOptions_Delegate_Gpu & from
) 

function operator=

inline InferenceCalculatorOptions_Delegate_Gpu & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::operator= (
    InferenceCalculatorOptions_Delegate_Gpu && from
) noexcept

function release_cached_kernel_path

inline PROTOBUF_NODISCARD std::string * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::release_cached_kernel_path () 

function release_model_token

inline PROTOBUF_NODISCARD std::string * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::release_model_token () 

function release_serialized_model_dir

inline PROTOBUF_NODISCARD std::string * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::release_serialized_model_dir () 

function serialized_model_dir

inline const std::string & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::serialized_model_dir () const

function set_allocated_cached_kernel_path

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_allocated_cached_kernel_path (
    std::string * cached_kernel_path
) 

function set_allocated_model_token

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_allocated_model_token (
    std::string * model_token
) 

function set_allocated_serialized_model_dir

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_allocated_serialized_model_dir (
    std::string * serialized_model_dir
) 

function set_allow_precision_loss

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_allow_precision_loss (
    bool value
) 

function set_api

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_api (
    ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_Api value
) 

function set_cache_writing_behavior

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_cache_writing_behavior (
    ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_CacheWritingBehavior value
) 

function set_cached_kernel_path [1/2]

template<typename ArgT0, typename... ArgT>
void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_cached_kernel_path (
    ArgT0 && arg0,
    ArgT... args
) 

function set_cached_kernel_path [2/2]

template<typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_cached_kernel_path (
    ArgT0 && arg0,
    ArgT... args
) 

function set_model_token [1/2]

template<typename ArgT0, typename... ArgT>
void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_model_token (
    ArgT0 && arg0,
    ArgT... args
) 

function set_model_token [2/2]

template<typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_model_token (
    ArgT0 && arg0,
    ArgT... args
) 

function set_serialized_model_dir [1/2]

template<typename ArgT0, typename... ArgT>
void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_serialized_model_dir (
    ArgT0 && arg0,
    ArgT... args
) 

function set_serialized_model_dir [2/2]

template<typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_serialized_model_dir (
    ArgT0 && arg0,
    ArgT... args
) 

function set_usage

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_usage (
    ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage value
) 

function set_use_advanced_gpu_api

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::set_use_advanced_gpu_api (
    bool value
) 

function unknown_fields

inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::unknown_fields () const

function usage

inline ::mediapipe::InferenceCalculatorOptions_Delegate_Gpu_InferenceUsage mediapipe::InferenceCalculatorOptions_Delegate_Gpu::usage () const

function use_advanced_gpu_api

inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::use_advanced_gpu_api () const

function ~InferenceCalculatorOptions_Delegate_Gpu

mediapipe::InferenceCalculatorOptions_Delegate_Gpu::~InferenceCalculatorOptions_Delegate_Gpu () override

Public Static Functions Documentation

function Api_IsValid

static inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api_IsValid (
    int value
) 

function Api_Name

template<typename T>
static inline const std::string & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api_Name (
    T enum_t_value
) 

function Api_Parse

static inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api_Parse (
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
    Api * value
) 

function Api_descriptor

static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::Api_descriptor () 

function CacheWritingBehavior_IsValid

static inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_IsValid (
    int value
) 

function CacheWritingBehavior_Name

template<typename T>
static inline const std::string & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_Name (
    T enum_t_value
) 

function CacheWritingBehavior_Parse

static inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_Parse (
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
    CacheWritingBehavior * value
) 

function CacheWritingBehavior_descriptor

static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::CacheWritingBehavior_descriptor () 

function GetDescriptor

static inline const ::PROTOBUF_NAMESPACE_ID::Descriptor * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::GetDescriptor () 

function GetReflection

static inline const ::PROTOBUF_NAMESPACE_ID::Reflection * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::GetReflection () 

function InferenceUsage_IsValid

static inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_IsValid (
    int value
) 

function InferenceUsage_Name

template<typename T>
static inline const std::string & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_Name (
    T enum_t_value
) 

function InferenceUsage_Parse

static inline bool mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_Parse (
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
    InferenceUsage * value
) 

function InferenceUsage_descriptor

static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceUsage_descriptor () 

function default_instance

static inline const InferenceCalculatorOptions_Delegate_Gpu & mediapipe::InferenceCalculatorOptions_Delegate_Gpu::default_instance () 

function descriptor

static inline const ::PROTOBUF_NAMESPACE_ID::Descriptor * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::descriptor () 

function internal_default_instance

static inline const InferenceCalculatorOptions_Delegate_Gpu * mediapipe::InferenceCalculatorOptions_Delegate_Gpu::internal_default_instance () 

Protected Functions Documentation

function InferenceCalculatorOptions_Delegate_Gpu [5/5]

explicit mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InferenceCalculatorOptions_Delegate_Gpu (
    ::PROTOBUF_NAMESPACE_ID::Arena * arena,
    bool is_message_owned=false
) 

Friends Documentation

friend InternalHelper

template<typename T>
class mediapipe::InferenceCalculatorOptions_Delegate_Gpu::InternalHelper (
    ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper
) 

friend AnyMetadata

class mediapipe::InferenceCalculatorOptions_Delegate_Gpu::AnyMetadata (
    ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata
) 

friend TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto

struct mediapipe::InferenceCalculatorOptions_Delegate_Gpu::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto (
    ::TableStruct_mediapipe_2fcalculators_2ftensor_2finference_5fcalculator_2eproto
) 

friend swap

inline void mediapipe::InferenceCalculatorOptions_Delegate_Gpu::swap (
    InferenceCalculatorOptions_Delegate_Gpu & a,
    InferenceCalculatorOptions_Delegate_Gpu & b
) 


The documentation for this class was generated from the following file /home/friedel/devel/ILLIXR-plugins/hand_tracking/build/mediapipe/calculators/tensor/inference_calculator.pb.h