Merge pull request #858 from arrybn:tf

pull/874/head
Vadim Pisarevsky 8 years ago
commit 5f49caa42f
53 changed files (lines changed per file):
  1. modules/dnn/3rdparty/protobuf/CMakeLists.txt (7)
  2. modules/dnn/CMakeLists.txt (14)
  3. modules/dnn/cmake/OpenCVFindCBLAS.cmake (2)
  4. modules/dnn/cmake/OpenCVFindLibProtobuf.cmake (22)
  5. modules/dnn/include/opencv2/dnn/all_layers.hpp (16)
  6. modules/dnn/include/opencv2/dnn/dnn.hpp (6)
  7. modules/dnn/misc/tensorflow/attr_value.pb.cc (3014)
  8. modules/dnn/misc/tensorflow/attr_value.pb.h (1697)
  9. modules/dnn/misc/tensorflow/function.pb.cc (2348)
  10. modules/dnn/misc/tensorflow/function.pb.h (1160)
  11. modules/dnn/misc/tensorflow/graph.pb.cc (1687)
  12. modules/dnn/misc/tensorflow/graph.pb.h (814)
  13. modules/dnn/misc/tensorflow/op_def.pb.cc (4045)
  14. modules/dnn/misc/tensorflow/op_def.pb.h (2103)
  15. modules/dnn/misc/tensorflow/tensor.pb.cc (1596)
  16. modules/dnn/misc/tensorflow/tensor.pb.h (770)
  17. modules/dnn/misc/tensorflow/tensor_shape.pb.cc (895)
  18. modules/dnn/misc/tensorflow/tensor_shape.pb.h (423)
  19. modules/dnn/misc/tensorflow/types.pb.cc (163)
  20. modules/dnn/misc/tensorflow/types.pb.h (129)
  21. modules/dnn/misc/tensorflow/versions.pb.cc (572)
  22. modules/dnn/misc/tensorflow/versions.pb.h (239)
  23. modules/dnn/samples/tf_inception.cpp (182)
  24. modules/dnn/src/caffe/layer_loaders.cpp (17)
  25. modules/dnn/src/dnn.cpp (1)
  26. modules/dnn/src/init.cpp (11)
  27. modules/dnn/src/layers/convolution_layer.cpp (21)
  28. modules/dnn/src/layers/convolution_layer.hpp (1)
  29. modules/dnn/src/layers/fully_connected_layer.cpp (2)
  30. modules/dnn/src/layers/layers_common.cpp (53)
  31. modules/dnn/src/layers/layers_common.hpp (9)
  32. modules/dnn/src/layers/lrn_layer.cpp (19)
  33. modules/dnn/src/layers/lrn_layer.hpp (4)
  34. modules/dnn/src/layers/op_im2col.cpp (42)
  35. modules/dnn/src/layers/op_im2col.hpp (5)
  36. modules/dnn/src/layers/pooling_layer.cpp (43)
  37. modules/dnn/src/layers/pooling_layer.hpp (5)
  38. modules/dnn/src/layers/reshape_layer.cpp (44)
  39. modules/dnn/src/layers/reshape_layer.hpp (4)
  40. modules/dnn/src/layers/shift_layer.cpp (157)
  41. modules/dnn/src/layers/shift_layer.hpp (36)
  42. modules/dnn/src/tensorflow/attr_value.proto (60)
  43. modules/dnn/src/tensorflow/function.proto (95)
  44. modules/dnn/src/tensorflow/graph.proto (112)
  45. modules/dnn/src/tensorflow/op_def.proto (157)
  46. modules/dnn/src/tensorflow/tensor.proto (68)
  47. modules/dnn/src/tensorflow/tensor_shape.proto (45)
  48. modules/dnn/src/tensorflow/tf_importer.cpp (749)
  49. modules/dnn/src/tensorflow/tf_io.cpp (63)
  50. modules/dnn/src/tensorflow/tf_io.hpp (29)
  51. modules/dnn/src/tensorflow/types.proto (60)
  52. modules/dnn/src/tensorflow/versions.proto (31)
  53. modules/dnn/test/test_tf_importer.cpp (51)

@@ -16,9 +16,10 @@ if(MSVC)
/wd4702 /wd4456 /wd4457 /wd4065 /wd4310 /wd4661 /wd4506
)
else()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated -Wmissing-prototypes -Wmissing-declarations -Wshadow -Wunused-parameter -Wunused-local-typedefs -Wsign-compare -Wsign-promo -Wundef
-Wtautological-undefined-compare
-Wignored-qualifiers -Wextra -Wunused-function -Wunused-const-variable
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated -Wmissing-prototypes -Wmissing-declarations -Wshadow
-Wunused-parameter -Wunused-local-typedefs -Wsign-compare -Wsign-promo
-Wundef -Wtautological-undefined-compare -Wignored-qualifiers -Wextra
-Wunused-function -Wunused-const-variable
)
endif()

@@ -15,6 +15,20 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow -Wno-parentheses -Wmaybe-uninit
)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4701 /wd4100)
if(MSVC)
add_definitions( -D_CRT_SECURE_NO_WARNINGS=1 )
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4244 /wd4267 /wd4018 /wd4355 /wd4800 /wd4251 /wd4996 /wd4146
/wd4305 /wd4127 /wd4100 /wd4512 /wd4125 /wd4389 /wd4510 /wd4610
/wd4702 /wd4456 /wd4457 /wd4065 /wd4310 /wd4661 /wd4506
)
else()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated -Wmissing-prototypes -Wmissing-declarations -Wshadow
-Wunused-parameter -Wunused-local-typedefs -Wsign-compare -Wsign-promo
-Wundef -Wtautological-undefined-compare -Wignored-qualifiers -Wextra
-Wunused-function -Wunused-const-variable -Wdeprecated-declarations
)
endif()
# ----------------------------------------------------------------------------
# Resolve libprotobuf dependency
# ----------------------------------------------------------------------------

@@ -16,7 +16,7 @@ if(${the_module}_WITH_BLAS)
endif()
if(NOT HAVE_BLAS)
include(cmake/OpenCVFindMKL.cmake)
if(MKL_FOUND)
if(HAVE_MKL)
set(BLAS_INCLUDE_DIR ${MKL_INCLUDE_DIRS})
set(BLAS_LIBRARIES ${MKL_LIBRARIES} )
set(BLAS_CBLAS_H "mkl_cblas.h" )

@@ -12,9 +12,9 @@ if(UPDATE_PROTO_FILES)
endif()
if(DEFINED PROTOBUF_PROTOC_EXECUTABLE AND EXISTS ${PROTOBUF_PROTOC_EXECUTABLE})
message(STATUS "The protocol buffer compiler is found (${PROTOBUF_PROTOC_EXECUTABLE})")
PROTOBUF_GENERATE_CPP(PROTOBUF_HDRS PROTOBUF_SRCS
src/caffe/caffe.proto
)
file(GLOB proto_files src/tensorflow/*.proto)
list(APPEND proto_files src/caffe/caffe.proto)
PROTOBUF_GENERATE_CPP(PROTOBUF_HDRS PROTOBUF_SRCS ${proto_files})
else()
message(FATAL_ERROR "The protocol buffer compiler is not found (PROTOBUF_PROTOC_EXECUTABLE='${PROTOBUF_PROTOC_EXECUTABLE}')")
endif()
@@ -34,13 +34,21 @@ else()
endif()
if(NOT UPDATE_PROTO_FILES)
list(APPEND PROTOBUF_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe/caffe.pb.cc)
list(APPEND PROTOBUF_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe/caffe.pb.h)
file(GLOB fw_srcs ${CMAKE_CURRENT_SOURCE_DIR}/misc/tensorflow/*.cc)
file(GLOB fw_hdrs ${CMAKE_CURRENT_SOURCE_DIR}/misc/tensorflow/*.h)
list(APPEND fw_srcs ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe/caffe.pb.cc)
list(APPEND fw_hdrs ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe/caffe.pb.h)
list(APPEND PROTOBUF_SRCS ${fw_srcs})
list(APPEND PROTOBUF_HDRS ${fw_hdrs})
list(APPEND PROTOBUF_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe)
list(APPEND PROTOBUF_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/misc/tensorflow)
endif()
add_definitions(-DHAVE_PROTOBUF=1)
#suppress warnings in autogenerated caffe.pb.* files
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wunused-parameter -Wundef)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4125 /wd4267 /wd4127 /wd4244 /wd4512 /wd4702)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wunused-parameter -Wundef -Wignored-qualifiers -Wno-enum-compare
-Wdeprecated-declarations)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4125 /wd4267 /wd4127 /wd4244 /wd4512 /wd4702
/wd4456 /wd4510 /wd4610 /wd4800
)

@@ -210,6 +210,7 @@ namespace dnn
public:
CV_PROP_RW Size kernel, stride, pad, dilation;
CV_PROP_RW String padMode;
};
class CV_EXPORTS_W ConvolutionLayer : public BaseConvolutionLayer
@@ -238,9 +239,12 @@ namespace dnn
CV_PROP_RW int type;
CV_PROP_RW int size;
CV_PROP_RW double alpha, beta;
CV_PROP_RW double alpha, beta, bias;
CV_PROP_RW bool normBySize;
static CV_WRAP Ptr<LRNLayer> create(int type = LRNLayer::CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);
static CV_WRAP Ptr<LRNLayer> create(int type = LRNLayer::CHANNEL_NRM, int size = 5,
double alpha = 1, double beta = 0.75, double bias = 1,
bool normBySize = true);
};
class CV_EXPORTS_W PoolingLayer : public Layer
@@ -257,8 +261,11 @@ namespace dnn
CV_PROP_RW int type;
CV_PROP_RW Size kernel, stride, pad;
CV_PROP_RW bool globalPooling;
CV_PROP_RW String padMode;
static CV_WRAP Ptr<PoolingLayer> create(int type = PoolingLayer::MAX, Size kernel = Size(2, 2), Size stride = Size(1, 1), Size pad = Size(0, 0));
static CV_WRAP Ptr<PoolingLayer> create(int type = PoolingLayer::MAX, Size kernel = Size(2, 2),
Size stride = Size(1, 1), Size pad = Size(0, 0),
const cv::String& padMode = "");
static CV_WRAP Ptr<PoolingLayer> createGlobal(int type = PoolingLayer::MAX);
};
@@ -294,7 +301,8 @@ namespace dnn
CV_PROP_RW BlobShape newShapeDesc;
CV_PROP_RW Range newShapeRange;
static CV_WRAP Ptr<ReshapeLayer> create(const BlobShape &newShape, Range applyingRange = Range::all());
static CV_WRAP Ptr<ReshapeLayer> create(const BlobShape &newShape, Range applyingRange = Range::all(),
bool enableReordering = false);
};
class CV_EXPORTS_W ConcatLayer : public Layer
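
For reference, a minimal sketch of how the extended factory signatures above could be called; the include path, parameter values, and the helper function name are illustrative only, not part of this change:

// Illustrative use of the new padMode, bias/normBySize arguments introduced above
// (values are placeholders).
#include <opencv2/dnn/all_layers.hpp>
using namespace cv;
using namespace cv::dnn;

static void makeLayersExample()
{
    // Pooling with a TensorFlow-style "SAME" padding via the new padMode argument.
    Ptr<PoolingLayer> pool = PoolingLayer::create(PoolingLayer::MAX, Size(3, 3),
                                                  Size(2, 2), Size(0, 0), "SAME");

    // LRN with the new bias and normBySize parameters.
    Ptr<LRNLayer> lrn = LRNLayer::create(LRNLayer::CHANNEL_NRM, 5,
                                         0.0001, 0.75, /*bias=*/1.0,
                                         /*normBySize=*/true);
    (void)pool; (void)lrn;
}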

@@ -299,6 +299,12 @@ namespace dnn //! This namespace is used for dnn module functionality.
*/
CV_EXPORTS_W Net readNetFromCaffe(const String &prototxt, const String &caffeModel = String());
/** @brief Creates the importer of <a href="http://www.tensorflow.org">TensorFlow</a> framework network.
* @param model path to the .pb file with binary protobuf description of the network architecture.
* @returns Pointer to the created importer, NULL in failure cases.
*/
CV_EXPORTS Ptr<Importer> createTensorflowImporter(const String &model);
/** @brief Creates the importer of <a href="http://torch.ch">Torch7</a> framework network.
* @param filename path to the file, dumped from Torch by using torch.save() function.
* @param isBinary specifies whether the network was serialized in ascii mode or binary.
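
A minimal usage sketch for the importer declared above, assuming the dnn module's existing Importer::populateNet() interface (as used with the Caffe importer); the model path and helper name are placeholders:

// Load a TensorFlow .pb graph through the new importer (sketch).
#include <opencv2/dnn.hpp>
using namespace cv;
using namespace cv::dnn;

static Net loadTensorflowNet(const String& modelPath)
{
    Ptr<Importer> importer = createTensorflowImporter(modelPath);
    CV_Assert(!importer.empty());   // empty pointer signals failure (see @returns above)
    Net net;
    importer->populateNet(net);     // fill the Net with layers from the .pb file
    return net;
}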

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,814 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: graph.proto
#ifndef PROTOBUF_graph_2eproto__INCLUDED
#define PROTOBUF_graph_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/map.h>
#include <google/protobuf/map_field_inl.h>
#include <google/protobuf/unknown_field_set.h>
#include "attr_value.pb.h"
#include "function.pb.h"
#include "versions.pb.h"
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_graph_2eproto();
void protobuf_InitDefaults_graph_2eproto();
void protobuf_AssignDesc_graph_2eproto();
void protobuf_ShutdownFile_graph_2eproto();
class GraphDef;
class NodeDef;
// ===================================================================
class GraphDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.GraphDef) */ {
public:
GraphDef();
virtual ~GraphDef();
GraphDef(const GraphDef& from);
inline GraphDef& operator=(const GraphDef& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const GraphDef& default_instance();
static const GraphDef* internal_default_instance();
void UnsafeArenaSwap(GraphDef* other);
void Swap(GraphDef* other);
// implements Message ----------------------------------------------
inline GraphDef* New() const { return New(NULL); }
GraphDef* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const GraphDef& from);
void MergeFrom(const GraphDef& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(GraphDef* other);
void UnsafeMergeFrom(const GraphDef& from);
protected:
explicit GraphDef(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .tensorflow.NodeDef node = 1;
int node_size() const;
void clear_node();
static const int kNodeFieldNumber = 1;
const ::tensorflow::NodeDef& node(int index) const;
::tensorflow::NodeDef* mutable_node(int index);
::tensorflow::NodeDef* add_node();
::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >*
mutable_node();
const ::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >&
node() const;
// optional .tensorflow.VersionDef versions = 4;
bool has_versions() const;
void clear_versions();
static const int kVersionsFieldNumber = 4;
private:
void _slow_mutable_versions();
void _slow_set_allocated_versions(
::google::protobuf::Arena* message_arena, ::tensorflow::VersionDef** versions);
::tensorflow::VersionDef* _slow_release_versions();
public:
const ::tensorflow::VersionDef& versions() const;
::tensorflow::VersionDef* mutable_versions();
::tensorflow::VersionDef* release_versions();
void set_allocated_versions(::tensorflow::VersionDef* versions);
::tensorflow::VersionDef* unsafe_arena_release_versions();
void unsafe_arena_set_allocated_versions(
::tensorflow::VersionDef* versions);
// optional int32 version = 3 [deprecated = true];
GOOGLE_PROTOBUF_DEPRECATED_ATTR void clear_version();
GOOGLE_PROTOBUF_DEPRECATED_ATTR static const int kVersionFieldNumber = 3;
GOOGLE_PROTOBUF_DEPRECATED_ATTR ::google::protobuf::int32 version() const;
GOOGLE_PROTOBUF_DEPRECATED_ATTR void set_version(::google::protobuf::int32 value);
// optional .tensorflow.FunctionDefLibrary library = 2;
bool has_library() const;
void clear_library();
static const int kLibraryFieldNumber = 2;
private:
void _slow_mutable_library();
void _slow_set_allocated_library(
::google::protobuf::Arena* message_arena, ::tensorflow::FunctionDefLibrary** library);
::tensorflow::FunctionDefLibrary* _slow_release_library();
public:
const ::tensorflow::FunctionDefLibrary& library() const;
::tensorflow::FunctionDefLibrary* mutable_library();
::tensorflow::FunctionDefLibrary* release_library();
void set_allocated_library(::tensorflow::FunctionDefLibrary* library);
::tensorflow::FunctionDefLibrary* unsafe_arena_release_library();
void unsafe_arena_set_allocated_library(
::tensorflow::FunctionDefLibrary* library);
// @@protoc_insertion_point(class_scope:tensorflow.GraphDef)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef > node_;
::tensorflow::VersionDef* versions_;
::tensorflow::FunctionDefLibrary* library_;
::google::protobuf::int32 version_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_graph_2eproto_impl();
friend void protobuf_AddDesc_graph_2eproto_impl();
friend void protobuf_AssignDesc_graph_2eproto();
friend void protobuf_ShutdownFile_graph_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<GraphDef> GraphDef_default_instance_;
// -------------------------------------------------------------------
class NodeDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.NodeDef) */ {
public:
NodeDef();
virtual ~NodeDef();
NodeDef(const NodeDef& from);
inline NodeDef& operator=(const NodeDef& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const NodeDef& default_instance();
static const NodeDef* internal_default_instance();
void UnsafeArenaSwap(NodeDef* other);
void Swap(NodeDef* other);
// implements Message ----------------------------------------------
inline NodeDef* New() const { return New(NULL); }
NodeDef* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const NodeDef& from);
void MergeFrom(const NodeDef& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(NodeDef* other);
void UnsafeMergeFrom(const NodeDef& from);
protected:
explicit NodeDef(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string name = 1;
void clear_name();
static const int kNameFieldNumber = 1;
const ::std::string& name() const;
void set_name(const ::std::string& value);
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
::std::string* unsafe_arena_release_name();
void unsafe_arena_set_allocated_name(
::std::string* name);
// optional string op = 2;
void clear_op();
static const int kOpFieldNumber = 2;
const ::std::string& op() const;
void set_op(const ::std::string& value);
void set_op(const char* value);
void set_op(const char* value, size_t size);
::std::string* mutable_op();
::std::string* release_op();
void set_allocated_op(::std::string* op);
::std::string* unsafe_arena_release_op();
void unsafe_arena_set_allocated_op(
::std::string* op);
// repeated string input = 3;
int input_size() const;
void clear_input();
static const int kInputFieldNumber = 3;
const ::std::string& input(int index) const;
::std::string* mutable_input(int index);
void set_input(int index, const ::std::string& value);
void set_input(int index, const char* value);
void set_input(int index, const char* value, size_t size);
::std::string* add_input();
void add_input(const ::std::string& value);
void add_input(const char* value);
void add_input(const char* value, size_t size);
const ::google::protobuf::RepeatedPtrField< ::std::string>& input() const;
::google::protobuf::RepeatedPtrField< ::std::string>* mutable_input();
// optional string device = 4;
void clear_device();
static const int kDeviceFieldNumber = 4;
const ::std::string& device() const;
void set_device(const ::std::string& value);
void set_device(const char* value);
void set_device(const char* value, size_t size);
::std::string* mutable_device();
::std::string* release_device();
void set_allocated_device(::std::string* device);
::std::string* unsafe_arena_release_device();
void unsafe_arena_set_allocated_device(
::std::string* device);
// map<string, .tensorflow.AttrValue> attr = 5;
int attr_size() const;
void clear_attr();
static const int kAttrFieldNumber = 5;
const ::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >&
attr() const;
::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >*
mutable_attr();
// @@protoc_insertion_point(class_scope:tensorflow.NodeDef)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedPtrField< ::std::string> input_;
typedef ::google::protobuf::internal::MapEntryLite<
::std::string, ::tensorflow::AttrValue,
::google::protobuf::internal::WireFormatLite::TYPE_STRING,
::google::protobuf::internal::WireFormatLite::TYPE_MESSAGE,
0 >
NodeDef_AttrEntry;
::google::protobuf::internal::MapField<
::std::string, ::tensorflow::AttrValue,
::google::protobuf::internal::WireFormatLite::TYPE_STRING,
::google::protobuf::internal::WireFormatLite::TYPE_MESSAGE,
0 > attr_;
::google::protobuf::internal::ArenaStringPtr name_;
::google::protobuf::internal::ArenaStringPtr op_;
::google::protobuf::internal::ArenaStringPtr device_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_graph_2eproto_impl();
friend void protobuf_AddDesc_graph_2eproto_impl();
friend void protobuf_AssignDesc_graph_2eproto();
friend void protobuf_ShutdownFile_graph_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<NodeDef> NodeDef_default_instance_;
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// GraphDef
// repeated .tensorflow.NodeDef node = 1;
inline int GraphDef::node_size() const {
return node_.size();
}
inline void GraphDef::clear_node() {
node_.Clear();
}
inline const ::tensorflow::NodeDef& GraphDef::node(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.node)
return node_.Get(index);
}
inline ::tensorflow::NodeDef* GraphDef::mutable_node(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.GraphDef.node)
return node_.Mutable(index);
}
inline ::tensorflow::NodeDef* GraphDef::add_node() {
// @@protoc_insertion_point(field_add:tensorflow.GraphDef.node)
return node_.Add();
}
inline ::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >*
GraphDef::mutable_node() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.GraphDef.node)
return &node_;
}
inline const ::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >&
GraphDef::node() const {
// @@protoc_insertion_point(field_list:tensorflow.GraphDef.node)
return node_;
}
// optional .tensorflow.VersionDef versions = 4;
inline bool GraphDef::has_versions() const {
return this != internal_default_instance() && versions_ != NULL;
}
inline void GraphDef::clear_versions() {
if (GetArenaNoVirtual() == NULL && versions_ != NULL) delete versions_;
versions_ = NULL;
}
inline const ::tensorflow::VersionDef& GraphDef::versions() const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.versions)
return versions_ != NULL ? *versions_
: *::tensorflow::VersionDef::internal_default_instance();
}
inline ::tensorflow::VersionDef* GraphDef::mutable_versions() {
if (versions_ == NULL) {
_slow_mutable_versions();
}
// @@protoc_insertion_point(field_mutable:tensorflow.GraphDef.versions)
return versions_;
}
inline ::tensorflow::VersionDef* GraphDef::release_versions() {
// @@protoc_insertion_point(field_release:tensorflow.GraphDef.versions)
if (GetArenaNoVirtual() != NULL) {
return _slow_release_versions();
} else {
::tensorflow::VersionDef* temp = versions_;
versions_ = NULL;
return temp;
}
}
inline void GraphDef::set_allocated_versions(::tensorflow::VersionDef* versions) {
::google::protobuf::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == NULL) {
delete versions_;
}
if (versions != NULL) {
_slow_set_allocated_versions(message_arena, &versions);
}
versions_ = versions;
if (versions) {
} else {
}
// @@protoc_insertion_point(field_set_allocated:tensorflow.GraphDef.versions)
}
// optional int32 version = 3 [deprecated = true];
inline void GraphDef::clear_version() {
version_ = 0;
}
inline ::google::protobuf::int32 GraphDef::version() const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.version)
return version_;
}
inline void GraphDef::set_version(::google::protobuf::int32 value) {
version_ = value;
// @@protoc_insertion_point(field_set:tensorflow.GraphDef.version)
}
// optional .tensorflow.FunctionDefLibrary library = 2;
inline bool GraphDef::has_library() const {
return this != internal_default_instance() && library_ != NULL;
}
inline void GraphDef::clear_library() {
if (GetArenaNoVirtual() == NULL && library_ != NULL) delete library_;
library_ = NULL;
}
inline const ::tensorflow::FunctionDefLibrary& GraphDef::library() const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.library)
return library_ != NULL ? *library_
: *::tensorflow::FunctionDefLibrary::internal_default_instance();
}
inline ::tensorflow::FunctionDefLibrary* GraphDef::mutable_library() {
if (library_ == NULL) {
_slow_mutable_library();
}
// @@protoc_insertion_point(field_mutable:tensorflow.GraphDef.library)
return library_;
}
inline ::tensorflow::FunctionDefLibrary* GraphDef::release_library() {
// @@protoc_insertion_point(field_release:tensorflow.GraphDef.library)
if (GetArenaNoVirtual() != NULL) {
return _slow_release_library();
} else {
::tensorflow::FunctionDefLibrary* temp = library_;
library_ = NULL;
return temp;
}
}
inline void GraphDef::set_allocated_library(::tensorflow::FunctionDefLibrary* library) {
::google::protobuf::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == NULL) {
delete library_;
}
if (library != NULL) {
_slow_set_allocated_library(message_arena, &library);
}
library_ = library;
if (library) {
} else {
}
// @@protoc_insertion_point(field_set_allocated:tensorflow.GraphDef.library)
}
inline const GraphDef* GraphDef::internal_default_instance() {
return &GraphDef_default_instance_.get();
}
// -------------------------------------------------------------------
// NodeDef
// optional string name = 1;
inline void NodeDef::clear_name() {
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& NodeDef::name() const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.name)
return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void NodeDef::set_name(const ::std::string& value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.name)
}
inline void NodeDef::set_name(const char* value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.name)
}
inline void NodeDef::set_name(const char* value,
size_t size) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.name)
}
inline ::std::string* NodeDef::mutable_name() {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.name)
return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::release_name() {
// @@protoc_insertion_point(field_release:tensorflow.NodeDef.name)
return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::unsafe_arena_release_name() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.NodeDef.name)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void NodeDef::set_allocated_name(::std::string* name) {
if (name != NULL) {
} else {
}
name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.NodeDef.name)
}
inline void NodeDef::unsafe_arena_set_allocated_name(
::std::string* name) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (name != NULL) {
} else {
}
name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
name, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeDef.name)
}
// optional string op = 2;
inline void NodeDef::clear_op() {
op_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& NodeDef::op() const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.op)
return op_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void NodeDef::set_op(const ::std::string& value) {
op_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.op)
}
inline void NodeDef::set_op(const char* value) {
op_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.op)
}
inline void NodeDef::set_op(const char* value,
size_t size) {
op_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.op)
}
inline ::std::string* NodeDef::mutable_op() {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.op)
return op_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::release_op() {
// @@protoc_insertion_point(field_release:tensorflow.NodeDef.op)
return op_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::unsafe_arena_release_op() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.NodeDef.op)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return op_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void NodeDef::set_allocated_op(::std::string* op) {
if (op != NULL) {
} else {
}
op_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), op,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.NodeDef.op)
}
inline void NodeDef::unsafe_arena_set_allocated_op(
::std::string* op) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (op != NULL) {
} else {
}
op_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
op, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeDef.op)
}
// repeated string input = 3;
inline int NodeDef::input_size() const {
return input_.size();
}
inline void NodeDef::clear_input() {
input_.Clear();
}
inline const ::std::string& NodeDef::input(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.input)
return input_.Get(index);
}
inline ::std::string* NodeDef::mutable_input(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.input)
return input_.Mutable(index);
}
inline void NodeDef::set_input(int index, const ::std::string& value) {
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.input)
input_.Mutable(index)->assign(value);
}
inline void NodeDef::set_input(int index, const char* value) {
input_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.input)
}
inline void NodeDef::set_input(int index, const char* value, size_t size) {
input_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.input)
}
inline ::std::string* NodeDef::add_input() {
// @@protoc_insertion_point(field_add_mutable:tensorflow.NodeDef.input)
return input_.Add();
}
inline void NodeDef::add_input(const ::std::string& value) {
input_.Add()->assign(value);
// @@protoc_insertion_point(field_add:tensorflow.NodeDef.input)
}
inline void NodeDef::add_input(const char* value) {
input_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:tensorflow.NodeDef.input)
}
inline void NodeDef::add_input(const char* value, size_t size) {
input_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:tensorflow.NodeDef.input)
}
inline const ::google::protobuf::RepeatedPtrField< ::std::string>&
NodeDef::input() const {
// @@protoc_insertion_point(field_list:tensorflow.NodeDef.input)
return input_;
}
inline ::google::protobuf::RepeatedPtrField< ::std::string>*
NodeDef::mutable_input() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.NodeDef.input)
return &input_;
}
// optional string device = 4;
inline void NodeDef::clear_device() {
device_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& NodeDef::device() const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.device)
return device_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void NodeDef::set_device(const ::std::string& value) {
device_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.device)
}
inline void NodeDef::set_device(const char* value) {
device_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.device)
}
inline void NodeDef::set_device(const char* value,
size_t size) {
device_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.device)
}
inline ::std::string* NodeDef::mutable_device() {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.device)
return device_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::release_device() {
// @@protoc_insertion_point(field_release:tensorflow.NodeDef.device)
return device_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::unsafe_arena_release_device() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.NodeDef.device)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return device_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void NodeDef::set_allocated_device(::std::string* device) {
if (device != NULL) {
} else {
}
device_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), device,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.NodeDef.device)
}
inline void NodeDef::unsafe_arena_set_allocated_device(
::std::string* device) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (device != NULL) {
} else {
}
device_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
device, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeDef.device)
}
// map<string, .tensorflow.AttrValue> attr = 5;
inline int NodeDef::attr_size() const {
return attr_.size();
}
inline void NodeDef::clear_attr() {
attr_.Clear();
}
inline const ::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >&
NodeDef::attr() const {
// @@protoc_insertion_point(field_map:tensorflow.NodeDef.attr)
return attr_.GetMap();
}
inline ::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >*
NodeDef::mutable_attr() {
// @@protoc_insertion_point(field_mutable_map:tensorflow.NodeDef.attr)
return attr_.MutableMap();
}
inline const NodeDef* NodeDef::internal_default_instance() {
return &NodeDef_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_graph_2eproto__INCLUDED
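
A short sketch, grounded in the GraphDef/NodeDef accessors declared above, that parses a serialized graph and lists its nodes; the file name and error handling are illustrative:

// Dump the ops of a serialized TensorFlow graph using the generated classes above.
#include <fstream>
#include <iostream>
#include "graph.pb.h"

int main()
{
    tensorflow::GraphDef graph;
    std::ifstream input("graph.pb", std::ios::binary);   // placeholder path
    if (!graph.ParseFromIstream(&input))
    {
        std::cerr << "Failed to parse graph.pb" << std::endl;
        return 1;
    }
    for (int i = 0; i < graph.node_size(); ++i)
    {
        const tensorflow::NodeDef& node = graph.node(i);
        std::cout << node.name() << ": " << node.op()
                  << " (" << node.input_size() << " inputs)" << std::endl;
    }
    return 0;
}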

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,770 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensor.proto
#ifndef PROTOBUF_tensor_2eproto__INCLUDED
#define PROTOBUF_tensor_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/unknown_field_set.h>
#include "tensor_shape.pb.h"
#include "types.pb.h"
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_tensor_2eproto();
void protobuf_InitDefaults_tensor_2eproto();
void protobuf_AssignDesc_tensor_2eproto();
void protobuf_ShutdownFile_tensor_2eproto();
class TensorProto;
// ===================================================================
class TensorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.TensorProto) */ {
public:
TensorProto();
virtual ~TensorProto();
TensorProto(const TensorProto& from);
inline TensorProto& operator=(const TensorProto& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const TensorProto& default_instance();
static const TensorProto* internal_default_instance();
void UnsafeArenaSwap(TensorProto* other);
void Swap(TensorProto* other);
// implements Message ----------------------------------------------
inline TensorProto* New() const { return New(NULL); }
TensorProto* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const TensorProto& from);
void MergeFrom(const TensorProto& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(TensorProto* other);
void UnsafeMergeFrom(const TensorProto& from);
protected:
explicit TensorProto(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional .tensorflow.DataType dtype = 1;
void clear_dtype();
static const int kDtypeFieldNumber = 1;
::tensorflow::DataType dtype() const;
void set_dtype(::tensorflow::DataType value);
// optional .tensorflow.TensorShapeProto tensor_shape = 2;
bool has_tensor_shape() const;
void clear_tensor_shape();
static const int kTensorShapeFieldNumber = 2;
private:
void _slow_mutable_tensor_shape();
void _slow_set_allocated_tensor_shape(
::google::protobuf::Arena* message_arena, ::tensorflow::TensorShapeProto** tensor_shape);
::tensorflow::TensorShapeProto* _slow_release_tensor_shape();
public:
const ::tensorflow::TensorShapeProto& tensor_shape() const;
::tensorflow::TensorShapeProto* mutable_tensor_shape();
::tensorflow::TensorShapeProto* release_tensor_shape();
void set_allocated_tensor_shape(::tensorflow::TensorShapeProto* tensor_shape);
::tensorflow::TensorShapeProto* unsafe_arena_release_tensor_shape();
void unsafe_arena_set_allocated_tensor_shape(
::tensorflow::TensorShapeProto* tensor_shape);
// optional int32 version_number = 3;
void clear_version_number();
static const int kVersionNumberFieldNumber = 3;
::google::protobuf::int32 version_number() const;
void set_version_number(::google::protobuf::int32 value);
// optional bytes tensor_content = 4;
void clear_tensor_content();
static const int kTensorContentFieldNumber = 4;
const ::std::string& tensor_content() const;
void set_tensor_content(const ::std::string& value);
void set_tensor_content(const char* value);
void set_tensor_content(const void* value, size_t size);
::std::string* mutable_tensor_content();
::std::string* release_tensor_content();
void set_allocated_tensor_content(::std::string* tensor_content);
::std::string* unsafe_arena_release_tensor_content();
void unsafe_arena_set_allocated_tensor_content(
::std::string* tensor_content);
// repeated int32 half_val = 13 [packed = true];
int half_val_size() const;
void clear_half_val();
static const int kHalfValFieldNumber = 13;
::google::protobuf::int32 half_val(int index) const;
void set_half_val(int index, ::google::protobuf::int32 value);
void add_half_val(::google::protobuf::int32 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
half_val() const;
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
mutable_half_val();
// repeated float float_val = 5 [packed = true];
int float_val_size() const;
void clear_float_val();
static const int kFloatValFieldNumber = 5;
float float_val(int index) const;
void set_float_val(int index, float value);
void add_float_val(float value);
const ::google::protobuf::RepeatedField< float >&
float_val() const;
::google::protobuf::RepeatedField< float >*
mutable_float_val();
// repeated double double_val = 6 [packed = true];
int double_val_size() const;
void clear_double_val();
static const int kDoubleValFieldNumber = 6;
double double_val(int index) const;
void set_double_val(int index, double value);
void add_double_val(double value);
const ::google::protobuf::RepeatedField< double >&
double_val() const;
::google::protobuf::RepeatedField< double >*
mutable_double_val();
// repeated int32 int_val = 7 [packed = true];
int int_val_size() const;
void clear_int_val();
static const int kIntValFieldNumber = 7;
::google::protobuf::int32 int_val(int index) const;
void set_int_val(int index, ::google::protobuf::int32 value);
void add_int_val(::google::protobuf::int32 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
int_val() const;
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
mutable_int_val();
// repeated bytes string_val = 8;
int string_val_size() const;
void clear_string_val();
static const int kStringValFieldNumber = 8;
const ::std::string& string_val(int index) const;
::std::string* mutable_string_val(int index);
void set_string_val(int index, const ::std::string& value);
void set_string_val(int index, const char* value);
void set_string_val(int index, const void* value, size_t size);
::std::string* add_string_val();
void add_string_val(const ::std::string& value);
void add_string_val(const char* value);
void add_string_val(const void* value, size_t size);
const ::google::protobuf::RepeatedPtrField< ::std::string>& string_val() const;
::google::protobuf::RepeatedPtrField< ::std::string>* mutable_string_val();
// repeated float scomplex_val = 9 [packed = true];
int scomplex_val_size() const;
void clear_scomplex_val();
static const int kScomplexValFieldNumber = 9;
float scomplex_val(int index) const;
void set_scomplex_val(int index, float value);
void add_scomplex_val(float value);
const ::google::protobuf::RepeatedField< float >&
scomplex_val() const;
::google::protobuf::RepeatedField< float >*
mutable_scomplex_val();
// repeated int64 int64_val = 10 [packed = true];
int int64_val_size() const;
void clear_int64_val();
static const int kInt64ValFieldNumber = 10;
::google::protobuf::int64 int64_val(int index) const;
void set_int64_val(int index, ::google::protobuf::int64 value);
void add_int64_val(::google::protobuf::int64 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
int64_val() const;
::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
mutable_int64_val();
// repeated bool bool_val = 11 [packed = true];
int bool_val_size() const;
void clear_bool_val();
static const int kBoolValFieldNumber = 11;
bool bool_val(int index) const;
void set_bool_val(int index, bool value);
void add_bool_val(bool value);
const ::google::protobuf::RepeatedField< bool >&
bool_val() const;
::google::protobuf::RepeatedField< bool >*
mutable_bool_val();
// repeated double dcomplex_val = 12 [packed = true];
int dcomplex_val_size() const;
void clear_dcomplex_val();
static const int kDcomplexValFieldNumber = 12;
double dcomplex_val(int index) const;
void set_dcomplex_val(int index, double value);
void add_dcomplex_val(double value);
const ::google::protobuf::RepeatedField< double >&
dcomplex_val() const;
::google::protobuf::RepeatedField< double >*
mutable_dcomplex_val();
// @@protoc_insertion_point(class_scope:tensorflow.TensorProto)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedField< ::google::protobuf::int32 > half_val_;
mutable int _half_val_cached_byte_size_;
::google::protobuf::RepeatedField< float > float_val_;
mutable int _float_val_cached_byte_size_;
::google::protobuf::RepeatedField< double > double_val_;
mutable int _double_val_cached_byte_size_;
::google::protobuf::RepeatedField< ::google::protobuf::int32 > int_val_;
mutable int _int_val_cached_byte_size_;
::google::protobuf::RepeatedPtrField< ::std::string> string_val_;
::google::protobuf::RepeatedField< float > scomplex_val_;
mutable int _scomplex_val_cached_byte_size_;
::google::protobuf::RepeatedField< ::google::protobuf::int64 > int64_val_;
mutable int _int64_val_cached_byte_size_;
::google::protobuf::RepeatedField< bool > bool_val_;
mutable int _bool_val_cached_byte_size_;
::google::protobuf::RepeatedField< double > dcomplex_val_;
mutable int _dcomplex_val_cached_byte_size_;
::google::protobuf::internal::ArenaStringPtr tensor_content_;
::tensorflow::TensorShapeProto* tensor_shape_;
int dtype_;
::google::protobuf::int32 version_number_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_tensor_2eproto_impl();
friend void protobuf_AddDesc_tensor_2eproto_impl();
friend void protobuf_AssignDesc_tensor_2eproto();
friend void protobuf_ShutdownFile_tensor_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<TensorProto> TensorProto_default_instance_;
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorProto
// optional .tensorflow.DataType dtype = 1;
inline void TensorProto::clear_dtype() {
dtype_ = 0;
}
inline ::tensorflow::DataType TensorProto::dtype() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.dtype)
return static_cast< ::tensorflow::DataType >(dtype_);
}
inline void TensorProto::set_dtype(::tensorflow::DataType value) {
dtype_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.dtype)
}
// optional .tensorflow.TensorShapeProto tensor_shape = 2;
inline bool TensorProto::has_tensor_shape() const {
return this != internal_default_instance() && tensor_shape_ != NULL;
}
inline void TensorProto::clear_tensor_shape() {
if (GetArenaNoVirtual() == NULL && tensor_shape_ != NULL) delete tensor_shape_;
tensor_shape_ = NULL;
}
inline const ::tensorflow::TensorShapeProto& TensorProto::tensor_shape() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.tensor_shape)
return tensor_shape_ != NULL ? *tensor_shape_
: *::tensorflow::TensorShapeProto::internal_default_instance();
}
inline ::tensorflow::TensorShapeProto* TensorProto::mutable_tensor_shape() {
if (tensor_shape_ == NULL) {
_slow_mutable_tensor_shape();
}
// @@protoc_insertion_point(field_mutable:tensorflow.TensorProto.tensor_shape)
return tensor_shape_;
}
inline ::tensorflow::TensorShapeProto* TensorProto::release_tensor_shape() {
// @@protoc_insertion_point(field_release:tensorflow.TensorProto.tensor_shape)
if (GetArenaNoVirtual() != NULL) {
return _slow_release_tensor_shape();
} else {
::tensorflow::TensorShapeProto* temp = tensor_shape_;
tensor_shape_ = NULL;
return temp;
}
}
inline void TensorProto::set_allocated_tensor_shape(::tensorflow::TensorShapeProto* tensor_shape) {
::google::protobuf::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == NULL) {
delete tensor_shape_;
}
if (tensor_shape != NULL) {
_slow_set_allocated_tensor_shape(message_arena, &tensor_shape);
}
tensor_shape_ = tensor_shape;
if (tensor_shape) {
} else {
}
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorProto.tensor_shape)
}
// optional int32 version_number = 3;
inline void TensorProto::clear_version_number() {
version_number_ = 0;
}
inline ::google::protobuf::int32 TensorProto::version_number() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.version_number)
return version_number_;
}
inline void TensorProto::set_version_number(::google::protobuf::int32 value) {
version_number_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.version_number)
}
// optional bytes tensor_content = 4;
inline void TensorProto::clear_tensor_content() {
tensor_content_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& TensorProto::tensor_content() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.tensor_content)
return tensor_content_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void TensorProto::set_tensor_content(const ::std::string& value) {
tensor_content_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.tensor_content)
}
inline void TensorProto::set_tensor_content(const char* value) {
tensor_content_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.TensorProto.tensor_content)
}
inline void TensorProto::set_tensor_content(const void* value,
size_t size) {
tensor_content_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorProto.tensor_content)
}
inline ::std::string* TensorProto::mutable_tensor_content() {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorProto.tensor_content)
return tensor_content_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorProto::release_tensor_content() {
// @@protoc_insertion_point(field_release:tensorflow.TensorProto.tensor_content)
return tensor_content_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorProto::unsafe_arena_release_tensor_content() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.TensorProto.tensor_content)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return tensor_content_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void TensorProto::set_allocated_tensor_content(::std::string* tensor_content) {
if (tensor_content != NULL) {
} else {
}
tensor_content_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), tensor_content,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorProto.tensor_content)
}
inline void TensorProto::unsafe_arena_set_allocated_tensor_content(
::std::string* tensor_content) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (tensor_content != NULL) {
} else {
}
tensor_content_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
tensor_content, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TensorProto.tensor_content)
}
// repeated int32 half_val = 13 [packed = true];
inline int TensorProto::half_val_size() const {
return half_val_.size();
}
inline void TensorProto::clear_half_val() {
half_val_.Clear();
}
inline ::google::protobuf::int32 TensorProto::half_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.half_val)
return half_val_.Get(index);
}
inline void TensorProto::set_half_val(int index, ::google::protobuf::int32 value) {
half_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.half_val)
}
inline void TensorProto::add_half_val(::google::protobuf::int32 value) {
half_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.half_val)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
TensorProto::half_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.half_val)
return half_val_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
TensorProto::mutable_half_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.half_val)
return &half_val_;
}
// repeated float float_val = 5 [packed = true];
inline int TensorProto::float_val_size() const {
return float_val_.size();
}
inline void TensorProto::clear_float_val() {
float_val_.Clear();
}
inline float TensorProto::float_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.float_val)
return float_val_.Get(index);
}
inline void TensorProto::set_float_val(int index, float value) {
float_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.float_val)
}
inline void TensorProto::add_float_val(float value) {
float_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.float_val)
}
inline const ::google::protobuf::RepeatedField< float >&
TensorProto::float_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.float_val)
return float_val_;
}
inline ::google::protobuf::RepeatedField< float >*
TensorProto::mutable_float_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.float_val)
return &float_val_;
}
// repeated double double_val = 6 [packed = true];
inline int TensorProto::double_val_size() const {
return double_val_.size();
}
inline void TensorProto::clear_double_val() {
double_val_.Clear();
}
inline double TensorProto::double_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.double_val)
return double_val_.Get(index);
}
inline void TensorProto::set_double_val(int index, double value) {
double_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.double_val)
}
inline void TensorProto::add_double_val(double value) {
double_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.double_val)
}
inline const ::google::protobuf::RepeatedField< double >&
TensorProto::double_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.double_val)
return double_val_;
}
inline ::google::protobuf::RepeatedField< double >*
TensorProto::mutable_double_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.double_val)
return &double_val_;
}
// repeated int32 int_val = 7 [packed = true];
inline int TensorProto::int_val_size() const {
return int_val_.size();
}
inline void TensorProto::clear_int_val() {
int_val_.Clear();
}
inline ::google::protobuf::int32 TensorProto::int_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.int_val)
return int_val_.Get(index);
}
inline void TensorProto::set_int_val(int index, ::google::protobuf::int32 value) {
int_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.int_val)
}
inline void TensorProto::add_int_val(::google::protobuf::int32 value) {
int_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.int_val)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
TensorProto::int_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.int_val)
return int_val_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
TensorProto::mutable_int_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.int_val)
return &int_val_;
}
// repeated bytes string_val = 8;
inline int TensorProto::string_val_size() const {
return string_val_.size();
}
inline void TensorProto::clear_string_val() {
string_val_.Clear();
}
inline const ::std::string& TensorProto::string_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.string_val)
return string_val_.Get(index);
}
inline ::std::string* TensorProto::mutable_string_val(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorProto.string_val)
return string_val_.Mutable(index);
}
inline void TensorProto::set_string_val(int index, const ::std::string& value) {
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.string_val)
string_val_.Mutable(index)->assign(value);
}
inline void TensorProto::set_string_val(int index, const char* value) {
string_val_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:tensorflow.TensorProto.string_val)
}
inline void TensorProto::set_string_val(int index, const void* value, size_t size) {
string_val_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorProto.string_val)
}
inline ::std::string* TensorProto::add_string_val() {
// @@protoc_insertion_point(field_add_mutable:tensorflow.TensorProto.string_val)
return string_val_.Add();
}
inline void TensorProto::add_string_val(const ::std::string& value) {
string_val_.Add()->assign(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.string_val)
}
inline void TensorProto::add_string_val(const char* value) {
string_val_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:tensorflow.TensorProto.string_val)
}
inline void TensorProto::add_string_val(const void* value, size_t size) {
string_val_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:tensorflow.TensorProto.string_val)
}
inline const ::google::protobuf::RepeatedPtrField< ::std::string>&
TensorProto::string_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.string_val)
return string_val_;
}
inline ::google::protobuf::RepeatedPtrField< ::std::string>*
TensorProto::mutable_string_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.string_val)
return &string_val_;
}
// repeated float scomplex_val = 9 [packed = true];
inline int TensorProto::scomplex_val_size() const {
return scomplex_val_.size();
}
inline void TensorProto::clear_scomplex_val() {
scomplex_val_.Clear();
}
inline float TensorProto::scomplex_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.scomplex_val)
return scomplex_val_.Get(index);
}
inline void TensorProto::set_scomplex_val(int index, float value) {
scomplex_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.scomplex_val)
}
inline void TensorProto::add_scomplex_val(float value) {
scomplex_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.scomplex_val)
}
inline const ::google::protobuf::RepeatedField< float >&
TensorProto::scomplex_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.scomplex_val)
return scomplex_val_;
}
inline ::google::protobuf::RepeatedField< float >*
TensorProto::mutable_scomplex_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.scomplex_val)
return &scomplex_val_;
}
// repeated int64 int64_val = 10 [packed = true];
inline int TensorProto::int64_val_size() const {
return int64_val_.size();
}
inline void TensorProto::clear_int64_val() {
int64_val_.Clear();
}
inline ::google::protobuf::int64 TensorProto::int64_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.int64_val)
return int64_val_.Get(index);
}
inline void TensorProto::set_int64_val(int index, ::google::protobuf::int64 value) {
int64_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.int64_val)
}
inline void TensorProto::add_int64_val(::google::protobuf::int64 value) {
int64_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.int64_val)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
TensorProto::int64_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.int64_val)
return int64_val_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
TensorProto::mutable_int64_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.int64_val)
return &int64_val_;
}
// repeated bool bool_val = 11 [packed = true];
inline int TensorProto::bool_val_size() const {
return bool_val_.size();
}
inline void TensorProto::clear_bool_val() {
bool_val_.Clear();
}
inline bool TensorProto::bool_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.bool_val)
return bool_val_.Get(index);
}
inline void TensorProto::set_bool_val(int index, bool value) {
bool_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.bool_val)
}
inline void TensorProto::add_bool_val(bool value) {
bool_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.bool_val)
}
inline const ::google::protobuf::RepeatedField< bool >&
TensorProto::bool_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.bool_val)
return bool_val_;
}
inline ::google::protobuf::RepeatedField< bool >*
TensorProto::mutable_bool_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.bool_val)
return &bool_val_;
}
// repeated double dcomplex_val = 12 [packed = true];
inline int TensorProto::dcomplex_val_size() const {
return dcomplex_val_.size();
}
inline void TensorProto::clear_dcomplex_val() {
dcomplex_val_.Clear();
}
inline double TensorProto::dcomplex_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.dcomplex_val)
return dcomplex_val_.Get(index);
}
inline void TensorProto::set_dcomplex_val(int index, double value) {
dcomplex_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.dcomplex_val)
}
inline void TensorProto::add_dcomplex_val(double value) {
dcomplex_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.dcomplex_val)
}
inline const ::google::protobuf::RepeatedField< double >&
TensorProto::dcomplex_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.dcomplex_val)
return dcomplex_val_;
}
inline ::google::protobuf::RepeatedField< double >*
TensorProto::mutable_dcomplex_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.dcomplex_val)
return &dcomplex_val_;
}
inline const TensorProto* TensorProto::internal_default_instance() {
return &TensorProto_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_tensor_2eproto__INCLUDED
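For orientation only (not part of the generated header): a minimal C++ sketch of driving the TensorProto repeated-value accessors declared above, assuming tensor.pb.h is on the include path and the program links against libprotobuf. Real graphs may store weights either in these typed lists or in the raw tensor_content bytes, so an importer typically has to handle both.

    #include "tensor.pb.h"
    #include <iostream>

    int main() {
        tensorflow::TensorProto blob;
        // Fill the packed repeated float field through the generated setters.
        blob.add_float_val(0.5f);
        blob.add_float_val(1.5f);
        // Read the values back with the size/index accessors shown above.
        for (int i = 0; i < blob.float_val_size(); ++i)
            std::cout << blob.float_val(i) << std::endl;
        return 0;
    }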

@@ -0,0 +1,895 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensor_shape.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include "tensor_shape.pb.h"
#include <algorithm>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite_inl.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
namespace {
const ::google::protobuf::Descriptor* TensorShapeProto_descriptor_ = NULL;
const ::google::protobuf::internal::GeneratedMessageReflection*
TensorShapeProto_reflection_ = NULL;
const ::google::protobuf::Descriptor* TensorShapeProto_Dim_descriptor_ = NULL;
const ::google::protobuf::internal::GeneratedMessageReflection*
TensorShapeProto_Dim_reflection_ = NULL;
} // namespace
void protobuf_AssignDesc_tensor_5fshape_2eproto() GOOGLE_ATTRIBUTE_COLD;
void protobuf_AssignDesc_tensor_5fshape_2eproto() {
protobuf_AddDesc_tensor_5fshape_2eproto();
const ::google::protobuf::FileDescriptor* file =
::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
"tensor_shape.proto");
GOOGLE_CHECK(file != NULL);
TensorShapeProto_descriptor_ = file->message_type(0);
static const int TensorShapeProto_offsets_[2] = {
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto, dim_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto, unknown_rank_),
};
TensorShapeProto_reflection_ =
::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
TensorShapeProto_descriptor_,
TensorShapeProto::internal_default_instance(),
TensorShapeProto_offsets_,
-1,
-1,
-1,
sizeof(TensorShapeProto),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto, _internal_metadata_));
TensorShapeProto_Dim_descriptor_ = TensorShapeProto_descriptor_->nested_type(0);
static const int TensorShapeProto_Dim_offsets_[2] = {
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto_Dim, size_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto_Dim, name_),
};
TensorShapeProto_Dim_reflection_ =
::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
TensorShapeProto_Dim_descriptor_,
TensorShapeProto_Dim::internal_default_instance(),
TensorShapeProto_Dim_offsets_,
-1,
-1,
-1,
sizeof(TensorShapeProto_Dim),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto_Dim, _internal_metadata_));
}
namespace {
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
void protobuf_AssignDescriptorsOnce() {
::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
&protobuf_AssignDesc_tensor_5fshape_2eproto);
}
void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;
void protobuf_RegisterTypes(const ::std::string&) {
protobuf_AssignDescriptorsOnce();
::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
TensorShapeProto_descriptor_, TensorShapeProto::internal_default_instance());
::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
TensorShapeProto_Dim_descriptor_, TensorShapeProto_Dim::internal_default_instance());
}
} // namespace
void protobuf_ShutdownFile_tensor_5fshape_2eproto() {
TensorShapeProto_default_instance_.Shutdown();
delete TensorShapeProto_reflection_;
TensorShapeProto_Dim_default_instance_.Shutdown();
delete TensorShapeProto_Dim_reflection_;
}
void protobuf_InitDefaults_tensor_5fshape_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
TensorShapeProto_default_instance_.DefaultConstruct();
::google::protobuf::internal::GetEmptyString();
TensorShapeProto_Dim_default_instance_.DefaultConstruct();
TensorShapeProto_default_instance_.get_mutable()->InitAsDefaultInstance();
TensorShapeProto_Dim_default_instance_.get_mutable()->InitAsDefaultInstance();
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_InitDefaults_tensor_5fshape_2eproto_once_);
void protobuf_InitDefaults_tensor_5fshape_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_InitDefaults_tensor_5fshape_2eproto_once_,
&protobuf_InitDefaults_tensor_5fshape_2eproto_impl);
}
void protobuf_AddDesc_tensor_5fshape_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
protobuf_InitDefaults_tensor_5fshape_2eproto();
::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
"\n\022tensor_shape.proto\022\ntensorflow\"z\n\020Tens"
"orShapeProto\022-\n\003dim\030\002 \003(\0132 .tensorflow.T"
"ensorShapeProto.Dim\022\024\n\014unknown_rank\030\003 \001("
"\010\032!\n\003Dim\022\014\n\004size\030\001 \001(\003\022\014\n\004name\030\002 \001(\tB2\n\030"
"org.tensorflow.frameworkB\021TensorShapePro"
"tosP\001\370\001\001b\006proto3", 216);
::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
"tensor_shape.proto", &protobuf_RegisterTypes);
::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_tensor_5fshape_2eproto);
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AddDesc_tensor_5fshape_2eproto_once_);
void protobuf_AddDesc_tensor_5fshape_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_AddDesc_tensor_5fshape_2eproto_once_,
&protobuf_AddDesc_tensor_5fshape_2eproto_impl);
}
// Force AddDescriptors() to be called at static initialization time.
struct StaticDescriptorInitializer_tensor_5fshape_2eproto {
StaticDescriptorInitializer_tensor_5fshape_2eproto() {
protobuf_AddDesc_tensor_5fshape_2eproto();
}
} static_descriptor_initializer_tensor_5fshape_2eproto_;
namespace {
static void MergeFromFail(int line) GOOGLE_ATTRIBUTE_COLD GOOGLE_ATTRIBUTE_NORETURN;
static void MergeFromFail(int line) {
::google::protobuf::internal::MergeFromFail(__FILE__, line);
}
} // namespace
// ===================================================================
#if !defined(_MSC_VER) || _MSC_VER >= 1900
const int TensorShapeProto_Dim::kSizeFieldNumber;
const int TensorShapeProto_Dim::kNameFieldNumber;
#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
TensorShapeProto_Dim::TensorShapeProto_Dim()
: ::google::protobuf::Message(), _internal_metadata_(NULL) {
if (this != internal_default_instance()) protobuf_InitDefaults_tensor_5fshape_2eproto();
SharedCtor();
// @@protoc_insertion_point(constructor:tensorflow.TensorShapeProto.Dim)
}
TensorShapeProto_Dim::TensorShapeProto_Dim(::google::protobuf::Arena* arena)
: ::google::protobuf::Message(),
_internal_metadata_(arena) {
#ifdef GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
protobuf_InitDefaults_tensor_5fshape_2eproto();
#endif // GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
SharedCtor();
RegisterArenaDtor(arena);
// @@protoc_insertion_point(arena_constructor:tensorflow.TensorShapeProto.Dim)
}
void TensorShapeProto_Dim::InitAsDefaultInstance() {
}
TensorShapeProto_Dim::TensorShapeProto_Dim(const TensorShapeProto_Dim& from)
: ::google::protobuf::Message(),
_internal_metadata_(NULL) {
SharedCtor();
UnsafeMergeFrom(from);
// @@protoc_insertion_point(copy_constructor:tensorflow.TensorShapeProto.Dim)
}
void TensorShapeProto_Dim::SharedCtor() {
name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
size_ = GOOGLE_LONGLONG(0);
_cached_size_ = 0;
}
TensorShapeProto_Dim::~TensorShapeProto_Dim() {
// @@protoc_insertion_point(destructor:tensorflow.TensorShapeProto.Dim)
SharedDtor();
}
void TensorShapeProto_Dim::SharedDtor() {
::google::protobuf::Arena* arena = GetArenaNoVirtual();
if (arena != NULL) {
return;
}
name_.Destroy(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), arena);
}
void TensorShapeProto_Dim::ArenaDtor(void* object) {
TensorShapeProto_Dim* _this = reinterpret_cast< TensorShapeProto_Dim* >(object);
(void)_this;
}
void TensorShapeProto_Dim::RegisterArenaDtor(::google::protobuf::Arena* arena) {
}
void TensorShapeProto_Dim::SetCachedSize(int size) const {
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
}
const ::google::protobuf::Descriptor* TensorShapeProto_Dim::descriptor() {
protobuf_AssignDescriptorsOnce();
return TensorShapeProto_Dim_descriptor_;
}
const TensorShapeProto_Dim& TensorShapeProto_Dim::default_instance() {
protobuf_InitDefaults_tensor_5fshape_2eproto();
return *internal_default_instance();
}
::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto_Dim> TensorShapeProto_Dim_default_instance_;
TensorShapeProto_Dim* TensorShapeProto_Dim::New(::google::protobuf::Arena* arena) const {
return ::google::protobuf::Arena::CreateMessage<TensorShapeProto_Dim>(arena);
}
void TensorShapeProto_Dim::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.TensorShapeProto.Dim)
size_ = GOOGLE_LONGLONG(0);
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
bool TensorShapeProto_Dim::MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
::google::protobuf::uint32 tag;
// @@protoc_insertion_point(parse_start:tensorflow.TensorShapeProto.Dim)
for (;;) {
::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// optional int64 size = 1;
case 1: {
if (tag == 8) {
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
input, &size_)));
} else {
goto handle_unusual;
}
if (input->ExpectTag(18)) goto parse_name;
break;
}
// optional string name = 2;
case 2: {
if (tag == 18) {
parse_name:
DO_(::google::protobuf::internal::WireFormatLite::ReadString(
input, this->mutable_name()));
DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), this->name().length(),
::google::protobuf::internal::WireFormatLite::PARSE,
"tensorflow.TensorShapeProto.Dim.name"));
} else {
goto handle_unusual;
}
if (input->ExpectAtEnd()) goto success;
break;
}
default: {
handle_unusual:
if (tag == 0 ||
::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
goto success;
}
DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:tensorflow.TensorShapeProto.Dim)
return true;
failure:
// @@protoc_insertion_point(parse_failure:tensorflow.TensorShapeProto.Dim)
return false;
#undef DO_
}
void TensorShapeProto_Dim::SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:tensorflow.TensorShapeProto.Dim)
// optional int64 size = 1;
if (this->size() != 0) {
::google::protobuf::internal::WireFormatLite::WriteInt64(1, this->size(), output);
}
// optional string name = 2;
if (this->name().size() > 0) {
::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), this->name().length(),
::google::protobuf::internal::WireFormatLite::SERIALIZE,
"tensorflow.TensorShapeProto.Dim.name");
::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
2, this->name(), output);
}
// @@protoc_insertion_point(serialize_end:tensorflow.TensorShapeProto.Dim)
}
::google::protobuf::uint8* TensorShapeProto_Dim::InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* target) const {
(void)deterministic; // Unused
// @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorShapeProto.Dim)
// optional int64 size = 1;
if (this->size() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(1, this->size(), target);
}
// optional string name = 2;
if (this->name().size() > 0) {
::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), this->name().length(),
::google::protobuf::internal::WireFormatLite::SERIALIZE,
"tensorflow.TensorShapeProto.Dim.name");
target =
::google::protobuf::internal::WireFormatLite::WriteStringToArray(
2, this->name(), target);
}
// @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorShapeProto.Dim)
return target;
}
size_t TensorShapeProto_Dim::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorShapeProto.Dim)
size_t total_size = 0;
// optional int64 size = 1;
if (this->size() != 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int64Size(
this->size());
}
// optional string name = 2;
if (this->name().size() > 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::StringSize(
this->name());
}
int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
return total_size;
}
void TensorShapeProto_Dim::MergeFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.TensorShapeProto.Dim)
if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
const TensorShapeProto_Dim* source =
::google::protobuf::internal::DynamicCastToGenerated<const TensorShapeProto_Dim>(
&from);
if (source == NULL) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.TensorShapeProto.Dim)
::google::protobuf::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.TensorShapeProto.Dim)
UnsafeMergeFrom(*source);
}
}
void TensorShapeProto_Dim::MergeFrom(const TensorShapeProto_Dim& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorShapeProto.Dim)
if (GOOGLE_PREDICT_TRUE(&from != this)) {
UnsafeMergeFrom(from);
} else {
MergeFromFail(__LINE__);
}
}
void TensorShapeProto_Dim::UnsafeMergeFrom(const TensorShapeProto_Dim& from) {
GOOGLE_DCHECK(&from != this);
if (from.size() != 0) {
set_size(from.size());
}
if (from.name().size() > 0) {
set_name(from.name());
}
}
void TensorShapeProto_Dim::CopyFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.TensorShapeProto.Dim)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void TensorShapeProto_Dim::CopyFrom(const TensorShapeProto_Dim& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorShapeProto.Dim)
if (&from == this) return;
Clear();
UnsafeMergeFrom(from);
}
bool TensorShapeProto_Dim::IsInitialized() const {
return true;
}
void TensorShapeProto_Dim::Swap(TensorShapeProto_Dim* other) {
if (other == this) return;
if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
InternalSwap(other);
} else {
TensorShapeProto_Dim temp;
temp.UnsafeMergeFrom(*this);
CopyFrom(*other);
other->CopyFrom(temp);
}
}
void TensorShapeProto_Dim::UnsafeArenaSwap(TensorShapeProto_Dim* other) {
if (other == this) return;
GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());
InternalSwap(other);
}
void TensorShapeProto_Dim::InternalSwap(TensorShapeProto_Dim* other) {
std::swap(size_, other->size_);
name_.Swap(&other->name_);
_internal_metadata_.Swap(&other->_internal_metadata_);
std::swap(_cached_size_, other->_cached_size_);
}
::google::protobuf::Metadata TensorShapeProto_Dim::GetMetadata() const {
protobuf_AssignDescriptorsOnce();
::google::protobuf::Metadata metadata;
metadata.descriptor = TensorShapeProto_Dim_descriptor_;
metadata.reflection = TensorShapeProto_Dim_reflection_;
return metadata;
}
// -------------------------------------------------------------------
#if !defined(_MSC_VER) || _MSC_VER >= 1900
const int TensorShapeProto::kDimFieldNumber;
const int TensorShapeProto::kUnknownRankFieldNumber;
#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
TensorShapeProto::TensorShapeProto()
: ::google::protobuf::Message(), _internal_metadata_(NULL) {
if (this != internal_default_instance()) protobuf_InitDefaults_tensor_5fshape_2eproto();
SharedCtor();
// @@protoc_insertion_point(constructor:tensorflow.TensorShapeProto)
}
TensorShapeProto::TensorShapeProto(::google::protobuf::Arena* arena)
: ::google::protobuf::Message(),
_internal_metadata_(arena),
dim_(arena) {
#ifdef GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
protobuf_InitDefaults_tensor_5fshape_2eproto();
#endif // GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
SharedCtor();
RegisterArenaDtor(arena);
// @@protoc_insertion_point(arena_constructor:tensorflow.TensorShapeProto)
}
void TensorShapeProto::InitAsDefaultInstance() {
}
TensorShapeProto::TensorShapeProto(const TensorShapeProto& from)
: ::google::protobuf::Message(),
_internal_metadata_(NULL) {
SharedCtor();
UnsafeMergeFrom(from);
// @@protoc_insertion_point(copy_constructor:tensorflow.TensorShapeProto)
}
void TensorShapeProto::SharedCtor() {
unknown_rank_ = false;
_cached_size_ = 0;
}
TensorShapeProto::~TensorShapeProto() {
// @@protoc_insertion_point(destructor:tensorflow.TensorShapeProto)
SharedDtor();
}
void TensorShapeProto::SharedDtor() {
::google::protobuf::Arena* arena = GetArenaNoVirtual();
if (arena != NULL) {
return;
}
}
void TensorShapeProto::ArenaDtor(void* object) {
TensorShapeProto* _this = reinterpret_cast< TensorShapeProto* >(object);
(void)_this;
}
void TensorShapeProto::RegisterArenaDtor(::google::protobuf::Arena* arena) {
}
void TensorShapeProto::SetCachedSize(int size) const {
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
}
const ::google::protobuf::Descriptor* TensorShapeProto::descriptor() {
protobuf_AssignDescriptorsOnce();
return TensorShapeProto_descriptor_;
}
const TensorShapeProto& TensorShapeProto::default_instance() {
protobuf_InitDefaults_tensor_5fshape_2eproto();
return *internal_default_instance();
}
::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto> TensorShapeProto_default_instance_;
TensorShapeProto* TensorShapeProto::New(::google::protobuf::Arena* arena) const {
return ::google::protobuf::Arena::CreateMessage<TensorShapeProto>(arena);
}
void TensorShapeProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.TensorShapeProto)
unknown_rank_ = false;
dim_.Clear();
}
bool TensorShapeProto::MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
::google::protobuf::uint32 tag;
// @@protoc_insertion_point(parse_start:tensorflow.TensorShapeProto)
for (;;) {
::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
case 2: {
if (tag == 18) {
DO_(input->IncrementRecursionDepth());
parse_loop_dim:
DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtualNoRecursionDepth(
input, add_dim()));
} else {
goto handle_unusual;
}
if (input->ExpectTag(18)) goto parse_loop_dim;
input->UnsafeDecrementRecursionDepth();
if (input->ExpectTag(24)) goto parse_unknown_rank;
break;
}
// optional bool unknown_rank = 3;
case 3: {
if (tag == 24) {
parse_unknown_rank:
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>(
input, &unknown_rank_)));
} else {
goto handle_unusual;
}
if (input->ExpectAtEnd()) goto success;
break;
}
default: {
handle_unusual:
if (tag == 0 ||
::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
goto success;
}
DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:tensorflow.TensorShapeProto)
return true;
failure:
// @@protoc_insertion_point(parse_failure:tensorflow.TensorShapeProto)
return false;
#undef DO_
}
void TensorShapeProto::SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:tensorflow.TensorShapeProto)
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
for (unsigned int i = 0, n = this->dim_size(); i < n; i++) {
::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
2, this->dim(i), output);
}
// optional bool unknown_rank = 3;
if (this->unknown_rank() != 0) {
::google::protobuf::internal::WireFormatLite::WriteBool(3, this->unknown_rank(), output);
}
// @@protoc_insertion_point(serialize_end:tensorflow.TensorShapeProto)
}
::google::protobuf::uint8* TensorShapeProto::InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* target) const {
(void)deterministic; // Unused
// @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorShapeProto)
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
for (unsigned int i = 0, n = this->dim_size(); i < n; i++) {
target = ::google::protobuf::internal::WireFormatLite::
InternalWriteMessageNoVirtualToArray(
2, this->dim(i), false, target);
}
// optional bool unknown_rank = 3;
if (this->unknown_rank() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteBoolToArray(3, this->unknown_rank(), target);
}
// @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorShapeProto)
return target;
}
size_t TensorShapeProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorShapeProto)
size_t total_size = 0;
// optional bool unknown_rank = 3;
if (this->unknown_rank() != 0) {
total_size += 1 + 1;
}
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
{
unsigned int count = this->dim_size();
total_size += 1UL * count;
for (unsigned int i = 0; i < count; i++) {
total_size +=
::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
this->dim(i));
}
}
int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
return total_size;
}
void TensorShapeProto::MergeFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.TensorShapeProto)
if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
const TensorShapeProto* source =
::google::protobuf::internal::DynamicCastToGenerated<const TensorShapeProto>(
&from);
if (source == NULL) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.TensorShapeProto)
::google::protobuf::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.TensorShapeProto)
UnsafeMergeFrom(*source);
}
}
void TensorShapeProto::MergeFrom(const TensorShapeProto& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorShapeProto)
if (GOOGLE_PREDICT_TRUE(&from != this)) {
UnsafeMergeFrom(from);
} else {
MergeFromFail(__LINE__);
}
}
void TensorShapeProto::UnsafeMergeFrom(const TensorShapeProto& from) {
GOOGLE_DCHECK(&from != this);
dim_.MergeFrom(from.dim_);
if (from.unknown_rank() != 0) {
set_unknown_rank(from.unknown_rank());
}
}
void TensorShapeProto::CopyFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.TensorShapeProto)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void TensorShapeProto::CopyFrom(const TensorShapeProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorShapeProto)
if (&from == this) return;
Clear();
UnsafeMergeFrom(from);
}
bool TensorShapeProto::IsInitialized() const {
return true;
}
void TensorShapeProto::Swap(TensorShapeProto* other) {
if (other == this) return;
if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
InternalSwap(other);
} else {
TensorShapeProto temp;
temp.UnsafeMergeFrom(*this);
CopyFrom(*other);
other->CopyFrom(temp);
}
}
void TensorShapeProto::UnsafeArenaSwap(TensorShapeProto* other) {
if (other == this) return;
GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());
InternalSwap(other);
}
void TensorShapeProto::InternalSwap(TensorShapeProto* other) {
dim_.UnsafeArenaSwap(&other->dim_);
std::swap(unknown_rank_, other->unknown_rank_);
_internal_metadata_.Swap(&other->_internal_metadata_);
std::swap(_cached_size_, other->_cached_size_);
}
::google::protobuf::Metadata TensorShapeProto::GetMetadata() const {
protobuf_AssignDescriptorsOnce();
::google::protobuf::Metadata metadata;
metadata.descriptor = TensorShapeProto_descriptor_;
metadata.reflection = TensorShapeProto_reflection_;
return metadata;
}
#if PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorShapeProto_Dim
// optional int64 size = 1;
void TensorShapeProto_Dim::clear_size() {
size_ = GOOGLE_LONGLONG(0);
}
::google::protobuf::int64 TensorShapeProto_Dim::size() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.size)
return size_;
}
void TensorShapeProto_Dim::set_size(::google::protobuf::int64 value) {
size_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.size)
}
// optional string name = 2;
void TensorShapeProto_Dim::clear_name() {
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
const ::std::string& TensorShapeProto_Dim::name() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.name)
return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
void TensorShapeProto_Dim::set_name(const ::std::string& value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.name)
}
void TensorShapeProto_Dim::set_name(const char* value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.TensorShapeProto.Dim.name)
}
void TensorShapeProto_Dim::set_name(const char* value,
size_t size) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorShapeProto.Dim.name)
}
::std::string* TensorShapeProto_Dim::mutable_name() {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.Dim.name)
return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
::std::string* TensorShapeProto_Dim::release_name() {
// @@protoc_insertion_point(field_release:tensorflow.TensorShapeProto.Dim.name)
return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
::std::string* TensorShapeProto_Dim::unsafe_arena_release_name() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.TensorShapeProto.Dim.name)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
void TensorShapeProto_Dim::set_allocated_name(::std::string* name) {
if (name != NULL) {
} else {
}
name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
void TensorShapeProto_Dim::unsafe_arena_set_allocated_name(
::std::string* name) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (name != NULL) {
} else {
}
name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
name, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
inline const TensorShapeProto_Dim* TensorShapeProto_Dim::internal_default_instance() {
return &TensorShapeProto_Dim_default_instance_.get();
}
// -------------------------------------------------------------------
// TensorShapeProto
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
int TensorShapeProto::dim_size() const {
return dim_.size();
}
void TensorShapeProto::clear_dim() {
dim_.Clear();
}
const ::tensorflow::TensorShapeProto_Dim& TensorShapeProto::dim(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.dim)
return dim_.Get(index);
}
::tensorflow::TensorShapeProto_Dim* TensorShapeProto::mutable_dim(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.dim)
return dim_.Mutable(index);
}
::tensorflow::TensorShapeProto_Dim* TensorShapeProto::add_dim() {
// @@protoc_insertion_point(field_add:tensorflow.TensorShapeProto.dim)
return dim_.Add();
}
::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >*
TensorShapeProto::mutable_dim() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorShapeProto.dim)
return &dim_;
}
const ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >&
TensorShapeProto::dim() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorShapeProto.dim)
return dim_;
}
// optional bool unknown_rank = 3;
void TensorShapeProto::clear_unknown_rank() {
unknown_rank_ = false;
}
bool TensorShapeProto::unknown_rank() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.unknown_rank)
return unknown_rank_;
}
void TensorShapeProto::set_unknown_rank(bool value) {
unknown_rank_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.unknown_rank)
}
inline const TensorShapeProto* TensorShapeProto::internal_default_instance() {
return &TensorShapeProto_default_instance_.get();
}
#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
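A hedged round-trip sketch (not part of the patch) that exercises the parse and serialize code above; note that SerializeToString and ParseFromString come from the google::protobuf::Message base class rather than from this generated file.

    #include "tensor_shape.pb.h"
    #include <cassert>
    #include <string>

    int main() {
        tensorflow::TensorShapeProto shape;
        shape.set_unknown_rank(true);

        // Serialize to the wire format and parse it back into a fresh message.
        std::string wire;
        shape.SerializeToString(&wire);

        tensorflow::TensorShapeProto parsed;
        parsed.ParseFromString(wire);
        assert(parsed.unknown_rank() == true);
        return 0;
    }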

@@ -0,0 +1,423 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensor_shape.proto
#ifndef PROTOBUF_tensor_5fshape_2eproto__INCLUDED
#define PROTOBUF_tensor_5fshape_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/unknown_field_set.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_tensor_5fshape_2eproto();
void protobuf_InitDefaults_tensor_5fshape_2eproto();
void protobuf_AssignDesc_tensor_5fshape_2eproto();
void protobuf_ShutdownFile_tensor_5fshape_2eproto();
class TensorShapeProto;
class TensorShapeProto_Dim;
// ===================================================================
class TensorShapeProto_Dim : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.TensorShapeProto.Dim) */ {
public:
TensorShapeProto_Dim();
virtual ~TensorShapeProto_Dim();
TensorShapeProto_Dim(const TensorShapeProto_Dim& from);
inline TensorShapeProto_Dim& operator=(const TensorShapeProto_Dim& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const TensorShapeProto_Dim& default_instance();
static const TensorShapeProto_Dim* internal_default_instance();
void UnsafeArenaSwap(TensorShapeProto_Dim* other);
void Swap(TensorShapeProto_Dim* other);
// implements Message ----------------------------------------------
inline TensorShapeProto_Dim* New() const { return New(NULL); }
TensorShapeProto_Dim* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const TensorShapeProto_Dim& from);
void MergeFrom(const TensorShapeProto_Dim& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(TensorShapeProto_Dim* other);
void UnsafeMergeFrom(const TensorShapeProto_Dim& from);
protected:
explicit TensorShapeProto_Dim(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int64 size = 1;
void clear_size();
static const int kSizeFieldNumber = 1;
::google::protobuf::int64 size() const;
void set_size(::google::protobuf::int64 value);
// optional string name = 2;
void clear_name();
static const int kNameFieldNumber = 2;
const ::std::string& name() const;
void set_name(const ::std::string& value);
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
::std::string* unsafe_arena_release_name();
void unsafe_arena_set_allocated_name(
::std::string* name);
// @@protoc_insertion_point(class_scope:tensorflow.TensorShapeProto.Dim)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::internal::ArenaStringPtr name_;
::google::protobuf::int64 size_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_tensor_5fshape_2eproto_impl();
friend void protobuf_AddDesc_tensor_5fshape_2eproto_impl();
friend void protobuf_AssignDesc_tensor_5fshape_2eproto();
friend void protobuf_ShutdownFile_tensor_5fshape_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto_Dim> TensorShapeProto_Dim_default_instance_;
// -------------------------------------------------------------------
class TensorShapeProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.TensorShapeProto) */ {
public:
TensorShapeProto();
virtual ~TensorShapeProto();
TensorShapeProto(const TensorShapeProto& from);
inline TensorShapeProto& operator=(const TensorShapeProto& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const TensorShapeProto& default_instance();
static const TensorShapeProto* internal_default_instance();
void UnsafeArenaSwap(TensorShapeProto* other);
void Swap(TensorShapeProto* other);
// implements Message ----------------------------------------------
inline TensorShapeProto* New() const { return New(NULL); }
TensorShapeProto* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const TensorShapeProto& from);
void MergeFrom(const TensorShapeProto& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(TensorShapeProto* other);
void UnsafeMergeFrom(const TensorShapeProto& from);
protected:
explicit TensorShapeProto(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
typedef TensorShapeProto_Dim Dim;
// accessors -------------------------------------------------------
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
int dim_size() const;
void clear_dim();
static const int kDimFieldNumber = 2;
const ::tensorflow::TensorShapeProto_Dim& dim(int index) const;
::tensorflow::TensorShapeProto_Dim* mutable_dim(int index);
::tensorflow::TensorShapeProto_Dim* add_dim();
::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >*
mutable_dim();
const ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >&
dim() const;
// optional bool unknown_rank = 3;
void clear_unknown_rank();
static const int kUnknownRankFieldNumber = 3;
bool unknown_rank() const;
void set_unknown_rank(bool value);
// @@protoc_insertion_point(class_scope:tensorflow.TensorShapeProto)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim > dim_;
bool unknown_rank_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_tensor_5fshape_2eproto_impl();
friend void protobuf_AddDesc_tensor_5fshape_2eproto_impl();
friend void protobuf_AssignDesc_tensor_5fshape_2eproto();
friend void protobuf_ShutdownFile_tensor_5fshape_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto> TensorShapeProto_default_instance_;
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorShapeProto_Dim
// optional int64 size = 1;
inline void TensorShapeProto_Dim::clear_size() {
size_ = GOOGLE_LONGLONG(0);
}
inline ::google::protobuf::int64 TensorShapeProto_Dim::size() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.size)
return size_;
}
inline void TensorShapeProto_Dim::set_size(::google::protobuf::int64 value) {
size_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.size)
}
// optional string name = 2;
inline void TensorShapeProto_Dim::clear_name() {
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& TensorShapeProto_Dim::name() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.name)
return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void TensorShapeProto_Dim::set_name(const ::std::string& value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.name)
}
inline void TensorShapeProto_Dim::set_name(const char* value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.TensorShapeProto.Dim.name)
}
inline void TensorShapeProto_Dim::set_name(const char* value,
size_t size) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorShapeProto.Dim.name)
}
inline ::std::string* TensorShapeProto_Dim::mutable_name() {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.Dim.name)
return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorShapeProto_Dim::release_name() {
// @@protoc_insertion_point(field_release:tensorflow.TensorShapeProto.Dim.name)
return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorShapeProto_Dim::unsafe_arena_release_name() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.TensorShapeProto.Dim.name)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void TensorShapeProto_Dim::set_allocated_name(::std::string* name) {
if (name != NULL) {
} else {
}
name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
inline void TensorShapeProto_Dim::unsafe_arena_set_allocated_name(
::std::string* name) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (name != NULL) {
} else {
}
name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
name, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
inline const TensorShapeProto_Dim* TensorShapeProto_Dim::internal_default_instance() {
return &TensorShapeProto_Dim_default_instance_.get();
}
// -------------------------------------------------------------------
// TensorShapeProto
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
inline int TensorShapeProto::dim_size() const {
return dim_.size();
}
inline void TensorShapeProto::clear_dim() {
dim_.Clear();
}
inline const ::tensorflow::TensorShapeProto_Dim& TensorShapeProto::dim(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.dim)
return dim_.Get(index);
}
inline ::tensorflow::TensorShapeProto_Dim* TensorShapeProto::mutable_dim(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.dim)
return dim_.Mutable(index);
}
inline ::tensorflow::TensorShapeProto_Dim* TensorShapeProto::add_dim() {
// @@protoc_insertion_point(field_add:tensorflow.TensorShapeProto.dim)
return dim_.Add();
}
inline ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >*
TensorShapeProto::mutable_dim() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorShapeProto.dim)
return &dim_;
}
inline const ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >&
TensorShapeProto::dim() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorShapeProto.dim)
return dim_;
}
// optional bool unknown_rank = 3;
inline void TensorShapeProto::clear_unknown_rank() {
unknown_rank_ = false;
}
inline bool TensorShapeProto::unknown_rank() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.unknown_rank)
return unknown_rank_;
}
inline void TensorShapeProto::set_unknown_rank(bool value) {
unknown_rank_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.unknown_rank)
}
inline const TensorShapeProto* TensorShapeProto::internal_default_instance() {
return &TensorShapeProto_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_tensor_5fshape_2eproto__INCLUDED
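For reference, a small sketch (assuming tensor_shape.pb.h is generated as above and libprotobuf is linked) that builds a shape with the accessors declared in this header and then walks it back:

    #include "tensor_shape.pb.h"
    #include <iostream>

    int main() {
        tensorflow::TensorShapeProto shape;
        // Describe a 10x3 tensor; each Dim carries an optional size and name.
        tensorflow::TensorShapeProto_Dim* rows = shape.add_dim();
        rows->set_size(10);
        rows->set_name("batch");
        tensorflow::TensorShapeProto_Dim* cols = shape.add_dim();
        cols->set_size(3);
        cols->set_name("channels");

        for (int i = 0; i < shape.dim_size(); ++i)
            std::cout << shape.dim(i).name() << " = " << shape.dim(i).size() << std::endl;
        return 0;
    }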

@@ -0,0 +1,163 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: types.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include "types.pb.h"
#include <algorithm>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite_inl.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
namespace {
const ::google::protobuf::EnumDescriptor* DataType_descriptor_ = NULL;
} // namespace
void protobuf_AssignDesc_types_2eproto() GOOGLE_ATTRIBUTE_COLD;
void protobuf_AssignDesc_types_2eproto() {
protobuf_AddDesc_types_2eproto();
const ::google::protobuf::FileDescriptor* file =
::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
"types.proto");
GOOGLE_CHECK(file != NULL);
DataType_descriptor_ = file->enum_type(0);
}
namespace {
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
void protobuf_AssignDescriptorsOnce() {
::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
&protobuf_AssignDesc_types_2eproto);
}
void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;
void protobuf_RegisterTypes(const ::std::string&) {
protobuf_AssignDescriptorsOnce();
}
} // namespace
void protobuf_ShutdownFile_types_2eproto() {
}
void protobuf_InitDefaults_types_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_InitDefaults_types_2eproto_once_);
void protobuf_InitDefaults_types_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_InitDefaults_types_2eproto_once_,
&protobuf_InitDefaults_types_2eproto_impl);
}
void protobuf_AddDesc_types_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
protobuf_InitDefaults_types_2eproto();
::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
"\n\013types.proto\022\ntensorflow*\234\005\n\010DataType\022\016"
"\n\nDT_INVALID\020\000\022\014\n\010DT_FLOAT\020\001\022\r\n\tDT_DOUBL"
"E\020\002\022\014\n\010DT_INT32\020\003\022\014\n\010DT_UINT8\020\004\022\014\n\010DT_IN"
"T16\020\005\022\013\n\007DT_INT8\020\006\022\r\n\tDT_STRING\020\007\022\020\n\014DT_"
"COMPLEX64\020\010\022\014\n\010DT_INT64\020\t\022\013\n\007DT_BOOL\020\n\022\014"
"\n\010DT_QINT8\020\013\022\r\n\tDT_QUINT8\020\014\022\r\n\tDT_QINT32"
"\020\r\022\017\n\013DT_BFLOAT16\020\016\022\r\n\tDT_QINT16\020\017\022\016\n\nDT"
"_QUINT16\020\020\022\r\n\tDT_UINT16\020\021\022\021\n\rDT_COMPLEX1"
"28\020\022\022\013\n\007DT_HALF\020\023\022\020\n\014DT_FLOAT_REF\020e\022\021\n\rD"
"T_DOUBLE_REF\020f\022\020\n\014DT_INT32_REF\020g\022\020\n\014DT_U"
"INT8_REF\020h\022\020\n\014DT_INT16_REF\020i\022\017\n\013DT_INT8_"
"REF\020j\022\021\n\rDT_STRING_REF\020k\022\024\n\020DT_COMPLEX64"
"_REF\020l\022\020\n\014DT_INT64_REF\020m\022\017\n\013DT_BOOL_REF\020"
"n\022\020\n\014DT_QINT8_REF\020o\022\021\n\rDT_QUINT8_REF\020p\022\021"
"\n\rDT_QINT32_REF\020q\022\023\n\017DT_BFLOAT16_REF\020r\022\021"
"\n\rDT_QINT16_REF\020s\022\022\n\016DT_QUINT16_REF\020t\022\021\n"
"\rDT_UINT16_REF\020u\022\025\n\021DT_COMPLEX128_REF\020v\022"
"\017\n\013DT_HALF_REF\020wB,\n\030org.tensorflow.frame"
"workB\013TypesProtosP\001\370\001\001b\006proto3", 750);
::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
"types.proto", &protobuf_RegisterTypes);
::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_types_2eproto);
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AddDesc_types_2eproto_once_);
void protobuf_AddDesc_types_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_AddDesc_types_2eproto_once_,
&protobuf_AddDesc_types_2eproto_impl);
}
// Force AddDescriptors() to be called at static initialization time.
struct StaticDescriptorInitializer_types_2eproto {
StaticDescriptorInitializer_types_2eproto() {
protobuf_AddDesc_types_2eproto();
}
} static_descriptor_initializer_types_2eproto_;
const ::google::protobuf::EnumDescriptor* DataType_descriptor() {
protobuf_AssignDescriptorsOnce();
return DataType_descriptor_;
}
bool DataType_IsValid(int value) {
switch (value) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
case 16:
case 17:
case 18:
case 19:
case 101:
case 102:
case 103:
case 104:
case 105:
case 106:
case 107:
case 108:
case 109:
case 110:
case 111:
case 112:
case 113:
case 114:
case 115:
case 116:
case 117:
case 118:
case 119:
return true;
default:
return false;
}
}
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)

@ -0,0 +1,129 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: types.proto
#ifndef PROTOBUF_types_2eproto__INCLUDED
#define PROTOBUF_types_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/generated_enum_reflection.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_types_2eproto();
void protobuf_InitDefaults_types_2eproto();
void protobuf_AssignDesc_types_2eproto();
void protobuf_ShutdownFile_types_2eproto();
enum DataType {
DT_INVALID = 0,
DT_FLOAT = 1,
DT_DOUBLE = 2,
DT_INT32 = 3,
DT_UINT8 = 4,
DT_INT16 = 5,
DT_INT8 = 6,
DT_STRING = 7,
DT_COMPLEX64 = 8,
DT_INT64 = 9,
DT_BOOL = 10,
DT_QINT8 = 11,
DT_QUINT8 = 12,
DT_QINT32 = 13,
DT_BFLOAT16 = 14,
DT_QINT16 = 15,
DT_QUINT16 = 16,
DT_UINT16 = 17,
DT_COMPLEX128 = 18,
DT_HALF = 19,
DT_FLOAT_REF = 101,
DT_DOUBLE_REF = 102,
DT_INT32_REF = 103,
DT_UINT8_REF = 104,
DT_INT16_REF = 105,
DT_INT8_REF = 106,
DT_STRING_REF = 107,
DT_COMPLEX64_REF = 108,
DT_INT64_REF = 109,
DT_BOOL_REF = 110,
DT_QINT8_REF = 111,
DT_QUINT8_REF = 112,
DT_QINT32_REF = 113,
DT_BFLOAT16_REF = 114,
DT_QINT16_REF = 115,
DT_QUINT16_REF = 116,
DT_UINT16_REF = 117,
DT_COMPLEX128_REF = 118,
DT_HALF_REF = 119,
DataType_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min,
DataType_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max
};
bool DataType_IsValid(int value);
const DataType DataType_MIN = DT_INVALID;
const DataType DataType_MAX = DT_HALF_REF;
const int DataType_ARRAYSIZE = DataType_MAX + 1;
const ::google::protobuf::EnumDescriptor* DataType_descriptor();
inline const ::std::string& DataType_Name(DataType value) {
return ::google::protobuf::internal::NameOfEnum(
DataType_descriptor(), value);
}
inline bool DataType_Parse(
const ::std::string& name, DataType* value) {
return ::google::protobuf::internal::ParseNamedEnum<DataType>(
DataType_descriptor(), name, value);
}
// ===================================================================
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
#ifndef SWIG
namespace google {
namespace protobuf {
template <> struct is_proto_enum< ::tensorflow::DataType> : ::google::protobuf::internal::true_type {};
template <>
inline const EnumDescriptor* GetEnumDescriptor< ::tensorflow::DataType>() {
return ::tensorflow::DataType_descriptor();
}
} // namespace protobuf
} // namespace google
#endif // SWIG
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_types_2eproto__INCLUDED

@ -0,0 +1,572 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: versions.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include "versions.pb.h"
#include <algorithm>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite_inl.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
namespace {
const ::google::protobuf::Descriptor* VersionDef_descriptor_ = NULL;
const ::google::protobuf::internal::GeneratedMessageReflection*
VersionDef_reflection_ = NULL;
} // namespace
void protobuf_AssignDesc_versions_2eproto() GOOGLE_ATTRIBUTE_COLD;
void protobuf_AssignDesc_versions_2eproto() {
protobuf_AddDesc_versions_2eproto();
const ::google::protobuf::FileDescriptor* file =
::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
"versions.proto");
GOOGLE_CHECK(file != NULL);
VersionDef_descriptor_ = file->message_type(0);
static const int VersionDef_offsets_[3] = {
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, producer_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, min_consumer_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, bad_consumers_),
};
VersionDef_reflection_ =
::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
VersionDef_descriptor_,
VersionDef::internal_default_instance(),
VersionDef_offsets_,
-1,
-1,
-1,
sizeof(VersionDef),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, _internal_metadata_));
}
namespace {
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
void protobuf_AssignDescriptorsOnce() {
::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
&protobuf_AssignDesc_versions_2eproto);
}
void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;
void protobuf_RegisterTypes(const ::std::string&) {
protobuf_AssignDescriptorsOnce();
::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
VersionDef_descriptor_, VersionDef::internal_default_instance());
}
} // namespace
void protobuf_ShutdownFile_versions_2eproto() {
VersionDef_default_instance_.Shutdown();
delete VersionDef_reflection_;
}
void protobuf_InitDefaults_versions_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
VersionDef_default_instance_.DefaultConstruct();
VersionDef_default_instance_.get_mutable()->InitAsDefaultInstance();
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_InitDefaults_versions_2eproto_once_);
void protobuf_InitDefaults_versions_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_InitDefaults_versions_2eproto_once_,
&protobuf_InitDefaults_versions_2eproto_impl);
}
void protobuf_AddDesc_versions_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
protobuf_InitDefaults_versions_2eproto();
::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
"\n\016versions.proto\022\ntensorflow\"K\n\nVersionD"
"ef\022\020\n\010producer\030\001 \001(\005\022\024\n\014min_consumer\030\002 \001"
"(\005\022\025\n\rbad_consumers\030\003 \003(\005B/\n\030org.tensorf"
"low.frameworkB\016VersionsProtosP\001\370\001\001b\006prot"
"o3", 162);
::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
"versions.proto", &protobuf_RegisterTypes);
::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_versions_2eproto);
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AddDesc_versions_2eproto_once_);
void protobuf_AddDesc_versions_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_AddDesc_versions_2eproto_once_,
&protobuf_AddDesc_versions_2eproto_impl);
}
// Force AddDescriptors() to be called at static initialization time.
struct StaticDescriptorInitializer_versions_2eproto {
StaticDescriptorInitializer_versions_2eproto() {
protobuf_AddDesc_versions_2eproto();
}
} static_descriptor_initializer_versions_2eproto_;
namespace {
static void MergeFromFail(int line) GOOGLE_ATTRIBUTE_COLD GOOGLE_ATTRIBUTE_NORETURN;
static void MergeFromFail(int line) {
::google::protobuf::internal::MergeFromFail(__FILE__, line);
}
} // namespace
// ===================================================================
#if !defined(_MSC_VER) || _MSC_VER >= 1900
const int VersionDef::kProducerFieldNumber;
const int VersionDef::kMinConsumerFieldNumber;
const int VersionDef::kBadConsumersFieldNumber;
#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
VersionDef::VersionDef()
: ::google::protobuf::Message(), _internal_metadata_(NULL) {
if (this != internal_default_instance()) protobuf_InitDefaults_versions_2eproto();
SharedCtor();
// @@protoc_insertion_point(constructor:tensorflow.VersionDef)
}
VersionDef::VersionDef(::google::protobuf::Arena* arena)
: ::google::protobuf::Message(),
_internal_metadata_(arena),
bad_consumers_(arena) {
#ifdef GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
protobuf_InitDefaults_versions_2eproto();
#endif // GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
SharedCtor();
RegisterArenaDtor(arena);
// @@protoc_insertion_point(arena_constructor:tensorflow.VersionDef)
}
void VersionDef::InitAsDefaultInstance() {
}
VersionDef::VersionDef(const VersionDef& from)
: ::google::protobuf::Message(),
_internal_metadata_(NULL) {
SharedCtor();
UnsafeMergeFrom(from);
// @@protoc_insertion_point(copy_constructor:tensorflow.VersionDef)
}
void VersionDef::SharedCtor() {
::memset(&producer_, 0, reinterpret_cast<char*>(&min_consumer_) -
reinterpret_cast<char*>(&producer_) + sizeof(min_consumer_));
_cached_size_ = 0;
}
VersionDef::~VersionDef() {
// @@protoc_insertion_point(destructor:tensorflow.VersionDef)
SharedDtor();
}
void VersionDef::SharedDtor() {
::google::protobuf::Arena* arena = GetArenaNoVirtual();
if (arena != NULL) {
return;
}
}
void VersionDef::ArenaDtor(void* object) {
VersionDef* _this = reinterpret_cast< VersionDef* >(object);
(void)_this;
}
void VersionDef::RegisterArenaDtor(::google::protobuf::Arena* arena) {
}
void VersionDef::SetCachedSize(int size) const {
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
}
const ::google::protobuf::Descriptor* VersionDef::descriptor() {
protobuf_AssignDescriptorsOnce();
return VersionDef_descriptor_;
}
const VersionDef& VersionDef::default_instance() {
protobuf_InitDefaults_versions_2eproto();
return *internal_default_instance();
}
::google::protobuf::internal::ExplicitlyConstructed<VersionDef> VersionDef_default_instance_;
VersionDef* VersionDef::New(::google::protobuf::Arena* arena) const {
return ::google::protobuf::Arena::CreateMessage<VersionDef>(arena);
}
void VersionDef::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.VersionDef)
#if defined(__clang__)
#define ZR_HELPER_(f) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Winvalid-offsetof\"") \
__builtin_offsetof(VersionDef, f) \
_Pragma("clang diagnostic pop")
#else
#define ZR_HELPER_(f) reinterpret_cast<char*>(\
&reinterpret_cast<VersionDef*>(16)->f)
#endif
#define ZR_(first, last) do {\
::memset(&(first), 0,\
ZR_HELPER_(last) - ZR_HELPER_(first) + sizeof(last));\
} while (0)
ZR_(producer_, min_consumer_);
#undef ZR_HELPER_
#undef ZR_
bad_consumers_.Clear();
}
bool VersionDef::MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
::google::protobuf::uint32 tag;
// @@protoc_insertion_point(parse_start:tensorflow.VersionDef)
for (;;) {
::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// optional int32 producer = 1;
case 1: {
if (tag == 8) {
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
input, &producer_)));
} else {
goto handle_unusual;
}
if (input->ExpectTag(16)) goto parse_min_consumer;
break;
}
// optional int32 min_consumer = 2;
case 2: {
if (tag == 16) {
parse_min_consumer:
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
input, &min_consumer_)));
} else {
goto handle_unusual;
}
if (input->ExpectTag(26)) goto parse_bad_consumers;
break;
}
// repeated int32 bad_consumers = 3;
case 3: {
if (tag == 26) {
parse_bad_consumers:
DO_((::google::protobuf::internal::WireFormatLite::ReadPackedPrimitive<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
input, this->mutable_bad_consumers())));
} else if (tag == 24) {
DO_((::google::protobuf::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
1, 26, input, this->mutable_bad_consumers())));
} else {
goto handle_unusual;
}
if (input->ExpectAtEnd()) goto success;
break;
}
default: {
handle_unusual:
if (tag == 0 ||
::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
goto success;
}
DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:tensorflow.VersionDef)
return true;
failure:
// @@protoc_insertion_point(parse_failure:tensorflow.VersionDef)
return false;
#undef DO_
}
void VersionDef::SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:tensorflow.VersionDef)
// optional int32 producer = 1;
if (this->producer() != 0) {
::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->producer(), output);
}
// optional int32 min_consumer = 2;
if (this->min_consumer() != 0) {
::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->min_consumer(), output);
}
// repeated int32 bad_consumers = 3;
if (this->bad_consumers_size() > 0) {
::google::protobuf::internal::WireFormatLite::WriteTag(3, ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
output->WriteVarint32(_bad_consumers_cached_byte_size_);
}
for (int i = 0; i < this->bad_consumers_size(); i++) {
::google::protobuf::internal::WireFormatLite::WriteInt32NoTag(
this->bad_consumers(i), output);
}
// @@protoc_insertion_point(serialize_end:tensorflow.VersionDef)
}
::google::protobuf::uint8* VersionDef::InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* target) const {
(void)deterministic; // Unused
// @@protoc_insertion_point(serialize_to_array_start:tensorflow.VersionDef)
// optional int32 producer = 1;
if (this->producer() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->producer(), target);
}
// optional int32 min_consumer = 2;
if (this->min_consumer() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(2, this->min_consumer(), target);
}
// repeated int32 bad_consumers = 3;
if (this->bad_consumers_size() > 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteTagToArray(
3,
::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
target);
target = ::google::protobuf::io::CodedOutputStream::WriteVarint32ToArray(
_bad_consumers_cached_byte_size_, target);
}
for (int i = 0; i < this->bad_consumers_size(); i++) {
target = ::google::protobuf::internal::WireFormatLite::
WriteInt32NoTagToArray(this->bad_consumers(i), target);
}
// @@protoc_insertion_point(serialize_to_array_end:tensorflow.VersionDef)
return target;
}
size_t VersionDef::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.VersionDef)
size_t total_size = 0;
// optional int32 producer = 1;
if (this->producer() != 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int32Size(
this->producer());
}
// optional int32 min_consumer = 2;
if (this->min_consumer() != 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int32Size(
this->min_consumer());
}
// repeated int32 bad_consumers = 3;
{
size_t data_size = 0;
unsigned int count = this->bad_consumers_size();
for (unsigned int i = 0; i < count; i++) {
data_size += ::google::protobuf::internal::WireFormatLite::
Int32Size(this->bad_consumers(i));
}
if (data_size > 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int32Size(data_size);
}
int cached_size = ::google::protobuf::internal::ToCachedSize(data_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_bad_consumers_cached_byte_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
total_size += data_size;
}
int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
return total_size;
}
void VersionDef::MergeFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.VersionDef)
if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
const VersionDef* source =
::google::protobuf::internal::DynamicCastToGenerated<const VersionDef>(
&from);
if (source == NULL) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.VersionDef)
::google::protobuf::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.VersionDef)
UnsafeMergeFrom(*source);
}
}
void VersionDef::MergeFrom(const VersionDef& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.VersionDef)
if (GOOGLE_PREDICT_TRUE(&from != this)) {
UnsafeMergeFrom(from);
} else {
MergeFromFail(__LINE__);
}
}
void VersionDef::UnsafeMergeFrom(const VersionDef& from) {
GOOGLE_DCHECK(&from != this);
bad_consumers_.UnsafeMergeFrom(from.bad_consumers_);
if (from.producer() != 0) {
set_producer(from.producer());
}
if (from.min_consumer() != 0) {
set_min_consumer(from.min_consumer());
}
}
void VersionDef::CopyFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.VersionDef)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void VersionDef::CopyFrom(const VersionDef& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.VersionDef)
if (&from == this) return;
Clear();
UnsafeMergeFrom(from);
}
bool VersionDef::IsInitialized() const {
return true;
}
void VersionDef::Swap(VersionDef* other) {
if (other == this) return;
if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
InternalSwap(other);
} else {
VersionDef temp;
temp.UnsafeMergeFrom(*this);
CopyFrom(*other);
other->CopyFrom(temp);
}
}
void VersionDef::UnsafeArenaSwap(VersionDef* other) {
if (other == this) return;
GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());
InternalSwap(other);
}
void VersionDef::InternalSwap(VersionDef* other) {
std::swap(producer_, other->producer_);
std::swap(min_consumer_, other->min_consumer_);
bad_consumers_.UnsafeArenaSwap(&other->bad_consumers_);
_internal_metadata_.Swap(&other->_internal_metadata_);
std::swap(_cached_size_, other->_cached_size_);
}
::google::protobuf::Metadata VersionDef::GetMetadata() const {
protobuf_AssignDescriptorsOnce();
::google::protobuf::Metadata metadata;
metadata.descriptor = VersionDef_descriptor_;
metadata.reflection = VersionDef_reflection_;
return metadata;
}
#if PROTOBUF_INLINE_NOT_IN_HEADERS
// VersionDef
// optional int32 producer = 1;
void VersionDef::clear_producer() {
producer_ = 0;
}
::google::protobuf::int32 VersionDef::producer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.producer)
return producer_;
}
void VersionDef::set_producer(::google::protobuf::int32 value) {
producer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.producer)
}
// optional int32 min_consumer = 2;
void VersionDef::clear_min_consumer() {
min_consumer_ = 0;
}
::google::protobuf::int32 VersionDef::min_consumer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.min_consumer)
return min_consumer_;
}
void VersionDef::set_min_consumer(::google::protobuf::int32 value) {
min_consumer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.min_consumer)
}
// repeated int32 bad_consumers = 3;
int VersionDef::bad_consumers_size() const {
return bad_consumers_.size();
}
void VersionDef::clear_bad_consumers() {
bad_consumers_.Clear();
}
::google::protobuf::int32 VersionDef::bad_consumers(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.bad_consumers)
return bad_consumers_.Get(index);
}
void VersionDef::set_bad_consumers(int index, ::google::protobuf::int32 value) {
bad_consumers_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.bad_consumers)
}
void VersionDef::add_bad_consumers(::google::protobuf::int32 value) {
bad_consumers_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.VersionDef.bad_consumers)
}
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
VersionDef::bad_consumers() const {
// @@protoc_insertion_point(field_list:tensorflow.VersionDef.bad_consumers)
return bad_consumers_;
}
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
VersionDef::mutable_bad_consumers() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.VersionDef.bad_consumers)
return &bad_consumers_;
}
inline const VersionDef* VersionDef::internal_default_instance() {
return &VersionDef_default_instance_.get();
}
#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)

@ -0,0 +1,239 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: versions.proto
#ifndef PROTOBUF_versions_2eproto__INCLUDED
#define PROTOBUF_versions_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/unknown_field_set.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_versions_2eproto();
void protobuf_InitDefaults_versions_2eproto();
void protobuf_AssignDesc_versions_2eproto();
void protobuf_ShutdownFile_versions_2eproto();
class VersionDef;
// ===================================================================
class VersionDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.VersionDef) */ {
public:
VersionDef();
virtual ~VersionDef();
VersionDef(const VersionDef& from);
inline VersionDef& operator=(const VersionDef& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const VersionDef& default_instance();
static const VersionDef* internal_default_instance();
void UnsafeArenaSwap(VersionDef* other);
void Swap(VersionDef* other);
// implements Message ----------------------------------------------
inline VersionDef* New() const { return New(NULL); }
VersionDef* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const VersionDef& from);
void MergeFrom(const VersionDef& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VersionDef* other);
void UnsafeMergeFrom(const VersionDef& from);
protected:
explicit VersionDef(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int32 producer = 1;
void clear_producer();
static const int kProducerFieldNumber = 1;
::google::protobuf::int32 producer() const;
void set_producer(::google::protobuf::int32 value);
// optional int32 min_consumer = 2;
void clear_min_consumer();
static const int kMinConsumerFieldNumber = 2;
::google::protobuf::int32 min_consumer() const;
void set_min_consumer(::google::protobuf::int32 value);
// repeated int32 bad_consumers = 3;
int bad_consumers_size() const;
void clear_bad_consumers();
static const int kBadConsumersFieldNumber = 3;
::google::protobuf::int32 bad_consumers(int index) const;
void set_bad_consumers(int index, ::google::protobuf::int32 value);
void add_bad_consumers(::google::protobuf::int32 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
bad_consumers() const;
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
mutable_bad_consumers();
// @@protoc_insertion_point(class_scope:tensorflow.VersionDef)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedField< ::google::protobuf::int32 > bad_consumers_;
mutable int _bad_consumers_cached_byte_size_;
::google::protobuf::int32 producer_;
::google::protobuf::int32 min_consumer_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_versions_2eproto_impl();
friend void protobuf_AddDesc_versions_2eproto_impl();
friend void protobuf_AssignDesc_versions_2eproto();
friend void protobuf_ShutdownFile_versions_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<VersionDef> VersionDef_default_instance_;
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// VersionDef
// optional int32 producer = 1;
inline void VersionDef::clear_producer() {
producer_ = 0;
}
inline ::google::protobuf::int32 VersionDef::producer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.producer)
return producer_;
}
inline void VersionDef::set_producer(::google::protobuf::int32 value) {
producer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.producer)
}
// optional int32 min_consumer = 2;
inline void VersionDef::clear_min_consumer() {
min_consumer_ = 0;
}
inline ::google::protobuf::int32 VersionDef::min_consumer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.min_consumer)
return min_consumer_;
}
inline void VersionDef::set_min_consumer(::google::protobuf::int32 value) {
min_consumer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.min_consumer)
}
// repeated int32 bad_consumers = 3;
inline int VersionDef::bad_consumers_size() const {
return bad_consumers_.size();
}
inline void VersionDef::clear_bad_consumers() {
bad_consumers_.Clear();
}
inline ::google::protobuf::int32 VersionDef::bad_consumers(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.bad_consumers)
return bad_consumers_.Get(index);
}
inline void VersionDef::set_bad_consumers(int index, ::google::protobuf::int32 value) {
bad_consumers_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.bad_consumers)
}
inline void VersionDef::add_bad_consumers(::google::protobuf::int32 value) {
bad_consumers_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.VersionDef.bad_consumers)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
VersionDef::bad_consumers() const {
// @@protoc_insertion_point(field_list:tensorflow.VersionDef.bad_consumers)
return bad_consumers_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
VersionDef::mutable_bad_consumers() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.VersionDef.bad_consumers)
return &bad_consumers_;
}
inline const VersionDef* VersionDef::internal_default_instance() {
return &VersionDef_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_versions_2eproto__INCLUDED

@ -0,0 +1,182 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Sample of using the OpenCV dnn module with a TensorFlow Inception model.
*/
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <cstdlib>
using namespace std;
const String keys =
"{help h || Sample app for loading Inception TensorFlow model. "
"The model and class names list can be downloaded here: "
"https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip }"
"{model m |tensorflow_inception_graph.pb| path to TensorFlow .pb model file }"
"{image i || path to image file }"
"{i_blob | .input | input blob name) }"
"{o_blob | softmax2 | output blob name) }"
"{c_names c | imagenet_comp_graph_label_strings.txt | path to file with classnames for class id }"
"{result r || path to save output blob (optional, binary format, NCHW order) }"
;
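// Example invocation (the binary name and the image file below are illustrative;
// model and class-name defaults match the parser keys above):
//   ./tf_inception --model=tensorflow_inception_graph.pb --image=space_shuttle.jpg \
//       --c_names=imagenet_comp_graph_label_strings.txt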
void getMaxClass(dnn::Blob &probBlob, int *classId, double *classProb);
std::vector<String> readClassNames(const char *filename);
int main(int argc, char **argv)
{
cv::CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
String modelFile = parser.get<String>("model");
String imageFile = parser.get<String>("image");
String inBlobName = parser.get<String>("i_blob");
String outBlobName = parser.get<String>("o_blob");
if (!parser.check())
{
parser.printErrors();
return 0;
}
String classNamesFile = parser.get<String>("c_names");
String resultFile = parser.get<String>("result");
//! [Create the importer of TensorFlow model]
Ptr<dnn::Importer> importer;
try //Try to import TensorFlow Inception model
{
importer = dnn::createTensorflowImporter(modelFile);
}
catch (const cv::Exception &err) //Importer can throw errors, we will catch them
{
std::cerr << err.msg << std::endl;
}
//! [Create the importer of TensorFlow model]
if (!importer)
{
std::cerr << "Can't load network by using the mode file: " << std::endl;
std::cerr << modelFile << std::endl;
exit(-1);
}
//! [Initialize network]
dnn::Net net;
importer->populateNet(net);
importer.release(); //We don't need importer anymore
//! [Initialize network]
//! [Prepare blob]
Mat img = imread(imageFile);
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
}
cv::Size inputImgSize = cv::Size(224, 224);
if (inputImgSize != img.size())
resize(img, img, inputImgSize); //Resize image to input size
cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
dnn::Blob inputBlob = dnn::Blob::fromImages(img); //Convert Mat to dnn::Blob image batch
//! [Prepare blob]
//! [Set input blob]
net.setBlob(inBlobName, inputBlob); //set the network input
//! [Set input blob]
cv::TickMeter tm;
tm.start();
//! [Make forward pass]
net.forward(); //compute output
//! [Make forward pass]
tm.stop();
//! [Gather output]
dnn::Blob prob = net.getBlob(outBlobName); //gather output of "prob" layer
Mat& result = prob.matRef();
BlobShape shape = prob.shape();
if (!resultFile.empty()) {
CV_Assert(result.isContinuous());
ofstream fout(resultFile.c_str(), ios::out | ios::binary);
fout.write((char*)result.data, result.total() * sizeof(float));
fout.close();
}
std::cout << "Output blob shape " << shape << std::endl;
std::cout << "Inference time, ms: " << tm.getTimeMilli() << std::endl;
if (!classNamesFile.empty()) {
std::vector<String> classNames = readClassNames(classNamesFile.c_str());
int classId;
double classProb;
getMaxClass(prob, &classId, &classProb);//find the best class
//! [Print results]
std::cout << "Best class: #" << classId << " '" << classNames.at(classId) << "'" << std::endl;
std::cout << "Probability: " << classProb * 100 << "%" << std::endl;
}
return 0;
} //main
/* Find the best class for the blob (i.e. the class with maximal probability) */
void getMaxClass(dnn::Blob &probBlob, int *classId, double *classProb)
{
Mat probMat = probBlob.matRefConst().reshape(1, 1); //reshape the blob to 1x1000 matrix
Point classNumber;
minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
*classId = classNumber.x;
}
std::vector<String> readClassNames(const char *filename)
{
std::vector<String> classNames;
std::ifstream fp(filename);
if (!fp.is_open())
{
std::cerr << "File with classes labels not found: " << filename << std::endl;
exit(-1);
}
std::string name;
while (std::getline(fp, name))
{
if (name.length())
classNames.push_back( name );
}
fp.close();
return classNames;
}

@ -15,7 +15,9 @@ namespace dnn
static void initConvDeconvLayerFromCaffe(Ptr<BaseConvolutionLayer> l, LayerParams &params)
{
l->setParamsFrom(params);
getConvolutionKernelParams(params, l->kernel.height, l->kernel.width, l->pad.height, l->pad.width, l->stride.height, l->stride.width, l->dilation.height, l->dilation.width);
getConvolutionKernelParams(params, l->kernel.height, l->kernel.width, l->pad.height,
l->pad.width, l->stride.height, l->stride.width, l->dilation.height,
l->dilation.width, l->padMode);
bool bias = params.get<bool>("bias_term", true);
int numOutput = params.get<int>("num_output");
@ -47,6 +49,7 @@ Ptr<Layer> createLayerFromCaffe<PoolingLayer>(LayerParams &params)
int type = PoolingLayer::MAX;
Size kernel, stride, pad;
bool globalPooling;
cv::String padMode;
if (params.has("pool"))
{
@ -61,11 +64,12 @@ Ptr<Layer> createLayerFromCaffe<PoolingLayer>(LayerParams &params)
CV_Error(Error::StsBadArg, "Unknown pooling type \"" + pool + "\"");
}
getPoolingKernelParams(params, kernel.height, kernel.width, globalPooling, pad.height, pad.width, stride.height, stride.width);
getPoolingKernelParams(params, kernel.height, kernel.width, globalPooling,
pad.height, pad.width, stride.height, stride.width, padMode);
//getCaffeConvParams(params, kernel, pad, stride);
if (!globalPooling)
return Ptr<Layer>(PoolingLayer::create(type, kernel, stride, pad));
return Ptr<Layer>(PoolingLayer::create(type, kernel, stride, pad, padMode));
else
return Ptr<Layer>(PoolingLayer::createGlobal(type));
}
@ -118,8 +122,10 @@ Ptr<Layer> createLayerFromCaffe<LRNLayer>(LayerParams& params)
double alpha = params.get<double>("alpha", 1);
double beta = params.get<double>("beta", 0.75);
double bias = params.get<double>("bias", 1);
bool normBySize = params.get<bool>("norm_by_size", true);
return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta));
return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta, bias, normBySize));
}
template<>
@ -139,6 +145,7 @@ Ptr<Layer> createLayerFromCaffe<ReshapeLayer>(LayerParams &params)
{
int axis = params.get<int>("axis", 0);
int numAxes = params.get<int>("num_axes", -1);
bool enableReordering = params.get<bool>("reorder_dims", false);
CV_Assert(numAxes >= -1);
Range applyingRange = (numAxes == -1) ? Range(axis, INT_MAX) : Range(axis, axis + numAxes);
@ -153,7 +160,7 @@ Ptr<Layer> createLayerFromCaffe<ReshapeLayer>(LayerParams &params)
else
newShape = Shape::all(0);
return Ptr<Layer>(ReshapeLayer::create(newShape, applyingRange));
return Ptr<Layer>(ReshapeLayer::create(newShape, applyingRange, enableReordering));
}
template<>

@ -159,6 +159,7 @@ struct Net::Impl
inpl.name = "_input";
inpl.type = "__NetInputLayer__";
inpl.layerInstance = netInputLayer;
layerNameToId.insert(std::make_pair(inpl.name, inpl.id));
lastLayerId = 1;
netWasAllocated = false;

@ -50,6 +50,7 @@
#include "layers/prior_box_layer.hpp"
#include "layers/detection_output_layer.hpp"
#include "layers/normalize_bbox_layer.hpp"
#include "layers/shift_layer.hpp"
namespace cv
{
@ -94,13 +95,15 @@ void initModule()
REG_RUNTIME_LAYER_FUNC(AbsVal, createLayerFromCaffe<AbsLayer>);
REG_RUNTIME_LAYER_FUNC(Power, createLayerFromCaffe<PowerLayer>);
REG_RUNTIME_LAYER_CLASS(Dropout, BlankLayer);
REG_RUNTIME_LAYER_CLASS(Identity, BlankLayer);
REG_RUNTIME_LAYER_FUNC(Crop, createLayerFromCaffe<CropLayer>);
REG_RUNTIME_LAYER_FUNC(Eltwise, createLayerFromCaffe<EltwiseLayer>);
REG_RUNTIME_LAYER_CLASS(Permute, PermuteLayer)
REG_RUNTIME_LAYER_CLASS(PriorBox, PriorBoxLayer)
REG_RUNTIME_LAYER_CLASS(DetectionOutput, DetectionOutputLayer)
REG_RUNTIME_LAYER_CLASS(NormalizeBBox, NormalizeBBoxLayer)
REG_RUNTIME_LAYER_CLASS(Permute, PermuteLayer);
REG_RUNTIME_LAYER_CLASS(PriorBox, PriorBoxLayer);
REG_RUNTIME_LAYER_CLASS(DetectionOutput, DetectionOutputLayer);
REG_RUNTIME_LAYER_CLASS(NormalizeBBox, NormalizeBBoxLayer);
REG_RUNTIME_LAYER_CLASS(Shift, ShiftLayer);
init.status = true;
}

@ -202,9 +202,13 @@ void ConvolutionLayerImpl::im2col(const Mat &srcImg, Mat &dstCol)
Mat &colMat = colBlob.matRef();
if (srcImg.type() == CV_32F)
im2col_CpuPBody<float>::run(srcImg.ptr<float>(), inpGroupCn, inpH, inpW, kernel.height, kernel.width, pad.height, pad.width, stride.height, stride.width, dilation.height, dilation.width, colMat.ptr<float>());
im2col_CpuPBody<float>::run(srcImg.ptr<float>(), inpGroupCn, inpH, inpW, kernel.height,
kernel.width, pad.height, pad.width, stride.height, stride.width,
dilation.height, dilation.width, outH, outW, colMat.ptr<float>());
if (srcImg.type() == CV_64F)
im2col_CpuPBody<double>::run(srcImg.ptr<double>(), inpGroupCn, inpH, inpW, kernel.height, kernel.width, pad.height, pad.width, stride.height, stride.width, dilation.height, dilation.width, colMat.ptr<double>());
im2col_CpuPBody<double>::run(srcImg.ptr<double>(), inpGroupCn, inpH, inpW, kernel.height,
kernel.width, pad.height, pad.width, stride.height, stride.width,
dilation.height, dilation.width, outH, outW, colMat.ptr<double>());
dstCol = colMat;
}
@ -214,11 +218,18 @@ void ConvolutionLayerImpl::computeInpOutShape(const Blob &input)
inpH = input.rows();
inpW = input.cols();
inpCn = input.channels();
outH = (inpH + 2 * pad.height - (dilation.height * (kernel.height - 1) + 1)) / stride.height + 1;
outW = (inpW + 2 * pad.width - (dilation.width * (kernel.width - 1) + 1)) / stride.width + 1;
outCn = numOutput;
if (padMode.empty())
{
outH = (inpH + 2 * pad.height - (dilation.height * (kernel.height - 1) + 1)) / stride.height + 1;
outW = (inpW + 2 * pad.width - (dilation.width * (kernel.width - 1) + 1)) / stride.width + 1;
}
else
{
getConvPoolOutParams(inpH, inpW, kernel, stride, pad, padMode, outH, outW);
}
topH = outH; topW = outW; topCn = outCn;
}

@ -103,4 +103,5 @@ Ptr<Layer> createDeconvolutionLayerFromCaffe(LayerParams &params);
}
}
#endif

@ -82,7 +82,7 @@ void FullyConnectedLayerImpl::allocate(const std::vector<Blob*> &input, std::vec
for (size_t i = 0; i < input.size(); i++)
{
CV_Assert(i == 0 || (input[i]->equalShape(*input[0]) && input[i]->type() == dtype));
Shape outShape = input[i]->shape().slice(0, axis) + Shape(numOutput);
Shape outShape = Shape(outerSize, numOutput);
output[i].create(outShape, dtype, allocFlags);
}
}

@ -102,19 +102,26 @@ void getKernelSize(LayerParams &params, int &kernelH, int &kernelW)
CV_Assert(kernelH > 0 && kernelW > 0);
}
void getStrideAndPadding(LayerParams &params, int &padH, int &padW, int &strideH, int &strideW)
void getStrideAndPadding(LayerParams &params, int &padH, int &padW, int &strideH, int &strideW, cv::String& padMode)
{
util::getParameter(params, "pad", "pad", padH, padW, true, 0);
util::getParameter(params, "stride", "stride", strideH, strideW, true, 1);
padMode = "";
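// Optional "pad_mode" ("SAME" or "VALID"); when present it drives the output shape
// and padding via getConvPoolOutParams instead of the explicit pad values.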
if (params.has("pad_mode"))
{
padMode = params.get<String>("pad_mode");
}
CV_Assert(padH >= 0 && padW >= 0 && strideH > 0 && strideW > 0);
}
}
void getPoolingKernelParams(LayerParams &params, int &kernelH, int &kernelW, bool &globalPooling, int &padH, int &padW, int &strideH, int &strideW)
void getPoolingKernelParams(LayerParams &params, int &kernelH, int &kernelW, bool &globalPooling,
int &padH, int &padW, int &strideH, int &strideW, cv::String &padMode)
{
util::getStrideAndPadding(params, padH, padW, strideH, strideW);
util::getStrideAndPadding(params, padH, padW, strideH, strideW, padMode);
globalPooling = params.has("global_pooling");
@ -135,15 +142,51 @@ void getPoolingKernelParams(LayerParams &params, int &kernelH, int &kernelW, boo
}
}
void getConvolutionKernelParams(LayerParams &params, int &kernelH, int &kernelW, int &padH, int &padW, int &strideH, int &strideW, int &dilationH, int &dilationW)
void getConvolutionKernelParams(LayerParams &params, int &kernelH, int &kernelW, int &padH, int &padW,
int &strideH, int &strideW, int &dilationH, int &dilationW, cv::String &padMode)
{
util::getKernelSize(params, kernelH, kernelW);
util::getStrideAndPadding(params, padH, padW, strideH, strideW);
util::getStrideAndPadding(params, padH, padW, strideH, strideW, padMode);
util::getParameter(params, "dilation", "dilation", dilationH, dilationW, true, 1);
CV_Assert(dilationH > 0 && dilationW > 0);
}
// From TensorFlow code:
// Total padding on rows and cols is
// Pr = (R' - 1) * S + Kr - R
// Pc = (C' - 1) * S + Kc - C
// where (R', C') are output dimensions, (R, C) are input dimensions, S
// is stride, (Kr, Kc) are filter dimensions.
// We pad Pr/2 on the left and Pr - Pr/2 on the right, Pc/2 on the top
// and Pc - Pc/2 on the bottom. When Pr or Pc is odd, this means
// we pad more on the right and bottom than on the top and left.
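// Illustrative example (assumed values): inputH = 10, kernel.height = 3, stride.height = 2:
//   VALID: outH = (10 - 3 + 2) / 2 = 4 and no padding is added;
//   SAME:  outH = (10 - 1 + 2) / 2 = 5, Ph = max(0, (5 - 1) * 2 + 3 - 10) = 1,
//          so pad.height = Ph / 2 = 0 and the extra row of padding falls on the bottom.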
void getConvPoolOutParams(const int inputH, const int inputW, const cv::Size &kernel,
const cv::Size &stride, cv::Size& pad, const cv::String &padMode,
int &outH, int &outW)
{
if (padMode == "VALID")
{
outH = (inputH - kernel.height + stride.height) / stride.height;
outW = (inputW - kernel.width + stride.width) / stride.width;
pad = cv::Size(0,0);
}
else if (padMode == "SAME")
{
outH = (inputH - 1 + stride.height) / stride.height;
outW = (inputW - 1 + stride.width) / stride.width;
int Ph = std::max(0, (outH - 1) * stride.height + kernel.height - inputH);
int Pw = std::max(0, (outW - 1) * stride.width + kernel.width - inputW);
// For odd values of total padding, add more padding at the 'right'
// side of the given dimension.
pad = cv::Size(Pw / 2, Ph / 2);
}
else
{
CV_Error(Error::StsError, "Unsupported padding mode");
}
}
}
}

@ -50,10 +50,15 @@ namespace cv
namespace dnn
{
void getConvolutionKernelParams(LayerParams &params, int &kernelH, int &kernelW, int &padH, int &padW, int &strideH, int &strideW, int &dilationH, int &dilationW);
void getConvolutionKernelParams(LayerParams &params, int &kernelH, int &kernelW, int &padH, int &padW,
int &strideH, int &strideW, int &dilationH, int &dilationW, cv::String& padMode);
void getPoolingKernelParams(LayerParams &params, int &kernelH, int &kernelW, bool &globalPooling, int &padH, int &padW, int &strideH, int &strideW);
void getPoolingKernelParams(LayerParams &params, int &kernelH, int &kernelW, bool &globalPooling,
int &padH, int &padW, int &strideH, int &strideW, cv::String& padMode);
void getConvPoolOutParams(const int inputH, const int inputW, const cv::Size& kernel,
const cv::Size& stride, cv::Size &pad, const cv::String& padMode,
int &outH, int &outW);
}
}

@ -53,12 +53,14 @@ namespace cv
namespace dnn
{
LRNLayerImpl::LRNLayerImpl(int type_, int size_, double alpha_, double beta_)
LRNLayerImpl::LRNLayerImpl(int type_, int size_, double alpha_, double beta_, double bias_, bool normBySize_)
{
type = type_;
size = size_;
alpha = alpha_;
beta = beta_;
bias = bias_;
normBySize = normBySize_;
}
void LRNLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
@ -118,6 +120,7 @@ void LRNLayerImpl::channelNoramlization_(Blob &srcBlob, Blob &dstBlob)
int num = srcBlob.num();
int channels = srcBlob.channels();
int ksize = (size - 1) / 2;
int sizeNormFactor = normBySize ? size : 1;
XMat srcMat = srcBlob.getRefConst<XMat>();
XMat dstMat = dstBlob.getRef<XMat>();
@ -146,7 +149,7 @@ void LRNLayerImpl::channelNoramlization_(Blob &srcBlob, Blob &dstBlob)
}
XMat dst = getPlane(dstMat, n, cn);
accum.convertTo(dst, dst.type(), alpha/size, 1);
accum.convertTo(dst, dst.type(), alpha/sizeNormFactor, bias);
cv::pow(dst, beta, dst);
cv::divide(getPlane(srcMat, n, cn), dst, dst);
}
@ -171,13 +174,15 @@ bool LRNLayerImpl::channelNoramlization_ocl(const UMat &src, UMat &dst)
Shape shape = Shape::like(src);
int ksize = (size - 1) / 2;
int sizeNormFactor = normBySize ? size : 1;
// TODO: add bias
size_t wgSize = ocl::Device::getDefault().maxWorkGroupSize();
UMat &scaleBuf = buf.umatRef();
size_t nthreads = (size_t)(shape.total() / shape[1]);
kerScale.args((int)nthreads,
ocl::KernelArg::PtrReadOnly(src), shape[0], shape[1], shape[2], shape[3],
size, (float)(alpha/size), (float)ksize, ocl::KernelArg::PtrWriteOnly(scaleBuf));
size, (float)(alpha/sizeNormFactor), (float)ksize, ocl::KernelArg::PtrWriteOnly(scaleBuf));
if (!kerScale.run(1, &nthreads, &wgSize, true))
return false;
@ -223,6 +228,7 @@ void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
{
int num = srcBlob.num();
int channels = srcBlob.channels();
int sizeNormFactor = normBySize ? size*size : 1;
XMat srcMat = srcBlob.getRefConst<XMat>();
XMat dstMat = dstBlob.getRef<XMat>();
@ -236,7 +242,7 @@ void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
sqrBoxFilter_(src, dst);
dst.convertTo(dst, dst.type(), alpha/(size*size), 1);
dst.convertTo(dst, dst.type(), alpha/sizeNormFactor, bias);
cv::pow(dst, beta, dst);
cv::divide(src, dst, dst);
}
@ -244,9 +250,10 @@ void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
}
Ptr<LRNLayer> LRNLayer::create(int type, int size, double alpha, double beta)
Ptr<LRNLayer> LRNLayer::create(int type, int size, double alpha, double beta, double bias,
bool normBySize)
{
return Ptr<LRNLayer>(new LRNLayerImpl(type, size, alpha, beta));
return Ptr<LRNLayer>(new LRNLayerImpl(type, size, alpha, beta, bias, normBySize));
}
}

@ -67,11 +67,13 @@ class LRNLayerImpl : public LRNLayer
public:
LRNLayerImpl(int type = CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);
LRNLayerImpl(int type = CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75, double bias = 1,
bool normBySize = true);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@ -43,6 +43,7 @@
#include <opencv2/core/ocl.hpp>
#include "opencl_kernels_dnn.hpp"
#include "op_im2col.hpp"
#include "opencl_kernels_dnn.hpp"
namespace cv
{
@ -124,3 +125,44 @@ bool col2im_ocl(const UMat &col,
#endif
}
}
namespace cv
{
namespace dnn
{
#ifdef HAVE_OPENCL
void im2col_ocl(UMat &img,
int channels, int height, int width,
int kernel_h, int kernel_w,
int pad_h, int pad_w,
int stride_h, int stride_w,
int height_out, int width_out,
UMat &col)
{
int h_out = height_out;
int w_out = width_out;
CV_Assert(img.isContinuous() && col.isContinuous());
CV_Assert(img.total() == (size_t)channels * height * width);
CV_Assert(col.total() == (size_t)channels * kernel_h * kernel_w * h_out * w_out);
ocl::Kernel im2col_ker("im2col", ocl::dnn::im2col_oclsrc);
CV_Assert(!im2col_ker.empty());
im2col_ker.args(ocl::KernelArg::PtrReadOnly(img), (int)img.offset,
channels, height, width,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
h_out, w_out,
ocl::KernelArg::PtrWriteOnly(col), (int)col.offset
);
size_t localSize = ocl::Device::getDefault().maxWorkGroupSize();
size_t globalSize = (size_t)channels * h_out * w_out;
CV_Assert(im2col_ker.run(1, &globalSize, &localSize, true));
}
#endif // HAVE_OPENCL
}
}

@ -70,6 +70,7 @@ public:
int pad_h, int pad_w,
int stride_h, int stride_w,
int dilation_h, int dilation_w,
int height_col, int width_col,
Dtype* data_col)
{
im2col_CpuPBody<Dtype> t;
@ -82,8 +83,8 @@ public:
t.stride_h = stride_h; t.stride_w = stride_w;
t.dilation_h = dilation_h; t.dilation_w = dilation_w;
t.height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
t.width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
t.height_col = height_col;
t.width_col = width_col;
t.channels_col = channels * kernel_h * kernel_w;
cv::parallel_for_(Range(0, t.channels_col), t);

@ -60,13 +60,14 @@ PoolingLayerImpl::PoolingLayerImpl()
globalPooling = false;
}
PoolingLayerImpl::PoolingLayerImpl(int type_, Size kernel_, Size stride_, Size pad_)
PoolingLayerImpl::PoolingLayerImpl(int type_, Size kernel_, Size stride_, Size pad_, const String &padMode_)
{
globalPooling = false;
type = type_;
kernel = kernel_;
pad = pad_;
stride = stride_;
padMode = padMode_;
}
void PoolingLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
@ -251,26 +252,36 @@ void PoolingLayerImpl::avePooling_cpu(Blob &src, Blob &dst)
void PoolingLayerImpl::computeOutputShape(Size inpSz)
{
//Yeah, something strange Caffe scheme-)
out.height = static_cast<int>(ceil(static_cast<float>(inpSz.height + 2 * pad.height - kernel.height) / stride.height)) + 1;
out.width = static_cast<int>(ceil(static_cast<float>(inpSz.width + 2 * pad.width - kernel.width) / stride.width)) + 1;
if (pad.height || pad.width)
if (padMode.empty()) {
//Yeah, something strange Caffe scheme-)
out.height = static_cast<int>(ceil(static_cast<float>(inpSz.height + 2 * pad.height -
kernel.height) / stride.height)) + 1;
out.width = static_cast<int>(ceil(static_cast<float>(inpSz.width + 2 * pad.width -
kernel.width) / stride.width)) + 1;
if (pad.height || pad.width)
{
// If we have padding, ensure that the last pooling starts strictly
// inside the image (instead of at the padding); otherwise clip the last.
if ((out.height - 1) * stride.height >= inpSz.height + pad.height)
--out.height;
if ((out.width - 1) * stride.width >= inpSz.width + pad.width)
--out.width;
CV_Assert((out.height - 1) * stride.height < inpSz.height + pad.height);
CV_Assert((out.width - 1) * stride.width < inpSz.width + pad.width);
}
}
else
{
// If we have padding, ensure that the last pooling starts strictly
// inside the image (instead of at the padding); otherwise clip the last.
if ((out.height - 1) * stride.height >= inpSz.height + pad.height)
--out.height;
if ((out.width - 1) * stride.width >= inpSz.width + pad.width)
--out.width;
CV_Assert((out.height - 1) * stride.height < inpSz.height + pad.height);
CV_Assert((out.width - 1) * stride.width < inpSz.width + pad.width);
getConvPoolOutParams(inpSz.height, inpSz.width, kernel, stride, pad,
padMode, out.height, out.width);
}
}
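// Illustrative example of the Caffe scheme (assumed values): inpSz.height = 5, kernel.height = 2,
// pad.height = 1, stride.height = 2 gives out.height = ceil((5 + 2 - 2) / 2.f) + 1 = 4; since
// (4 - 1) * 2 >= 5 + 1 the last pooling would start in the padding, so out.height is clipped to 3.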
Ptr<PoolingLayer> PoolingLayer::create(int type, Size kernel, Size stride, Size pad)
Ptr<PoolingLayer> PoolingLayer::create(int type, Size kernel, Size stride, Size pad,
const String& padMode)
{
return Ptr<PoolingLayer>(new PoolingLayerImpl(type, kernel, stride, pad));
return Ptr<PoolingLayer>(new PoolingLayerImpl(type, kernel, stride, pad, padMode));
}
Ptr<PoolingLayer> PoolingLayer::createGlobal(int type)

@ -1,4 +1,4 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
@ -69,7 +69,7 @@ class PoolingLayerImpl : public PoolingLayer
public:
PoolingLayerImpl();
PoolingLayerImpl(int type, Size kernel, Size stride, Size pad);
PoolingLayerImpl(int type, Size kernel, Size stride, Size pad, const String& padMode);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
@ -77,4 +77,5 @@ public:
}
}
#endif

@ -49,7 +49,8 @@ namespace cv
namespace dnn
{
ReshapeLayerImpl::ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_)
ReshapeLayerImpl::ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_, bool enableReordering_) :
enableReordering(enableReordering_)
{
newShapeDesc = newShape_;
newShapeRange = applyingRange_;
@ -72,14 +73,49 @@ void ReshapeLayerImpl::forward(std::vector<Blob*> &inputs, std::vector<Blob> &ou
{
for (size_t i = 0; i < outputs.size(); i++)
{
outputs[i].shareFrom(*inputs[i]);
Blob& srcBlob = *inputs[i];
BlobShape inputShape = inputs[i]->shape();
bool channelsReduced = inputShape.dims() > outShapes[i].dims() ||
(inputShape.dims() == 4 && inputShape[1] > outShapes[i][1]);
bool performReordering = enableReordering && inputShape.dims() == 4 && channelsReduced;
if (performReordering)
{
Blob reordered_blob(inputShape, inputs[i]->type());
float *dstData = reordered_blob.matRef().ptr<float>();
const float *srcData = srcBlob.matRefConst().ptr<float>();
int num = inputShape[0], channels = inputShape[1], height = inputShape[2], width = inputShape[3];
int total = num*channels*height*width;
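// Permute the element order from NCHW (src_i) to NHWC (dst_i) before the reshape takes effect.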
for(int i_n = 0; i_n < num; i_n++) {
for(int i_c = 0; i_c < channels; i_c++) {
for(int i_h = 0; i_h < height; i_h++) {
for(int i_w = 0; i_w < width; i_w++) {
int src_i = channels*height*width*i_n + height*width*i_c + width*i_h + i_w;
int dst_i = channels*height*width*i_n + i_c + channels*width*i_h + channels*i_w;
CV_Assert(dst_i < total);
CV_Assert(src_i < total);
dstData[dst_i] = srcData[src_i];
}
}
}
}
srcBlob = reordered_blob;
}
outputs[i].shareFrom(srcBlob);
outputs[i].reshape(outShapes[i]);
}
}
Ptr<ReshapeLayer> ReshapeLayer::create(const BlobShape &newShape, Range applyingRange /*= Range::all()*/)
Ptr<ReshapeLayer> ReshapeLayer::create(const BlobShape &newShape, Range applyingRange /*= Range::all()*/,
bool enableReordering /*= false*/)
{
return Ptr<ReshapeLayer>(new ReshapeLayerImpl(newShape, applyingRange));
return Ptr<ReshapeLayer>(new ReshapeLayerImpl(newShape, applyingRange, enableReordering));
}
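The reordering loop in ReshapeLayerImpl::forward above copies elements from NCHW order (how the blob is stored) into NHWC order (the layout TensorFlow's Reshape semantics assume) before reshaping. A minimal sketch of the two flat-index formulas it uses; these helpers are illustrative only and not part of the patch.
// offsetNCHW matches src_i in the loop, offsetNHWC matches dst_i.
static inline int offsetNCHW(int n, int c, int h, int w, int C, int H, int W)
{
    return ((n * C + c) * H + h) * W + w;
}
static inline int offsetNHWC(int n, int c, int h, int w, int C, int H, int W)
{
    return ((n * H + h) * W + w) * C + c;
}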

@ -52,9 +52,10 @@ namespace dnn
class ReshapeLayerImpl : public ReshapeLayer
{
std::vector<BlobShape> outShapes;
bool enableReordering;
public:
ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_);
ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_, bool enableReordering_);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
@ -65,4 +66,5 @@ Ptr<Layer> createFlattenLayer(LayerParams&);
}
}
#endif

@ -0,0 +1,157 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of the shift layer, which adds constant values to a blob.
*/
#include "../precomp.hpp"
#include "shift_layer.hpp"
#include "op_blas.hpp"
namespace cv
{
namespace dnn
{
class ShiftLayerImpl {
public:
static Ptr<ShiftLayerImpl> create(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs,
const std::vector<Blob>& blobs);
virtual ~ShiftLayerImpl() {}
virtual void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs, const std::vector<Blob>& blobs) = 0;
protected:
ShiftLayerImpl() {}
virtual void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs, const std::vector<Blob>& blobs) = 0;
};
namespace {
class ShiftChannelsLayerImpl : public ShiftLayerImpl {
public:
virtual void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs, const std::vector<Blob>& blobs) {
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Blob &inpBlob = *inputs[ii];
Blob &outBlob = outputs[ii];
inpBlob.matRef().copyTo(outBlob.matRef());
for (int n = 0; n < inpBlob.num(); n++)
{
Mat dstMat(inpBlob.channels(), inpBlob.rows() * inpBlob.cols(),
outBlob.type(), outBlob.ptr(n));
dnn::gemm(blobs[0].matRefConst(), biasOnesMat, 1, dstMat, 1); //TODO: gemv
}
}
}
protected:
virtual void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs, const std::vector<Blob>& blobs) {
CV_Assert(inputs.size() > 0);
const Blob &inpBlob = *inputs[0];
CV_Assert(inpBlob.dims() == 4 && inpBlob.type() == CV_32F);
const Blob &biasBlob = blobs[0];
CV_Assert(biasBlob.total() == (size_t)inpBlob.channels());
outputs.resize(inputs.size());
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->type() == inpBlob.type());
CV_Assert(inputs[i]->dims() == 4 && inputs[i]->channels() == inpBlob.channels());
outputs[i].shareFrom(*inputs[i]);
}
biasOnesMat = Mat::ones(1, inpBlob.rows() * inpBlob.cols(), inpBlob.type());
}
private:
Mat biasOnesMat;
};
class ShiftElementsLayerImpl : public ShiftLayerImpl {
public:
virtual void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs, const std::vector<Blob>& blobs) {
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Blob &inpBlob = *inputs[ii];
Blob &outBlob = outputs[ii];
outBlob.matRef() = inpBlob.matRef() + blobs[0].matRefConst();
}
}
protected:
virtual void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs, const std::vector<Blob>& blobs) {
CV_Assert(inputs.size() > 0);
const Blob &inpBlob = *inputs[0];
CV_Assert(inpBlob.type() == CV_32F);
const Blob &biasBlob = blobs[0];
CV_Assert(biasBlob.dims() == inpBlob.dims());
outputs.resize(inputs.size());
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->type() == inpBlob.type());
CV_Assert(inputs[i]->dims() == inpBlob.dims());
outputs[i].shareFrom(*inputs[i]);
}
}
};
}
Ptr<ShiftLayerImpl> ShiftLayerImpl::create(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs,
const std::vector<Blob>& blobs) {
Ptr<ShiftLayerImpl> impl;
CV_Assert(inputs.size() > 0);
CV_Assert(blobs.size() > 0);
if(inputs[0]->dims() == blobs[0].dims())
impl = Ptr<ShiftLayerImpl>(new ShiftElementsLayerImpl);
else
impl = Ptr<ShiftLayerImpl>(new ShiftChannelsLayerImpl);
impl->allocate(inputs, outputs, blobs);
return impl;
}
ShiftLayer::ShiftLayer(LayerParams &params) : Layer(params)
{
CV_Assert(blobs.size() == 1);
#if HAVE_CBLAS
{
if (getBlasThreads() != cv::getThreadNum())
{
setBlasThreads(cv::getThreadNum());
}
}
#endif
}
void ShiftLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
impl = ShiftLayerImpl::create(inputs, outputs, blobs);
}
void ShiftLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
impl->forward(inputs, outputs, blobs);
}
}
}
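In ShiftChannelsLayerImpl::forward above, the per-channel bias is broadcast across all spatial positions with one rank-1 GEMM: dst (channels x plane) += bias (channels x 1) * ones (1 x plane). A hedged sketch of the equivalent naive loop, for clarity only; the helper below is hypothetical and not in the patch.
// What the gemm() broadcast computes for one image: every element of
// channel c in dst gets bias[c] added to it.
static void addChannelBias(float* dst, const float* bias, int channels, int plane)
{
    for (int c = 0; c < channels; ++c)
        for (int i = 0; i < plane; ++i)
            dst[c * plane + i] += bias[c];
}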

@ -0,0 +1,36 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Declaration of the shift layer, which adds constant values to a blob.
*/
#ifndef __OPENCV_DNN_LAYERS_SHIFT_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_SHIFT_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class ShiftLayerImpl;
class ShiftLayer : public Layer
{
cv::Ptr<ShiftLayerImpl> impl;
public:
ShiftLayer() {}
ShiftLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@ -0,0 +1,60 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "AttrValueProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "tensor.proto";
import "tensor_shape.proto";
import "types.proto";
// Protocol buffer representing the value for an attr used to configure an Op.
// Comment indicates the corresponding attr type. Only the field matching the
// attr type may be filled.
message AttrValue {
message ListValue {
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3 [packed = true]; // "list(int)"
repeated float f = 4 [packed = true]; // "list(float)"
repeated bool b = 5 [packed = true]; // "list(bool)"
repeated DataType type = 6 [packed = true]; // "list(type)"
repeated TensorShapeProto shape = 7; // "list(shape)"
repeated TensorProto tensor = 8; // "list(tensor)"
// TODO(zhifengc/josh11b): implements list(func) if needed.
}
oneof value {
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
DataType type = 6; // "type"
TensorShapeProto shape = 7; // "shape"
TensorProto tensor = 8; // "tensor"
ListValue list = 1; // any "list(...)"
// "func" represents a function. func.name is a function's name or
// a primitive op's name. func.attr.first is the name of an attr
// defined for that function. func.attr.second is the value for
// that attr in the instantiation.
NameAttrList func = 10;
// This is a placeholder only used in nodes defined inside a
// function. It indicates the attr value will be supplied when
// the function is instantiated. For example, let us suppose a
// node "N" in function "FN". "N" has an attr "A" with value
// placeholder = "foo". When FN is instantiated with attr "foo"
// set to "bar", the instantiated node N's attr A will have been
// given the value "bar".
string placeholder = 9;
}
}
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NameAttrList {
string name = 1;
map<string, AttrValue> attr = 2;
}

@ -0,0 +1,95 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "FunctionProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "attr_value.proto";
import "op_def.proto";
// A library is a set of named functions.
message FunctionDefLibrary {
repeated FunctionDef function = 1;
repeated GradientDef gradient = 2;
}
// A function can be instantiated when the runtime can bind every attr
// with a value. When a GraphDef has a call to a function, it must
// have binding for every attr defined in the signature.
//
// TODO(zhifengc):
// * device spec, etc.
message FunctionDef {
// The definition of the function's name, arguments, return values,
// attrs etc.
OpDef signature = 1;
// The body of the function.
repeated Node node = 2; // function.node.ret[*] are unique.
// A node is a multi-value assignment:
// (ret[0], ret[1], ...) = func(arg[0], arg[1], ...)
//
// By convention, "func" is resolved by consulting with a user-defined
// library first. If not resolved, "func" is assumed to be a builtin op.
message Node {
// This node produces multiple outputs. They are named ret[0],
// ret[1], ..., etc.
//
// REQUIRES: function.node.ret[*] are unique across all nodes.
// REQUIRES: ret.size == func/op def's number of output args.
repeated string ret = 1;
// The op/function name.
string op = 2;
// Arguments passed to this func/op.
//
// arg[i] must be either one of
// function.signature.input_args[*].name or one of
// function.node[*].ret[*].
//
// REQUIRES: arg.size == func/op def's number of input args.
repeated string arg = 3;
// Control dependencies.
//
// dep[i] must be one of function.node[*].ret[*] or one of
// function.signature.input_args[*].name.
repeated string dep = 4;
// Attrs.
//
// 'attr' maps names defined by 'func's attr defs to attr values.
// attr values may have placeholders which are substituted
// recursively by concrete values when this node is instantiated.
// These placeholders must name an attr listed in the FunctionDef's
// signature.
map<string, AttrValue> attr = 5;
}
}
// GradientDef defines the gradient function of a function defined in
// a function library.
//
// A gradient function g (specified by gradient_func) for a function f
// (specified by function_name) must follow the following:
//
// The function 'f' must be a numerical function which takes N inputs
// and produces M outputs. Its gradient function 'g' is a function
// taking N + M inputs and producing N outputs.
//
// I.e. if we have
// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
// then, g is
// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
// dL/dy1, dL/dy2, ..., dL/dy_M),
// where L is a scalar-valued function of (x1, x2, ..., x_N) (e.g., the
// loss function). dL/dx_i is the partial derivative of L with respect
// to x_i.
message GradientDef {
string function_name = 1; // The function name.
string gradient_func = 2; // The gradient function's name.
}

@ -0,0 +1,112 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "GraphProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "attr_value.proto";
import "function.proto";
import "versions.proto";
// Represents the graph of operations
message GraphDef {
repeated NodeDef node = 1;
// Compatibility versions of the graph. See core/public/version.h for version
// history. The GraphDef version is distinct from the TensorFlow version, and
// each release of TensorFlow will support a range of GraphDef versions.
VersionDef versions = 4;
// Deprecated single version field; use versions above instead. Since all
// GraphDef changes before "versions" was introduced were forward
// compatible, this field is entirely ignored.
int32 version = 3 [deprecated = true];
// EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
//
// "library" provides user-defined functions.
//
// Naming:
// * library.function.name are in a flat namespace.
// NOTE: We may need to change it to be hierarchical to support
// different orgs. E.g.,
// { "/google/nn", { ... }},
// { "/google/vision", { ... }}
// { "/org_foo/module_bar", {...}}
// map<string, FunctionDefLib> named_lib;
// * If node[i].op is the name of one function in "library",
// node[i] is deemed as a function call. Otherwise, node[i].op
// must be a primitive operation supported by the runtime.
//
//
// Function call semantics:
//
// * The callee may start execution as soon as some of its inputs
// are ready. The caller may want to use Tuple() mechanism to
// ensure all inputs are ready at the same time.
//
// * The consumer of return values may start executing as soon as
// the return values the consumer depends on are ready. The
// consumer may want to use Tuple() mechanism to ensure the
// consumer does not start until all return values of the callee
// function are ready.
FunctionDefLibrary library = 2;
};
message NodeDef {
// The name given to this operator. Used for naming inputs,
// logging, visualization, etc. Unique within a single GraphDef.
// Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*".
string name = 1;
// The operation name. There may be custom parameters in attrs.
// Op names starting with an underscore are reserved for internal use.
string op = 2;
// Each input is "node:src_output" with "node" being a string name and
// "src_output" indicating which output tensor to use from "node". If
// "src_output" is 0 the ":0" suffix can be omitted. Regular inputs
// may optionally be followed by control inputs that have the format
// "^node".
repeated string input = 3;
// A (possibly partial) specification for the device on which this
// node should be placed.
// The expected syntax for this string is as follows:
//
// DEVICE_SPEC ::= COLOCATED_NODE | PARTIAL_SPEC
//
// COLOCATED_NODE ::= "@" NODE_NAME // See NodeDef.name above.
// PARTIAL_SPEC ::= ("/" CONSTRAINT) *
// CONSTRAINT ::= ("job:" JOB_NAME)
// | ("replica:" [1-9][0-9]*)
// | ("task:" [1-9][0-9]*)
// | ( ("gpu" | "cpu") ":" ([1-9][0-9]* | "*") )
//
// Valid values for this string include:
// * "@other/node" (colocate with "other/node")
// * "/job:worker/replica:0/task:1/gpu:3" (full specification)
// * "/job:worker/gpu:3" (partial specification)
// * "" (no specification)
//
// If the constraints do not resolve to a single device (or if this
// field is empty or not present), the runtime will attempt to
// choose a device automatically.
string device = 4;
// Operation-specific graph-construction-time configuration.
// Note that this should include all attrs defined in the
// corresponding OpDef, including those with a value matching
// the default -- this allows the default to change and makes
// NodeDefs easier to interpret on their own. However, if
// an attr with a default is not specified in this list, the
// default will be used.
// The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and
// one of the names from the corresponding OpDef's attr field).
// The values must have a type matching the corresponding OpDef
// attr's type field.
// TODO(josh11b): Add some examples here showing best practices.
map<string, AttrValue> attr = 5;
};

@ -0,0 +1,157 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "OpDefProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "attr_value.proto";
import "types.proto";
// Defines an operation. A NodeDef in a GraphDef specifies an Op by
// using the "op" field which should match the name of a OpDef.
message OpDef {
// Op names starting with an underscore are reserved for internal use.
// Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9_]*".
string name = 1;
// For describing inputs and outputs.
message ArgDef {
// Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*".
string name = 1;
// Human readable description.
string description = 2;
// Describes the type of one or more tensors that are accepted/produced
// by this input/output arg. The only legal combinations are:
// * For a single tensor: either the "type" field is set or the
// "type_attr" field is set to the name of an attr with type "type".
// * For a sequence of tensors with the same type: the "number_attr"
// field will be set to the name of an attr with type "int", and
// either the "type" or "type_attr" field will be set as for
// single tensors.
// * For a sequence of tensors, the "type_list_attr" field will be set
// to the name of an attr with type "list(type)".
DataType type = 3;
string type_attr = 4; // if specified, attr must have type "type"
string number_attr = 5; // if specified, attr must have type "int"
// If specified, attr must have type "list(type)", and none of
// type, type_attr, and number_attr may be specified.
string type_list_attr = 6;
// For inputs: if true, the inputs are required to be refs.
// By default, inputs can be either refs or non-refs.
// For outputs: if true, outputs are refs, otherwise they are not.
bool is_ref = 16;
};
// Description of the input(s).
repeated ArgDef input_arg = 2;
// Description of the output(s).
repeated ArgDef output_arg = 3;
// Description of the graph-construction-time configuration of this
// Op. That is to say, this describes the attr fields that will
// be specified in the NodeDef.
message AttrDef {
// A descriptive name for the argument. May be used, e.g. by the
// Python client, as a keyword argument name, and so should match
// the regexp "[a-z][a-z0-9_]+".
string name = 1;
// One of the type names from attr_value.proto ("string", "list(string)",
// "int", etc.).
string type = 2;
// A reasonable default for this attribute if the user does not supply
// a value. If not specified, the user must supply a value.
AttrValue default_value = 3;
// Human-readable description.
string description = 4;
// TODO(josh11b): bool is_optional?
// --- Constraints ---
// These constraints are only in effect if specified. Default is no
// constraints.
// For type == "int", this is a minimum value. For "list(___)"
// types, this is the minimum length.
bool has_minimum = 5;
int64 minimum = 6;
// The set of allowed values. Has type that is the "list" version
// of the "type" field above (uses the "list" field of AttrValue).
// If type == "type" or "list(type)" above, then the "type" field
// of "allowed_values.list" has the set of allowed DataTypes.
// If type == "string" or "list(string)", then the "s" field of
// "allowed_values.list" has the set of allowed strings.
AttrValue allowed_values = 7;
}
repeated AttrDef attr = 4;
// Optional deprecation based on GraphDef versions.
OpDeprecation deprecation = 8;
// One-line human-readable description of what the Op does.
string summary = 5;
// Additional, longer human-readable description of what the Op does.
string description = 6;
// -------------------------------------------------------------------------
// Which optimizations this operation can participate in.
// True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs)
bool is_commutative = 18;
// If is_aggregate is true, then this operation accepts N >= 2
// inputs and produces 1 output all of the same type. Should be
// associative and commutative, and produce output with the same
// shape as the input. The optimizer may replace an aggregate op
// taking input from multiple devices with a tree of aggregate ops
// that aggregate locally within each device (and possibly within
// groups of nearby devices) before communicating.
// TODO(josh11b): Implement that optimization.
bool is_aggregate = 16; // for things like add
// Other optimizations go here, like
// can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc.
// -------------------------------------------------------------------------
// Optimization constraints.
// By default Ops may be moved between devices. Stateful ops should
// either not be moved, or should only be moved if that state can also
// be moved (e.g. via some sort of save / restore).
// Stateful ops are guaranteed to never be optimized away by Common
// Subexpression Elimination (CSE).
bool is_stateful = 17; // for things like variables, queue
// -------------------------------------------------------------------------
// Non-standard options.
// By default, all inputs to an Op must be initialized Tensors. Ops
// that may initialize tensors for the first time should set this
// field to true, to allow the Op to take an uninitialized Tensor as
// input.
bool allows_uninitialized_input = 19; // for Assign, etc.
};
// Information about version-dependent deprecation of an op
message OpDeprecation {
// First GraphDef version at which the op is disallowed.
int32 version = 1;
// Explanation of why it was deprecated and what to use instead.
string explanation = 2;
};
// A collection of OpDefs
message OpList {
repeated OpDef op = 1;
};

@ -0,0 +1,68 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "TensorProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "tensor_shape.proto";
import "types.proto";
// Protocol buffer representing a tensor.
message TensorProto {
DataType dtype = 1;
// Shape of the tensor. TODO(touts): sort out the 0-rank issues.
TensorShapeProto tensor_shape = 2;
// Only one of the representations below is set, one of "tensor_contents" and
// the "xxx_val" attributes. We are not using oneof because as oneofs cannot
// contain repeated fields it would require another extra set of messages.
// Version number.
//
// In version 0, if the "repeated xxx" representations contain only one
// element, that element is repeated to fill the shape. This makes it easy
// to represent a constant Tensor with a single value.
int32 version_number = 3;
// Serialized content from Tensor::AsProtoTensorContent(). This representation
// can be used for all tensor types.
bytes tensor_content = 4;
// Type specific representations that make it easy to create tensor protos in
// all languages. Only the representation corresponding to "dtype" can
// be set. The values hold the flattened representation of the tensor in
// row major order.
// DT_HALF. Note that since protobuf has no int16 type, we'll have some
// pointless zero padding for each value here.
repeated int32 half_val = 13 [packed = true];
// DT_FLOAT.
repeated float float_val = 5 [packed = true];
// DT_DOUBLE.
repeated double double_val = 6 [packed = true];
// DT_INT32, DT_INT16, DT_INT8, DT_UINT8.
repeated int32 int_val = 7 [packed = true];
// DT_STRING
repeated bytes string_val = 8;
// DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real
// and imaginary parts of i-th single precision complex.
repeated float scomplex_val = 9 [packed = true];
// DT_INT64
repeated int64 int64_val = 10 [packed = true];
// DT_BOOL
repeated bool bool_val = 11 [packed = true];
// DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real
// and imaginary parts of i-th double precision complex.
repeated double dcomplex_val = 12 [packed = true];
};

@ -0,0 +1,45 @@
// Protocol buffer representing the shape of tensors.
syntax = "proto3";
option cc_enable_arenas = true;
option java_outer_classname = "TensorShapeProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
package tensorflow;
// Dimensions of a tensor.
message TensorShapeProto {
// One dimension of the tensor.
message Dim {
// Size of the tensor in that dimension.
// This value must be >= -1, but values of -1 are reserved for "unknown"
// shapes (values of -1 mean "unknown" dimension). Certain wrappers
// that work with TensorShapeProto may fail at runtime when deserializing
// a TensorShapeProto containing a dim value of -1.
int64 size = 1;
// Optional name of the tensor dimension.
string name = 2;
};
// Dimensions of the tensor, such as {"input", 30}, {"output", 40}
// for a 30 x 40 2D tensor. If an entry has size -1, this
// corresponds to a dimension of unknown size. The names are
// optional.
//
// The order of entries in "dim" matters: It indicates the layout of the
// values in the tensor in-memory representation.
//
// The first entry in "dim" is the outermost dimension used to layout the
// values, the last entry is the innermost dimension. This matches the
// in-memory layout of RowMajor Eigen tensors.
//
// If "dim.size()" > 0, "unknown_rank" must be false.
repeated Dim dim = 2;
// If true, the number of dimensions in the shape is unknown.
//
// If true, "dim.size()" must be 0.
bool unknown_rank = 3;
};
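The comment above specifies a row-major layout: dim(0) is the outermost dimension and the last dim is the innermost. A small illustrative helper (not in the patch) computing the flat offset of a multi-index under that layout:
#include <vector>
static long long flatOffset(const std::vector<long long>& dims,
                            const std::vector<long long>& index)
{
    long long off = 0;
    for (size_t d = 0; d < dims.size(); ++d)
        off = off * dims[d] + index[d];   // last dimension varies fastest
    return off;
}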

@ -0,0 +1,749 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of the TensorFlow models parser
*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::dnn;
#if HAVE_PROTOBUF
#include "graph.pb.h"
#include <iostream>
#include <fstream>
#include <algorithm>
#include <string>
#include <google/protobuf/message.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include "tf_io.hpp"
using ::google::protobuf::RepeatedField;
using ::google::protobuf::RepeatedPtrField;
using ::google::protobuf::Message;
using ::google::protobuf::Descriptor;
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::Reflection;
namespace
{
static int toNCHW[] = {0, 2, 3, 1};
typedef std::vector<std::pair<String, int> > StrIntVector;
struct Pin
{
Pin(const std::string &_name, int _blobIndex = 0) :
name(_name), blobIndex(_blobIndex) {}
Pin() :
name(""), blobIndex(-1) {}
std::string name;
int blobIndex;
};
BlobShape blobShapeFromTensor(const tensorflow::TensorProto &tensor)
{
if (tensor.has_tensor_shape())
{
const tensorflow::TensorShapeProto &_shape = tensor.tensor_shape();
BlobShape shape = BlobShape::all(_shape.dim_size());
for (int i = 0; i < _shape.dim_size(); i++)
shape[i] = (int)_shape.dim(i).size();
return shape;
}
else
{
CV_Error(Error::StsError, "Unknown shape of input tensor");
return BlobShape();
}
}
template <typename T>
void parseTensor(const tensorflow::TensorProto &tensor, Blob &dstBlob)
{
BlobShape shape = blobShapeFromTensor(tensor);
if (shape.dims() == 4)
{
// REORDER blob NHWC to NCHW
swap(shape[2], shape[3]); // NHCW
swap(shape[1], shape[2]); // NCHW
}
dstBlob.create(shape, CV_32F);
int size = tensor.tensor_content().size() / sizeof(T);
CV_Assert(size == (int)dstBlob.matRefConst().total());
float *dstData = dstBlob.matRef().ptr<float>();
const T *data = reinterpret_cast<const T*>(tensor.tensor_content().c_str());
if (shape.dims() == 4)
{
int num = shape[0], channels = shape[1], height = shape[2], width = shape[3];
int total = num*channels*height*width;
for(int i_n = 0; i_n < shape[0]; i_n++) {
for(int i_c = 0; i_c < shape[1]; i_c++) {
for(int i_h = 0; i_h < shape[2]; i_h++) {
for(int i_w = 0; i_w < shape[3]; i_w++) {
int dst_i = channels*height*width*i_n + height*width*i_c + width*i_h + i_w;
int src_i = channels*height*width*i_n + i_c + channels*width*i_h + channels*i_w;
CV_Assert(dst_i < total);
CV_Assert(src_i < total);
dstData[dst_i] = data[src_i];
}
}
}
}
} else {
for (int i = 0; i < size; i++)
dstData[i] = data[i];
}
}
void blobFromTensor(const tensorflow::TensorProto &tensor, Blob &dstBlob)
{
switch (tensor.dtype()) {
case tensorflow::DT_FLOAT:
parseTensor<float>(tensor, dstBlob);
break;
case tensorflow::DT_DOUBLE:
parseTensor<double>(tensor, dstBlob);
break;
default:
CV_Error(Error::StsError, "Tensor's data type is not supported");
break;
}
}
void printList(const tensorflow::AttrValue::ListValue &val)
{
std::cout << "(";
for (int i = 0; i < val.i_size(); i++)
std::cout << " " << val.i(i);
std::cout << " )";
}
void printTensorShape(const tensorflow::TensorShapeProto &shape)
{
std::cout << "[ ";
for (int d = 0; d < shape.dim_size(); d++)
std::cout << shape.dim(d).name() <<
":" << shape.dim(d).size() << " ";
std::cout << "]";
}
void printTensor(const tensorflow::TensorProto &tensor)
{
printTensorShape(tensor.tensor_shape());
if (tensor.tensor_content().empty())
return;
switch (tensor.dtype())
{
case 1: // float
{
const float *data = reinterpret_cast<const float*>(tensor.tensor_content().c_str());
int size = tensor.tensor_content().size() / sizeof(float);
for (int i = 0; i < std::min(10, size); i++)
std::cout << " " << data[i];
if (size > 10)
std::cout << " ... " << size - 10 << " more";
break;
}
case 3: // int32
{
const int *data = reinterpret_cast<const int*>(tensor.tensor_content().c_str());
int size = tensor.tensor_content().size() / sizeof(int);
for (int i = 0; i < std::min(10, size); i++)
std::cout << " " << data[i];
if (size > 10)
std::cout << " ... " << size - 10 << " more";
break;
}
default:
CV_Error(Error::StsError, "Tensor type is not supported");
break;
}
}
void printLayerAttr(const tensorflow::NodeDef &layer)
{
std::cout << std::endl << layer.name() << ":" << layer.op();
for (int ii = 0; ii < layer.input_size(); ii++)
std::cout << "(" << layer.input(ii) << ")";
std::cout << std::endl;
google::protobuf::Map<std::string, tensorflow::AttrValue> attr
= layer.attr();
for (google::protobuf::Map<std::string, tensorflow::AttrValue>::const_iterator ai = attr.begin();
ai != attr.end(); ++ai)
{
std::cout << ai->first << ":";
if (ai->first == "dtype" || ai->first == "T")
std::cout << ai->second.i();
else if (ai->first == "padding")
std::cout << ai->second.s();
else if (ai->first == "transpose_a" || ai->first == "transpose_b")
std::cout << ai->second.b();
// else if (ai->first == "shape")
// printTensorShape(ai->second.shape());
else if (ai->first == "strides" || ai->first == "ksize")
printList(ai->second.list());
else
printTensor(ai->second.tensor());
std::cout << std::endl;
}
}
bool hasLayerAttr(const tensorflow::NodeDef &layer, const std::string &name)
{
google::protobuf::Map<std::string, tensorflow::AttrValue> attr = layer.attr();
return attr.find(name) != attr.end();
}
const tensorflow::AttrValue& getLayerAttr(const tensorflow::NodeDef &layer, const std::string &name)
{
return layer.attr().at(name);
}
void setStrides(LayerParams &layerParams, const tensorflow::NodeDef &layer)
{
if (hasLayerAttr(layer, "strides"))
{
const tensorflow::AttrValue& val = getLayerAttr(layer, "strides");
if (val.list().i_size() != 4 ||
val.list().i(0) != 1 || val.list().i(3) != 1)
CV_Error(Error::StsError, "Unsupported strides");
layerParams.set("stride_h", static_cast<int>(val.list().i(1)));
layerParams.set("stride_w", static_cast<int>(val.list().i(2)));
}
}
DictValue parseDims(const tensorflow::TensorProto &tensor) {
BlobShape shape = blobShapeFromTensor(tensor);
CV_Assert(tensor.dtype() == tensorflow::DT_INT32);
CV_Assert(shape.dims() == 1);
int size = tensor.tensor_content().size() / sizeof(int);
const int *data = reinterpret_cast<const int*>(tensor.tensor_content().c_str());
// TODO: add reordering shape if dims == 4
return DictValue::arrayInt(data, size);
}
void setKSize(LayerParams &layerParams, const tensorflow::NodeDef &layer)
{
if (hasLayerAttr(layer, "ksize"))
{
const tensorflow::AttrValue& val = getLayerAttr(layer, "ksize");
if (val.list().i_size() != 4 ||
val.list().i(0) != 1 || val.list().i(3) != 1)
CV_Error(Error::StsError, "Unsupported ksize");
layerParams.set("kernel_h", static_cast<int>(val.list().i(1)));
layerParams.set("kernel_w", static_cast<int>(val.list().i(2)));
}
else
{
layerParams.set("kernel_h", 1);
layerParams.set("kernel_w", 1);
}
}
void setPadding(LayerParams &layerParams, const tensorflow::NodeDef &layer)
{
if (hasLayerAttr(layer, "padding"))
layerParams.set("pad_mode", getLayerAttr(layer, "padding").s());
}
void RemoveIdentityOps(tensorflow::GraphDef& net) {
typedef std::map<String, String> IdentityOpsMap;
IdentityOpsMap identity_ops;
std::vector<int> identity_ops_idx;
int layersCount = net.node_size();
for (int li = 0; li < layersCount; li++)
{
const tensorflow::NodeDef &layer = net.node(li);
String type = layer.op();
if (type == "Identity") {
identity_ops_idx.push_back(li);
identity_ops[layer.name()] = layer.input(0);
}
}
for (int li = 0; li < layersCount; li++)
{
tensorflow::NodeDef* layer = net.mutable_node(li);
for (int input_id = 0; input_id < layer->input_size(); input_id++) {
String input_op_name = layer->input(input_id);
IdentityOpsMap::iterator it = identity_ops.find(input_op_name);
if (it != identity_ops.end()) {
layer->set_input(input_id, it->second);
}
}
}
std::sort(identity_ops_idx.begin(), identity_ops_idx.end());
int removed_nodes = 0;
for(size_t i = 0; i < identity_ops_idx.size(); i++) {
int start_id = identity_ops_idx[i] - removed_nodes;
net.mutable_node()->DeleteSubrange(start_id, 1);
removed_nodes++;
}
}
Pin parsePin(const std::string &name)
{
Pin pin(name);
size_t delimiter_pos = name.find_first_of(":");
if (delimiter_pos != std::string::npos)
{
pin.name = name.substr(0, delimiter_pos);
std::istringstream(name.substr(delimiter_pos + 1)) >> pin.blobIndex;
}
return pin;
}
StrIntVector getNextLayers(const tensorflow::GraphDef& net, const String& layer_name, const String& type = "")
{
StrIntVector layers;
for (int li = 0; li < net.node_size(); li++)
{
const tensorflow::NodeDef& layer = net.node(li);
for (int input_id = 0; input_id < layer.input_size(); input_id++) {
String input_op_name = parsePin(layer.input(input_id)).name;
bool type_ok = type.empty() ? true : type == layer.op();
if (input_op_name == layer_name && type_ok)
layers.push_back(std::make_pair(layer.name(), li));
}
}
return layers;
}
void ExcludeLayer(tensorflow::GraphDef& net, const int layer_index, const int input_blob_index, bool remove_from_net = true) {
String layer_name = net.node(layer_index).name();
StrIntVector layers = getNextLayers(net, layer_name);
String removed_layer_input = net.node(layer_index).input(input_blob_index);
for (size_t i = 0; i < layers.size(); i++)
{
tensorflow::NodeDef* layer = net.mutable_node(layers[i].second);
for (int input_id = 0; input_id < layer->input_size(); input_id++) {
String input_op_name = layer->input(input_id);
if (input_op_name == layer_name) {
layer->set_input(input_id, removed_layer_input);
}
}
}
if (remove_from_net)
net.mutable_node()->DeleteSubrange(layer_index, 1);
}
class TFImporter : public Importer {
public:
TFImporter(const char *model);
void populateNet(Net dstNet);
~TFImporter() {}
private:
void kernelFromTensor(const tensorflow::TensorProto &tensor, Blob &dstBlob);
void connect(const std::map<String, int>& layers_name_id_map, Net& network, const Pin& outPin,
const int input_layer_id, const int input_blob_id);
void connectToAllBlobs(const std::map<String, int>& layer_id, Net& network, const Pin& outPin,
const int input_layer_id, const int input_blobs_count);
const tensorflow::TensorProto& getConstBlob(const tensorflow::NodeDef &layer, std::map<String, int> const_layers,
int input_blob_index = -1, int* actual_inp_blob_idx = 0);
tensorflow::GraphDef net;
};
TFImporter::TFImporter(const char *model)
{
if (model && model[0])
ReadTFNetParamsFromBinaryFileOrDie(model, &net);
}
void TFImporter::kernelFromTensor(const tensorflow::TensorProto &tensor, Blob &dstBlob)
{
BlobShape shape = blobShapeFromTensor(tensor);
// TODO: other blob types
CV_Assert(tensor.dtype() == tensorflow::DT_FLOAT);
CV_Assert(shape.dims() == 4);
// REORDER kernel HWIO to OIHW
swap(shape[0], shape[2]); // IWHO
swap(shape[1], shape[3]); // IOHW
swap(shape[0], shape[1]); // OIHW
dstBlob.create(shape, CV_32F);
int size = tensor.tensor_content().size() / sizeof(float);
CV_Assert(size == (int)dstBlob.matRefConst().total());
float *dstData = dstBlob.matRef().ptr<float>();
const float *data = reinterpret_cast<const float*>(tensor.tensor_content().c_str());
int out_c = shape[0], input_c = shape[1], height = shape[2], width = shape[3];
int total = out_c*input_c*height*width;
for(int i_oc = 0; i_oc < out_c; i_oc++) {
for(int i_ic = 0; i_ic < input_c; i_ic++) {
for(int i_h = 0; i_h < height; i_h++) {
for(int i_w = 0; i_w < width; i_w++) {
int dst_i = input_c*height*width*i_oc + height*width*i_ic + width*i_h + i_w;
int src_i = out_c*input_c*width*i_h + out_c*input_c*i_w + out_c*i_ic + i_oc;
CV_Assert(dst_i < total);
CV_Assert(src_i < total);
dstData[dst_i] = data[src_i];
}
}
}
}
}
void TFImporter::connect(const std::map<String, int>& layers_name_id_map, Net& network, const Pin& outPin,
const int input_layer_id, const int input_blob_id)
{
std::map<String, int>::const_iterator it = layers_name_id_map.find(outPin.name);
if (it == layers_name_id_map.end())
CV_Error(Error::StsError, "Input layer not found: " + outPin.name);
network.connect(it->second, outPin.blobIndex, input_layer_id, input_blob_id);
}
void TFImporter::connectToAllBlobs(const std::map<String, int>& layer_id, Net& network, const Pin& outPin,
const int input_layer_id, const int input_blobs_count)
{
for (int input_blob_id = 0; input_blob_id < input_blobs_count; input_blob_id++)
connect(layer_id, network, outPin, input_layer_id, input_blob_id);
}
const tensorflow::TensorProto& TFImporter::getConstBlob(const tensorflow::NodeDef &layer, std::map<String, int> const_layers,
int input_blob_index, int* actual_inp_blob_idx) {
if (input_blob_index == -1) {
for(int i = 0; i < layer.input_size(); i++) {
Pin input = parsePin(layer.input(i));
if (const_layers.find(input.name) != const_layers.end()) {
if (input_blob_index != -1)
CV_Error(Error::StsError, "More than one input is Const op");
input_blob_index = i;
}
}
}
if (input_blob_index == -1)
CV_Error(Error::StsError, "Const input blob for weights not found");
Pin kernel_inp = parsePin(layer.input(input_blob_index));
if (const_layers.find(kernel_inp.name) == const_layers.end())
CV_Error(Error::StsError, "Const kernel input not found");
if (kernel_inp.blobIndex != 0)
CV_Error(Error::StsError, "Unsupported kernel input");
if(actual_inp_blob_idx) {
*actual_inp_blob_idx = input_blob_index;
}
return net.node(const_layers.at(kernel_inp.name)).attr().at("value").tensor();
}
void TFImporter::populateNet(Net dstNet)
{
RemoveIdentityOps(net);
std::map<int, String> layers_to_ignore;
int layersSize = net.node_size();
// find all Const layers for params
std::map<String, int> value_id;
for (int li = 0; li < layersSize; li++)
{
const tensorflow::NodeDef &layer = net.node(li);
String name = layer.name();
String type = layer.op();
if (type != "Const")
continue; // only Const parameters are supported
if (layer.attr().find("value") != layer.attr().end())
{
value_id.insert(std::make_pair(name, li));
}
layers_to_ignore[li] = name;
}
std::map<String, int> layer_id;
for (int li = 0; li < layersSize; li++)
{
const tensorflow::NodeDef &layer = net.node(li);
String name = layer.name();
String type = layer.op();
LayerParams layerParams;
if(layers_to_ignore.find(li) != layers_to_ignore.end())
continue;
if (type == "Conv2D")
{
layerParams.set("bias_term", false);
layerParams.blobs.resize(1);
StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
if (next_layers.size() == 1) {
layerParams.set("bias_term", true);
layerParams.blobs.resize(2);
int weights_layer_index = next_layers[0].second;
blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
ExcludeLayer(net, weights_layer_index, 0, false);
layers_to_ignore[weights_layer_index] = next_layers[0].first;
}
kernelFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
BlobShape kshape = layerParams.blobs[0].shape();
layerParams.set("kernel_h", kshape[2]);
layerParams.set("kernel_w", kshape[3]);
layerParams.set("num_output", kshape[0]);
setStrides(layerParams, layer);
setPadding(layerParams, layer);
int id = dstNet.addLayer(name, "Convolution", layerParams);
layer_id[name] = id;
// one input only
connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
}
else if (type == "BiasAdd" || type == "Add")
{
layerParams.blobs.resize(1);
blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
int id = dstNet.addLayer(name, "Shift", layerParams);
layer_id[name] = id;
// one input only
connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
}
else if (type == "Identity")
{
int id = dstNet.addLayer(name, "Identity", layerParams);
layer_id[name] = id;
connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
}
else if (type == "MatMul")
{
CV_Assert(layer.input_size() == 2);
layerParams.set("axis", 0);
layerParams.set("bias_term", false);
layerParams.blobs.resize(1);
StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
if (next_layers.size() == 1) {
layerParams.set("bias_term", true);
layerParams.blobs.resize(2);
int weights_layer_index = next_layers[0].second;
blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
ExcludeLayer(net, weights_layer_index, 0, false);
layers_to_ignore[weights_layer_index] = next_layers[0].first;
}
int kernel_blob_index = -1;
blobFromTensor(getConstBlob(layer, value_id, -1, &kernel_blob_index), layerParams.blobs[0]);
if (kernel_blob_index == 1) { // In this case the output is computed as x*W, so W should be transposed
Mat data = layerParams.blobs[0].matRef().t();
BlobShape shape(data.rows, data.cols);
layerParams.blobs[0].fill(shape, layerParams.blobs[0].type(), data.data);
}
BlobShape kshape = layerParams.blobs[0].shape();
layerParams.set("num_output", kshape[0]);
int id = dstNet.addLayer(name, "InnerProduct", layerParams);
layer_id[name] = id;
// one input only
int input_blob_index = kernel_blob_index == 0 ? 1 : 0;
connect(layer_id, dstNet, parsePin(layer.input(input_blob_index)), id, 0);
}
else if (type == "Reshape")
{
layerParams.set("dim", parseDims(getConstBlob(layer, value_id, 1)));
layerParams.set("reorder_dims", true);
int id = dstNet.addLayer(name, "Reshape", layerParams);
layer_id[name] = id;
// one input only
connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
}
else if (type == "Const")
{
}
else if (type == "Softmax")
{
layerParams.set("axis", -1);
int id = dstNet.addLayer(name, "Softmax", layerParams);
layer_id[name] = id;
connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
}
else if (type == "LRN")
{
if(hasLayerAttr(layer, "alpha")) {
layerParams.set("alpha", getLayerAttr(layer, "alpha").f());
}
if(hasLayerAttr(layer, "beta")) {
layerParams.set("beta", getLayerAttr(layer, "beta").f());
}
if(hasLayerAttr(layer, "depth_radius")) {
int radius = (int)getLayerAttr(layer, "depth_radius").i();
layerParams.set("local_size", 2*radius + 1);
}
if(hasLayerAttr(layer, "bias")) {
layerParams.set("bias", getLayerAttr(layer, "bias").f());
}
layerParams.set("norm_sz", false);
int id = dstNet.addLayer(name, "LRN", layerParams);
layer_id[name] = id;
connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
}
else if (type == "Concat")
{
int axis = getConstBlob(layer, value_id, 0).int_val().Get(0);
layerParams.set("axis", toNCHW[axis]);
int id = dstNet.addLayer(name, "Concat", layerParams);
layer_id[name] = id;
// input(0) is concat_dim
for (int ii = 1; ii < layer.input_size(); ii++)
{
Pin inp = parsePin(layer.input(ii));
if (layer_id.find(inp.name) == layer_id.end())
CV_Error(Error::StsError, "Input layer not found: " + inp.name);
dstNet.connect(layer_id.at(inp.name), inp.blobIndex, id, ii - 1);
}
}
else if (type == "Relu")
{
int id = dstNet.addLayer(name, "ReLU", layerParams);
layer_id[name] = id;
connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
}
else if (type == "MaxPool")
{
layerParams.set("pool", "max");
setKSize(layerParams, layer);
setStrides(layerParams, layer);
setPadding(layerParams, layer);
int id = dstNet.addLayer(name, "Pooling", layerParams);
layer_id[name] = id;
connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
}
else if (type == "AvgPool")
{
layerParams.set("pool", "ave");
setKSize(layerParams, layer);
setStrides(layerParams, layer);
setPadding(layerParams, layer);
int id = dstNet.addLayer(name, "Pooling", layerParams);
layer_id[name] = id;
connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
}
else if (type == "Placeholder")
{
std::vector<String> netInputs(1);
netInputs[0] = name;
layer_id[name] = 0;
dstNet.setNetInputs(netInputs);
}
else if (type == "Split") {
// TODO: determine axis index remapping from the dimension order of the input blob
// TODO: slicing input may be Const op
// TODO: slicing kernels for convolutions - impossible in the current implementation
// TODO: add parsing num of slices parameter
CV_Assert(layer.input_size() == 2);
// num_split
// 1st blob is dims tensor
layerParams.set("slice_point", DictValue::arrayReal((double*)0, 0));
int axis = getConstBlob(layer, value_id, 0).int_val().Get(0);
layerParams.set("axis", toNCHW[axis]);
int id = dstNet.addLayer(name, "Slice", layerParams);
layer_id[name] = id;
// one input only
connect(layer_id, dstNet, parsePin(layer.input(1)), id, 0);
}
else
{
printLayerAttr(layer);
CV_Error_(Error::StsError, ("Unknown layer type %s in op %s", type.c_str(), name.c_str()));
}
}
}
} // namespace
Ptr<Importer> cv::dnn::createTensorflowImporter(const String &model)
{
return Ptr<Importer>(new TFImporter(model.c_str()));
}
#else //HAVE_PROTOBUF
Ptr<Importer> cv::dnn::createTensorflowImporter(const String&)
{
CV_Error(cv::Error::StsNotImplemented, "libprotobuf required to import data from TensorFlow models");
return Ptr<Importer>();
}
#endif //HAVE_PROTOBUF
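kernelFromTensor above converts convolution weights from TensorFlow's HWIO layout (height, width, input channels, output channels) to the OIHW layout used by the dnn module. A minimal sketch of the two flat-index formulas behind src_i and dst_i; these helpers are illustrative only and not part of the patch.
static inline int offsetHWIO(int h, int w, int i, int o, int W, int I, int O)
{
    return ((h * W + w) * I + i) * O + o;   // matches src_i
}
static inline int offsetOIHW(int o, int i, int h, int w, int I, int H, int W)
{
    return ((o * I + i) * H + h) * W + w;   // matches dst_i
}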

@ -0,0 +1,63 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of various functions related to reading TensorFlow models.
*/
#if HAVE_PROTOBUF
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
#include <opencv2/core.hpp>
#include <map>
#include <string>
#include <fstream>
#include <vector>
#include "graph.pb.h"
#include "tf_io.hpp"
#include "../caffe/glog_emulator.hpp"
namespace cv {
namespace dnn {
using std::string;
using std::map;
using namespace tensorflow;
using namespace ::google::protobuf;
using namespace ::google::protobuf::io;
const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte.
// TODO: remove Caffe duplicate
bool ReadProtoFromBinaryFileTF(const char* filename, Message* proto) {
std::ifstream fs(filename, std::ifstream::in | std::ifstream::binary);
CHECK(fs.is_open()) << "Can't open \"" << filename << "\"";
ZeroCopyInputStream* raw_input = new IstreamInputStream(&fs);
CodedInputStream* coded_input = new CodedInputStream(raw_input);
coded_input->SetTotalBytesLimit(kProtoReadBytesLimit, 536870912);
bool success = proto->ParseFromCodedStream(coded_input);
delete coded_input;
delete raw_input;
fs.close();
return success;
}
void ReadTFNetParamsFromBinaryFileOrDie(const char* param_file,
tensorflow::GraphDef* param) {
CHECK(ReadProtoFromBinaryFileTF(param_file, param))
<< "Failed to parse GraphDef file: " << param_file;
}
}
}
#endif
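A minimal usage sketch of the reader above; the model path is a placeholder, everything else follows the declarations in tf_io.hpp.
#include "graph.pb.h"
#include "tf_io.hpp"
int main()
{
    tensorflow::GraphDef graph;
    cv::dnn::ReadTFNetParamsFromBinaryFileOrDie("tensorflow_inception_graph.pb", &graph);
    return graph.node_size() > 0 ? 0 : 1;   // the parsed graph now holds all NodeDefs
}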

@ -0,0 +1,29 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Declaration of various functions related to reading TensorFlow models.
*/
#ifndef __OPENCV_DNN_TF_IO_HPP__
#define __OPENCV_DNN_TF_IO_HPP__
#if HAVE_PROTOBUF
#include "graph.pb.h"
namespace cv {
namespace dnn {
// Read parameters from a file into a GraphDef proto message.
void ReadTFNetParamsFromBinaryFileOrDie(const char* param_file,
tensorflow::GraphDef* param);
}
}
#endif
#endif

@ -0,0 +1,60 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "TypesProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
enum DataType {
// Not a legal value for DataType. Used to indicate a DataType field
// has not been set.
DT_INVALID = 0;
// Data types that all computation devices are expected to be
// capable to support.
DT_FLOAT = 1;
DT_DOUBLE = 2;
DT_INT32 = 3;
DT_UINT8 = 4;
DT_INT16 = 5;
DT_INT8 = 6;
DT_STRING = 7;
DT_COMPLEX64 = 8; // Single-precision complex
DT_INT64 = 9;
DT_BOOL = 10;
DT_QINT8 = 11; // Quantized int8
DT_QUINT8 = 12; // Quantized uint8
DT_QINT32 = 13; // Quantized int32
DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops.
DT_QINT16 = 15; // Quantized int16
DT_QUINT16 = 16; // Quantized uint16
DT_UINT16 = 17;
DT_COMPLEX128 = 18; // Double-precision complex
DT_HALF = 19;
// TODO(josh11b): DT_GENERIC_PROTO = ??;
// TODO(jeff,josh11b): DT_UINT64? DT_UINT32?
// Do not use! These are only for parameters. Every enum above
// should have a corresponding value below (verified by types_test).
DT_FLOAT_REF = 101;
DT_DOUBLE_REF = 102;
DT_INT32_REF = 103;
DT_UINT8_REF = 104;
DT_INT16_REF = 105;
DT_INT8_REF = 106;
DT_STRING_REF = 107;
DT_COMPLEX64_REF = 108;
DT_INT64_REF = 109;
DT_BOOL_REF = 110;
DT_QINT8_REF = 111;
DT_QUINT8_REF = 112;
DT_QINT32_REF = 113;
DT_BFLOAT16_REF = 114;
DT_QINT16_REF = 115;
DT_QUINT16_REF = 116;
DT_UINT16_REF = 117;
DT_COMPLEX128_REF = 118;
DT_HALF_REF = 119;
}

@ -0,0 +1,31 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "VersionsProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
// Version information for a piece of serialized data
//
// There are different types of versions for each type of data
// (GraphDef, etc.), but they all have the same common shape
// described here.
//
// Each consumer has "consumer" and "min_producer" versions (specified
// elsewhere). A consumer is allowed to consume this data if
//
// producer >= min_producer
// consumer >= min_consumer
// consumer not in bad_consumers
//
message VersionDef {
// The version of the code that produced this data.
int32 producer = 1;
// Any consumer below this version is not allowed to consume this data.
int32 min_consumer = 2;
// Specific consumer versions which are disallowed (e.g. due to bugs).
repeated int32 bad_consumers = 3;
};
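An illustrative predicate (not in the patch) expressing the consumption rule from the comment above: a consumer may read the data only if the producer is recent enough, the consumer itself is recent enough, and the consumer is not blacklisted.
#include <algorithm>
static bool canConsume(const tensorflow::VersionDef& v, int consumer, int min_producer)
{
    bool bad = std::find(v.bad_consumers().begin(), v.bad_consumers().end(),
                         consumer) != v.bad_consumers().end();
    return v.producer() >= min_producer && consumer >= v.min_consumer() && !bad;
}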

@ -0,0 +1,51 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Test for TensorFlow model loading
*/
#include "test_precomp.hpp"
namespace cvtest
{
using namespace cv;
using namespace cv::dnn;
template<typename TString>
static std::string _tf(TString filename)
{
return (getOpenCVExtraDir() + "/dnn/") + filename;
}
TEST(Test_TensorFlow, read_inception)
{
Net net;
{
Ptr<Importer> importer = createTensorflowImporter(_tf("tensorflow_inception_graph.pb"));
ASSERT_TRUE(importer != NULL);
importer->populateNet(net);
}
Mat sample = imread(_tf("grace_hopper.jpg"));
ASSERT_TRUE(!sample.empty());
Mat input;
resize(sample, input, Size(224, 224));
input -= 128; // mean sub
std::vector<Mat> inpMats;
inpMats.push_back(input);
net.setBlob("_input.input", Blob(inpMats));
net.forward();
Blob out = net.getBlob("output");
std::cout << out.dims() << std::endl;
}
}