// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/tensor_bundle.proto

#include "tensorflow/core/protobuf/tensor_bundle.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

PROTOBUF_PRAGMA_INIT_SEG

namespace _pb = ::PROTOBUF_NAMESPACE_ID;
namespace _pbi = _pb::internal;

namespace tensorflow {
PROTOBUF_CONSTEXPR BundleHeaderProto::BundleHeaderProto(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.version_)*/nullptr
  , /*decltype(_impl_.num_shards_)*/0
  , /*decltype(_impl_.endianness_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
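// Note: the ConstantInitialized tag selects this constexpr constructor so the
// default instance below can be constant-initialized at compile time, avoiding
// static-initialization-order problems.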
struct BundleHeaderProtoDefaultTypeInternal {
  PROTOBUF_CONSTEXPR BundleHeaderProtoDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~BundleHeaderProtoDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    BundleHeaderProto _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 BundleHeaderProtoDefaultTypeInternal _BundleHeaderProto_default_instance_;
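// Note: wrapping _instance in a union suppresses the implicit destructor call,
// so (together with PROTOBUF_ATTRIBUTE_NO_DESTROY) the default instance is
// never torn down at program exit and remains safe to reference from other
// objects' destructors regardless of destruction order.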
PROTOBUF_CONSTEXPR BundleEntryProto::BundleEntryProto(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.slices_)*/{}
  , /*decltype(_impl_.shape_)*/nullptr
  , /*decltype(_impl_.dtype_)*/0
  , /*decltype(_impl_.shard_id_)*/0
  , /*decltype(_impl_.offset_)*/::int64_t{0}
  , /*decltype(_impl_.size_)*/::int64_t{0}
  , /*decltype(_impl_.crc32c_)*/0u
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct BundleEntryProtoDefaultTypeInternal {
  PROTOBUF_CONSTEXPR BundleEntryProtoDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~BundleEntryProtoDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    BundleEntryProto _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 BundleEntryProtoDefaultTypeInternal _BundleEntryProto_default_instance_;
} // namespace tensorflow
namespace tensorflow {
bool BundleHeaderProto_Endianness_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> BundleHeaderProto_Endianness_strings[2] = {};

static const char BundleHeaderProto_Endianness_names[] =
  "BIG"
  "LITTLE";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry BundleHeaderProto_Endianness_entries[] = {
  { {BundleHeaderProto_Endianness_names + 0, 3}, 1 },
  { {BundleHeaderProto_Endianness_names + 3, 6}, 0 },
};

static const int BundleHeaderProto_Endianness_entries_by_number[] = {
  1, // 0 -> LITTLE
  0, // 1 -> BIG
};
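// Note: entries[] is sorted by name ("BIG" < "LITTLE") so names can be looked
// up by binary search, while entries_by_number[] maps each enum value, in
// numeric order, back to an index into entries[] for lookup by value.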

const std::string& BundleHeaderProto_Endianness_Name(
    BundleHeaderProto_Endianness value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          BundleHeaderProto_Endianness_entries,
          BundleHeaderProto_Endianness_entries_by_number,
          2, BundleHeaderProto_Endianness_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      BundleHeaderProto_Endianness_entries,
      BundleHeaderProto_Endianness_entries_by_number,
      2, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
      BundleHeaderProto_Endianness_strings[idx].get();
}
bool BundleHeaderProto_Endianness_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, BundleHeaderProto_Endianness* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      BundleHeaderProto_Endianness_entries, 2, name, &int_value);
  if (success) {
    *value = static_cast<BundleHeaderProto_Endianness>(int_value);
  }
  return success;
}
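// Illustrative round-trip through the two helpers above (a sketch, not part of
// the generated API surface):
//   tensorflow::BundleHeaderProto_Endianness e;
//   if (tensorflow::BundleHeaderProto_Endianness_Parse("LITTLE", &e)) {
//     // Name() recovers the same identifier the parser consumed.
//     assert(tensorflow::BundleHeaderProto_Endianness_Name(e) == "LITTLE");
//   }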
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr BundleHeaderProto_Endianness BundleHeaderProto::LITTLE;
constexpr BundleHeaderProto_Endianness BundleHeaderProto::BIG;
constexpr BundleHeaderProto_Endianness BundleHeaderProto::Endianness_MIN;
constexpr BundleHeaderProto_Endianness BundleHeaderProto::Endianness_MAX;
constexpr int BundleHeaderProto::Endianness_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))

// ===================================================================

class BundleHeaderProto::_Internal {
 public:
  static const ::tensorflow::VersionDef& version(const BundleHeaderProto* msg);
};

const ::tensorflow::VersionDef&
BundleHeaderProto::_Internal::version(const BundleHeaderProto* msg) {
  return *msg->_impl_.version_;
}
void BundleHeaderProto::clear_version() {
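  // A submessage is heap-owned only when the message is not on an arena;
  // arena-allocated submessages are reclaimed with the arena, never deleted.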
  if (GetArenaForAllocation() == nullptr && _impl_.version_ != nullptr) {
    delete _impl_.version_;
  }
  _impl_.version_ = nullptr;
}
BundleHeaderProto::BundleHeaderProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.BundleHeaderProto)
}
BundleHeaderProto::BundleHeaderProto(const BundleHeaderProto& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  BundleHeaderProto* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.version_){nullptr}
    , decltype(_impl_.num_shards_){}
    , decltype(_impl_.endianness_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_version()) {
    _this->_impl_.version_ = new ::tensorflow::VersionDef(*from._impl_.version_);
  }
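  // Bulk-copy the scalar fields, which Impl_ lays out contiguously from
  // num_shards_ through endianness_, with a single memcpy.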
  ::memcpy(&_impl_.num_shards_, &from._impl_.num_shards_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.endianness_) -
    reinterpret_cast<char*>(&_impl_.num_shards_)) + sizeof(_impl_.endianness_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.BundleHeaderProto)
}

inline void BundleHeaderProto::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.version_){nullptr}
    , decltype(_impl_.num_shards_){0}
    , decltype(_impl_.endianness_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

BundleHeaderProto::~BundleHeaderProto() {
  // @@protoc_insertion_point(destructor:tensorflow.BundleHeaderProto)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
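    // Arena-owned message: its memory is released when the arena is destroyed,
    // so SharedDtor() is skipped entirely.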
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void BundleHeaderProto::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (this != internal_default_instance()) delete _impl_.version_;
}

void BundleHeaderProto::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void BundleHeaderProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.BundleHeaderProto)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  if (GetArenaForAllocation() == nullptr && _impl_.version_ != nullptr) {
    delete _impl_.version_;
  }
  _impl_.version_ = nullptr;
  ::memset(&_impl_.num_shards_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.endianness_) -
      reinterpret_cast<char*>(&_impl_.num_shards_)) + sizeof(_impl_.endianness_));
  _internal_metadata_.Clear<std::string>();
}

const char* BundleHeaderProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
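      // A wire tag encodes (field_number << 3) | wire_type, so tag >> 3 is the
      // field number and the low three bits are the wire type. The byte checks
      // below (8 = field 1 varint, 16 = field 2 varint, 26 = field 3
      // length-delimited) verify both at once.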
      // int32 num_shards = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.num_shards_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.BundleHeaderProto.Endianness endianness = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_endianness(static_cast<::tensorflow::BundleHeaderProto_Endianness>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.VersionDef version = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ctx->ParseMessage(_internal_mutable_version(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* BundleHeaderProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.BundleHeaderProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int32 num_shards = 1;
  if (this->_internal_num_shards() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_num_shards(), target);
  }

  // .tensorflow.BundleHeaderProto.Endianness endianness = 2;
  if (this->_internal_endianness() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        2, this->_internal_endianness(), target);
  }

  // .tensorflow.VersionDef version = 3;
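  // Note: the length prefix written for the submessage comes from
  // GetCachedSize(), populated by the ByteSizeLong() pass that protobuf runs
  // before serialization.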
  if (this->_internal_has_version()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(3, _Internal::version(this),
        _Internal::version(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.BundleHeaderProto)
  return target;
}

size_t BundleHeaderProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.BundleHeaderProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.VersionDef version = 3;
  if (this->_internal_has_version()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.version_);
  }

  // int32 num_shards = 1;
  if (this->_internal_num_shards() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_num_shards());
  }

  // .tensorflow.BundleHeaderProto.Endianness endianness = 2;
  if (this->_internal_endianness() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_endianness());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
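  // Cache the computed size so the serializer can emit length prefixes without
  // re-measuring submessages.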
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void BundleHeaderProto::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const BundleHeaderProto*>(
      &from));
}

void BundleHeaderProto::MergeFrom(const BundleHeaderProto& from) {
  BundleHeaderProto* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.BundleHeaderProto)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_has_version()) {
    _this->_internal_mutable_version()->::tensorflow::VersionDef::MergeFrom(
        from._internal_version());
  }
  if (from._internal_num_shards() != 0) {
    _this->_internal_set_num_shards(from._internal_num_shards());
  }
  if (from._internal_endianness() != 0) {
    _this->_internal_set_endianness(from._internal_endianness());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void BundleHeaderProto::CopyFrom(const BundleHeaderProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.BundleHeaderProto)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool BundleHeaderProto::IsInitialized() const {
  return true;
}

void BundleHeaderProto::InternalSwap(BundleHeaderProto* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
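  // Swap the contiguous byte range covering version_ through endianness_ in
  // one memswap instead of swapping each field individually.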
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(BundleHeaderProto, _impl_.endianness_)
      + sizeof(BundleHeaderProto::_impl_.endianness_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(BundleHeaderProto, _impl_.version_)>(
          reinterpret_cast<char*>(&_impl_.version_),
          reinterpret_cast<char*>(&other->_impl_.version_));
}

std::string BundleHeaderProto::GetTypeName() const {
  return "tensorflow.BundleHeaderProto";
}
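// Illustrative usage of the generated lite message (a sketch, not part of the
// generated file itself):
//   tensorflow::BundleHeaderProto header;
//   header.set_num_shards(2);
//   header.set_endianness(tensorflow::BundleHeaderProto::LITTLE);
//   std::string bytes = header.SerializeAsString();  // MessageLite API
//   tensorflow::BundleHeaderProto parsed;
//   bool ok = parsed.ParseFromString(bytes);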


// ===================================================================

class BundleEntryProto::_Internal {
 public:
  static const ::tensorflow::TensorShapeProto& shape(const BundleEntryProto* msg);
};

const ::tensorflow::TensorShapeProto&
BundleEntryProto::_Internal::shape(const BundleEntryProto* msg) {
  return *msg->_impl_.shape_;
}
void BundleEntryProto::clear_shape() {
  if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
    delete _impl_.shape_;
  }
  _impl_.shape_ = nullptr;
}
void BundleEntryProto::clear_slices() {
  _impl_.slices_.Clear();
}
BundleEntryProto::BundleEntryProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.BundleEntryProto)
}
BundleEntryProto::BundleEntryProto(const BundleEntryProto& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  BundleEntryProto* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.slices_){from._impl_.slices_}
    , decltype(_impl_.shape_){nullptr}
    , decltype(_impl_.dtype_){}
    , decltype(_impl_.shard_id_){}
    , decltype(_impl_.offset_){}
    , decltype(_impl_.size_){}
    , decltype(_impl_.crc32c_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_shape()) {
    _this->_impl_.shape_ = new ::tensorflow::TensorShapeProto(*from._impl_.shape_);
  }
  ::memcpy(&_impl_.dtype_, &from._impl_.dtype_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.crc32c_) -
    reinterpret_cast<char*>(&_impl_.dtype_)) + sizeof(_impl_.crc32c_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.BundleEntryProto)
}

inline void BundleEntryProto::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.slices_){arena}
    , decltype(_impl_.shape_){nullptr}
    , decltype(_impl_.dtype_){0}
    , decltype(_impl_.shard_id_){0}
    , decltype(_impl_.offset_){::int64_t{0}}
    , decltype(_impl_.size_){::int64_t{0}}
    , decltype(_impl_.crc32c_){0u}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

BundleEntryProto::~BundleEntryProto() {
  // @@protoc_insertion_point(destructor:tensorflow.BundleEntryProto)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void BundleEntryProto::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.slices_.~RepeatedPtrField();
  if (this != internal_default_instance()) delete _impl_.shape_;
}

void BundleEntryProto::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void BundleEntryProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.BundleEntryProto)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.slices_.Clear();
  if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
    delete _impl_.shape_;
  }
  _impl_.shape_ = nullptr;
  ::memset(&_impl_.dtype_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.crc32c_) -
      reinterpret_cast<char*>(&_impl_.dtype_)) + sizeof(_impl_.crc32c_));
  _internal_metadata_.Clear<std::string>();
}

const char* BundleEntryProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.DataType dtype = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_dtype(static_cast<::tensorflow::DataType>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.TensorShapeProto shape = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_shape(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 shard_id = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.shard_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 offset = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.offset_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 size = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // fixed32 crc32c = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 53)) {
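          // fixed32 is four raw little-endian bytes on the wire (tag 53 ==
          // (6 << 3) | 5), so read it with an unaligned 32-bit load.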
          _impl_.crc32c_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint32_t>(ptr);
          ptr += sizeof(::uint32_t);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.TensorSliceProto slices = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 58)) {
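          // Fast path for runs of the same repeated field: after each element,
          // peek at the next tag and keep parsing elements while it is still 58
          // ((7 << 3) | 2), skipping the dispatch switch.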
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_slices(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<58>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* BundleEntryProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.BundleEntryProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.DataType dtype = 1;
  if (this->_internal_dtype() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        1, this->_internal_dtype(), target);
  }

  // .tensorflow.TensorShapeProto shape = 2;
  if (this->_internal_has_shape()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(2, _Internal::shape(this),
        _Internal::shape(this).GetCachedSize(), target, stream);
  }

  // int32 shard_id = 3;
  if (this->_internal_shard_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_shard_id(), target);
  }

  // int64 offset = 4;
  if (this->_internal_offset() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_offset(), target);
  }

  // int64 size = 5;
  if (this->_internal_size() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_size(), target);
  }

  // fixed32 crc32c = 6;
  if (this->_internal_crc32c() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteFixed32ToArray(6, this->_internal_crc32c(), target);
  }

  // repeated .tensorflow.TensorSliceProto slices = 7;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_slices_size()); i < n; i++) {
    const auto& repfield = this->_internal_slices(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(7, repfield, repfield.GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.BundleEntryProto)
  return target;
}

size_t BundleEntryProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.BundleEntryProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.TensorSliceProto slices = 7;
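  // One tag byte per element, plus each element's length-delimited size.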
  total_size += 1UL * this->_internal_slices_size();
  for (const auto& msg : this->_impl_.slices_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // .tensorflow.TensorShapeProto shape = 2;
  if (this->_internal_has_shape()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.shape_);
  }

  // .tensorflow.DataType dtype = 1;
  if (this->_internal_dtype() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_dtype());
  }

  // int32 shard_id = 3;
  if (this->_internal_shard_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_shard_id());
  }

  // int64 offset = 4;
  if (this->_internal_offset() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_offset());
  }

  // int64 size = 5;
  if (this->_internal_size() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_size());
  }

  // fixed32 crc32c = 6;
  if (this->_internal_crc32c() != 0) {
    total_size += 1 + 4;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void BundleEntryProto::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const BundleEntryProto*>(
      &from));
}

void BundleEntryProto::MergeFrom(const BundleEntryProto& from) {
  BundleEntryProto* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.BundleEntryProto)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.slices_.MergeFrom(from._impl_.slices_);
  if (from._internal_has_shape()) {
    _this->_internal_mutable_shape()->::tensorflow::TensorShapeProto::MergeFrom(
        from._internal_shape());
  }
  if (from._internal_dtype() != 0) {
    _this->_internal_set_dtype(from._internal_dtype());
  }
  if (from._internal_shard_id() != 0) {
    _this->_internal_set_shard_id(from._internal_shard_id());
  }
  if (from._internal_offset() != 0) {
    _this->_internal_set_offset(from._internal_offset());
  }
  if (from._internal_size() != 0) {
    _this->_internal_set_size(from._internal_size());
  }
  if (from._internal_crc32c() != 0) {
    _this->_internal_set_crc32c(from._internal_crc32c());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void BundleEntryProto::CopyFrom(const BundleEntryProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.BundleEntryProto)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool BundleEntryProto::IsInitialized() const {
  return true;
}

void BundleEntryProto::InternalSwap(BundleEntryProto* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.slices_.InternalSwap(&other->_impl_.slices_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(BundleEntryProto, _impl_.crc32c_)
      + sizeof(BundleEntryProto::_impl_.crc32c_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(BundleEntryProto, _impl_.shape_)>(
          reinterpret_cast<char*>(&_impl_.shape_),
          reinterpret_cast<char*>(&other->_impl_.shape_));
}

std::string BundleEntryProto::GetTypeName() const {
  return "tensorflow.BundleEntryProto";
}


// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> PROTOBUF_NOINLINE ::tensorflow::BundleHeaderProto*
Arena::CreateMaybeMessage< ::tensorflow::BundleHeaderProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::BundleHeaderProto >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::BundleEntryProto*
Arena::CreateMaybeMessage< ::tensorflow::BundleEntryProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::BundleEntryProto >(arena);
}
PROTOBUF_NAMESPACE_CLOSE

// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>