// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/model.proto

#include "tensorflow/core/framework/model.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

PROTOBUF_PRAGMA_INIT_SEG

namespace _pb = ::PROTOBUF_NAMESPACE_ID;
namespace _pbi = _pb::internal;

namespace tensorflow {
namespace data {
namespace model {
PROTOBUF_CONSTEXPR ModelProto_Node_Parameter::ModelProto_Node_Parameter(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.value_)*/0
  , /*decltype(_impl_.state_value_)*/0
  , /*decltype(_impl_.min_)*/0
  , /*decltype(_impl_.max_)*/0
  , /*decltype(_impl_.tunable_)*/false
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ModelProto_Node_ParameterDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ModelProto_Node_ParameterDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ModelProto_Node_ParameterDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ModelProto_Node_Parameter _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ModelProto_Node_ParameterDefaultTypeInternal _ModelProto_Node_Parameter_default_instance_;
PROTOBUF_CONSTEXPR ModelProto_Node::ModelProto_Node(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.parameters_)*/{}
  , /*decltype(_impl_.inputs_)*/{}
  , /*decltype(_impl_._inputs_cached_byte_size_)*/{0}
  , /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.id_)*/::int64_t{0}
  , /*decltype(_impl_.buffered_bytes_)*/::int64_t{0}
  , /*decltype(_impl_.buffered_elements_)*/::int64_t{0}
  , /*decltype(_impl_.bytes_consumed_)*/::int64_t{0}
  , /*decltype(_impl_.bytes_produced_)*/::int64_t{0}
  , /*decltype(_impl_.num_elements_)*/::int64_t{0}
  , /*decltype(_impl_.processing_time_)*/::int64_t{0}
  , /*decltype(_impl_.autotune_)*/false
  , /*decltype(_impl_.record_metrics_)*/false
  , /*decltype(_impl_.node_class_)*/0
  , /*decltype(_impl_.input_processing_time_sum_)*/0
  , /*decltype(_impl_.input_processing_time_count_)*/::int64_t{0}
  , /*decltype(_impl_.ratio_)*/0
  , /*decltype(_impl_.memory_ratio_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ModelProto_NodeDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ModelProto_NodeDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ModelProto_NodeDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ModelProto_Node _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ModelProto_NodeDefaultTypeInternal _ModelProto_Node_default_instance_;
PROTOBUF_CONSTEXPR ModelProto_NodesEntry_DoNotUse::ModelProto_NodesEntry_DoNotUse(
    ::_pbi::ConstantInitialized) {}
struct ModelProto_NodesEntry_DoNotUseDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ModelProto_NodesEntry_DoNotUseDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ModelProto_NodesEntry_DoNotUseDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ModelProto_NodesEntry_DoNotUse _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ModelProto_NodesEntry_DoNotUseDefaultTypeInternal _ModelProto_NodesEntry_DoNotUse_default_instance_;
PROTOBUF_CONSTEXPR ModelProto_OptimizationParams::ModelProto_OptimizationParams(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.cpu_budget_)*/::int64_t{0}
  , /*decltype(_impl_.ram_budget_)*/::int64_t{0}
  , /*decltype(_impl_.model_input_time_)*/0
  , /*decltype(_impl_.algorithm_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ModelProto_OptimizationParamsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ModelProto_OptimizationParamsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ModelProto_OptimizationParamsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ModelProto_OptimizationParams _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ModelProto_OptimizationParamsDefaultTypeInternal _ModelProto_OptimizationParams_default_instance_;
PROTOBUF_CONSTEXPR ModelProto::ModelProto(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.nodes_)*/{}
  , /*decltype(_impl_.optimization_params_)*/nullptr
  , /*decltype(_impl_.output_)*/::int64_t{0}
  , /*decltype(_impl_.id_counter_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ModelProtoDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ModelProtoDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ModelProtoDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ModelProto _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ModelProtoDefaultTypeInternal _ModelProto_default_instance_;
}  // namespace model
}  // namespace data
}  // namespace tensorflow
namespace tensorflow {
namespace data {
namespace model {
bool NodeClass_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> NodeClass_strings[7] = {};

static const char NodeClass_names[] =
  "ASYNC_INTERLEAVE_MANY"
  "ASYNC_KNOWN_RATIO"
  "ASYNC_UNKNOWN_RATIO"
  "INTERLEAVE_MANY"
  "KNOWN_RATIO"
  "UNKNOWN"
  "UNKNOWN_RATIO";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry NodeClass_entries[] = {
  { {NodeClass_names + 0, 21}, 2 },
  { {NodeClass_names + 21, 17}, 4 },
  { {NodeClass_names + 38, 19}, 6 },
  { {NodeClass_names + 57, 15}, 1 },
  { {NodeClass_names + 72, 11}, 3 },
  { {NodeClass_names + 83, 7}, 0 },
  { {NodeClass_names + 90, 13}, 5 },
};

static const int NodeClass_entries_by_number[] = {
  5, // 0 -> UNKNOWN
  3, // 1 -> INTERLEAVE_MANY
  0, // 2 -> ASYNC_INTERLEAVE_MANY
  4, // 3 -> KNOWN_RATIO
  1, // 4 -> ASYNC_KNOWN_RATIO
  6, // 5 -> UNKNOWN_RATIO
  2, // 6 -> ASYNC_UNKNOWN_RATIO
};
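// NodeClass_entries is ordered by enum name so LookUpEnumValue() can
// binary-search by name, while NodeClass_entries_by_number maps each numeric
// value to its index in NodeClass_entries for the reverse (value -> name)
// lookup used by NodeClass_Name() below.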

const std::string& NodeClass_Name(
    NodeClass value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          NodeClass_entries,
          NodeClass_entries_by_number,
          7, NodeClass_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      NodeClass_entries,
      NodeClass_entries_by_number,
      7, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     NodeClass_strings[idx].get();
}
bool NodeClass_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, NodeClass* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      NodeClass_entries, 7, name, &int_value);
  if (success) {
    *value = static_cast<NodeClass>(int_value);
  }
  return success;
}
bool AutotuneAlgorithm_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> AutotuneAlgorithm_strings[5] = {};

static const char AutotuneAlgorithm_names[] =
  "DEFAULT"
  "GRADIENT_DESCENT"
  "HILL_CLIMB"
  "MAX_PARALLELISM"
  "STAGE_BASED";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry AutotuneAlgorithm_entries[] = {
  { {AutotuneAlgorithm_names + 0, 7}, 0 },
  { {AutotuneAlgorithm_names + 7, 16}, 2 },
  { {AutotuneAlgorithm_names + 23, 10}, 1 },
  { {AutotuneAlgorithm_names + 33, 15}, 3 },
  { {AutotuneAlgorithm_names + 48, 11}, 4 },
};

static const int AutotuneAlgorithm_entries_by_number[] = {
  0, // 0 -> DEFAULT
  2, // 1 -> HILL_CLIMB
  1, // 2 -> GRADIENT_DESCENT
  3, // 3 -> MAX_PARALLELISM
  4, // 4 -> STAGE_BASED
};

const std::string& AutotuneAlgorithm_Name(
    AutotuneAlgorithm value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          AutotuneAlgorithm_entries,
          AutotuneAlgorithm_entries_by_number,
          5, AutotuneAlgorithm_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      AutotuneAlgorithm_entries,
      AutotuneAlgorithm_entries_by_number,
      5, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     AutotuneAlgorithm_strings[idx].get();
}
bool AutotuneAlgorithm_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, AutotuneAlgorithm* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      AutotuneAlgorithm_entries, 5, name, &int_value);
  if (success) {
    *value = static_cast<AutotuneAlgorithm>(int_value);
  }
  return success;
}

// ===================================================================

class ModelProto_Node_Parameter::_Internal {
 public:
};

ModelProto_Node_Parameter::ModelProto_Node_Parameter(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                                     bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.model.ModelProto.Node.Parameter)
}
ModelProto_Node_Parameter::ModelProto_Node_Parameter(const ModelProto_Node_Parameter& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ModelProto_Node_Parameter* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.name_){}
    , decltype(_impl_.value_){}
    , decltype(_impl_.state_value_){}
    , decltype(_impl_.min_){}
    , decltype(_impl_.max_){}
    , decltype(_impl_.tunable_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_name().empty()) {
    _this->_impl_.name_.Set(from._internal_name(),
      _this->GetArenaForAllocation());
  }
  ::memcpy(&_impl_.value_, &from._impl_.value_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.tunable_) -
    reinterpret_cast<char*>(&_impl_.value_)) + sizeof(_impl_.tunable_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.model.ModelProto.Node.Parameter)
}

inline void ModelProto_Node_Parameter::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.name_){}
    , decltype(_impl_.value_){0}
    , decltype(_impl_.state_value_){0}
    , decltype(_impl_.min_){0}
    , decltype(_impl_.max_){0}
    , decltype(_impl_.tunable_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

ModelProto_Node_Parameter::~ModelProto_Node_Parameter() {
  // @@protoc_insertion_point(destructor:tensorflow.data.model.ModelProto.Node.Parameter)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void ModelProto_Node_Parameter::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.name_.Destroy();
}

void ModelProto_Node_Parameter::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ModelProto_Node_Parameter::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.data.model.ModelProto.Node.Parameter)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.name_.ClearToEmpty();
  ::memset(&_impl_.value_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.tunable_) -
      reinterpret_cast<char*>(&_impl_.value_)) + sizeof(_impl_.tunable_));
  _internal_metadata_.Clear<std::string>();
}

const char* ModelProto_Node_Parameter::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // double value = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 17)) {
          _impl_.value_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // double state_value = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 25)) {
          _impl_.state_value_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // double min = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 33)) {
          _impl_.min_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // double max = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 41)) {
          _impl_.max_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool tunable = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
          _impl_.tunable_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* ModelProto_Node_Parameter::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.model.ModelProto.Node.Parameter)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string name = 1;
  if (!this->_internal_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.data.model.ModelProto.Node.Parameter.name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_name(), target);
  }

  // double value = 2;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_value = this->_internal_value();
  ::uint64_t raw_value;
  memcpy(&raw_value, &tmp_value, sizeof(tmp_value));
  if (raw_value != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(2, this->_internal_value(), target);
  }
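  // In proto3, scalar fields equal to their default value are skipped during
  // serialization. For doubles the comparison is done on the raw 64-bit
  // pattern (via memcpy) rather than with `== 0.0`, so only an exact +0.0 is
  // omitted; -0.0 and NaN payloads still round-trip. The same pattern repeats
  // for the remaining double fields below and in ByteSizeLong()/MergeFrom().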

  // double state_value = 3;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_state_value = this->_internal_state_value();
  ::uint64_t raw_state_value;
  memcpy(&raw_state_value, &tmp_state_value, sizeof(tmp_state_value));
  if (raw_state_value != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(3, this->_internal_state_value(), target);
  }

  // double min = 4;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_min = this->_internal_min();
  ::uint64_t raw_min;
  memcpy(&raw_min, &tmp_min, sizeof(tmp_min));
  if (raw_min != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(4, this->_internal_min(), target);
  }

  // double max = 5;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_max = this->_internal_max();
  ::uint64_t raw_max;
  memcpy(&raw_max, &tmp_max, sizeof(tmp_max));
  if (raw_max != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(5, this->_internal_max(), target);
  }

  // bool tunable = 6;
  if (this->_internal_tunable() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(6, this->_internal_tunable(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.model.ModelProto.Node.Parameter)
  return target;
}

size_t ModelProto_Node_Parameter::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.data.model.ModelProto.Node.Parameter)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string name = 1;
  if (!this->_internal_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_name());
  }

  // double value = 2;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_value = this->_internal_value();
  ::uint64_t raw_value;
  memcpy(&raw_value, &tmp_value, sizeof(tmp_value));
  if (raw_value != 0) {
    total_size += 1 + 8;
  }

  // double state_value = 3;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_state_value = this->_internal_state_value();
  ::uint64_t raw_state_value;
  memcpy(&raw_state_value, &tmp_state_value, sizeof(tmp_state_value));
  if (raw_state_value != 0) {
    total_size += 1 + 8;
  }

  // double min = 4;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_min = this->_internal_min();
  ::uint64_t raw_min;
  memcpy(&raw_min, &tmp_min, sizeof(tmp_min));
  if (raw_min != 0) {
    total_size += 1 + 8;
  }

  // double max = 5;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_max = this->_internal_max();
  ::uint64_t raw_max;
  memcpy(&raw_max, &tmp_max, sizeof(tmp_max));
  if (raw_max != 0) {
    total_size += 1 + 8;
  }

  // bool tunable = 6;
  if (this->_internal_tunable() != 0) {
    total_size += 1 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void ModelProto_Node_Parameter::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ModelProto_Node_Parameter*>(
      &from));
}

void ModelProto_Node_Parameter::MergeFrom(const ModelProto_Node_Parameter& from) {
  ModelProto_Node_Parameter* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.model.ModelProto.Node.Parameter)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_name().empty()) {
    _this->_internal_set_name(from._internal_name());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_value = from._internal_value();
  ::uint64_t raw_value;
  memcpy(&raw_value, &tmp_value, sizeof(tmp_value));
  if (raw_value != 0) {
    _this->_internal_set_value(from._internal_value());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_state_value = from._internal_state_value();
  ::uint64_t raw_state_value;
  memcpy(&raw_state_value, &tmp_state_value, sizeof(tmp_state_value));
  if (raw_state_value != 0) {
    _this->_internal_set_state_value(from._internal_state_value());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_min = from._internal_min();
  ::uint64_t raw_min;
  memcpy(&raw_min, &tmp_min, sizeof(tmp_min));
  if (raw_min != 0) {
    _this->_internal_set_min(from._internal_min());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_max = from._internal_max();
  ::uint64_t raw_max;
  memcpy(&raw_max, &tmp_max, sizeof(tmp_max));
  if (raw_max != 0) {
    _this->_internal_set_max(from._internal_max());
  }
  if (from._internal_tunable() != 0) {
    _this->_internal_set_tunable(from._internal_tunable());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void ModelProto_Node_Parameter::CopyFrom(const ModelProto_Node_Parameter& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.model.ModelProto.Node.Parameter)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ModelProto_Node_Parameter::IsInitialized() const {
  return true;
}

void ModelProto_Node_Parameter::InternalSwap(ModelProto_Node_Parameter* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.name_, lhs_arena,
      &other->_impl_.name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(ModelProto_Node_Parameter, _impl_.tunable_)
      + sizeof(ModelProto_Node_Parameter::_impl_.tunable_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(ModelProto_Node_Parameter, _impl_.value_)>(
          reinterpret_cast<char*>(&_impl_.value_),
          reinterpret_cast<char*>(&other->_impl_.value_));
}

std::string ModelProto_Node_Parameter::GetTypeName() const {
  return "tensorflow.data.model.ModelProto.Node.Parameter";
}


// ===================================================================

class ModelProto_Node::_Internal {
 public:
};

ModelProto_Node::ModelProto_Node(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                 bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.model.ModelProto.Node)
}
ModelProto_Node::ModelProto_Node(const ModelProto_Node& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ModelProto_Node* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.parameters_){from._impl_.parameters_}
    , decltype(_impl_.inputs_){from._impl_.inputs_}
    , /*decltype(_impl_._inputs_cached_byte_size_)*/{0}
    , decltype(_impl_.name_){}
    , decltype(_impl_.id_){}
    , decltype(_impl_.buffered_bytes_){}
    , decltype(_impl_.buffered_elements_){}
    , decltype(_impl_.bytes_consumed_){}
    , decltype(_impl_.bytes_produced_){}
    , decltype(_impl_.num_elements_){}
    , decltype(_impl_.processing_time_){}
    , decltype(_impl_.autotune_){}
    , decltype(_impl_.record_metrics_){}
    , decltype(_impl_.node_class_){}
    , decltype(_impl_.input_processing_time_sum_){}
    , decltype(_impl_.input_processing_time_count_){}
    , decltype(_impl_.ratio_){}
    , decltype(_impl_.memory_ratio_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_name().empty()) {
    _this->_impl_.name_.Set(from._internal_name(),
      _this->GetArenaForAllocation());
  }
  ::memcpy(&_impl_.id_, &from._impl_.id_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.memory_ratio_) -
    reinterpret_cast<char*>(&_impl_.id_)) + sizeof(_impl_.memory_ratio_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.model.ModelProto.Node)
}

inline void ModelProto_Node::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.parameters_){arena}
    , decltype(_impl_.inputs_){arena}
    , /*decltype(_impl_._inputs_cached_byte_size_)*/{0}
    , decltype(_impl_.name_){}
    , decltype(_impl_.id_){::int64_t{0}}
    , decltype(_impl_.buffered_bytes_){::int64_t{0}}
    , decltype(_impl_.buffered_elements_){::int64_t{0}}
    , decltype(_impl_.bytes_consumed_){::int64_t{0}}
    , decltype(_impl_.bytes_produced_){::int64_t{0}}
    , decltype(_impl_.num_elements_){::int64_t{0}}
    , decltype(_impl_.processing_time_){::int64_t{0}}
    , decltype(_impl_.autotune_){false}
    , decltype(_impl_.record_metrics_){false}
    , decltype(_impl_.node_class_){0}
    , decltype(_impl_.input_processing_time_sum_){0}
    , decltype(_impl_.input_processing_time_count_){::int64_t{0}}
    , decltype(_impl_.ratio_){0}
    , decltype(_impl_.memory_ratio_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

ModelProto_Node::~ModelProto_Node() {
  // @@protoc_insertion_point(destructor:tensorflow.data.model.ModelProto.Node)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void ModelProto_Node::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.parameters_.~RepeatedPtrField();
  _impl_.inputs_.~RepeatedField();
  _impl_.name_.Destroy();
}

void ModelProto_Node::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ModelProto_Node::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.data.model.ModelProto.Node)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.parameters_.Clear();
  _impl_.inputs_.Clear();
  _impl_.name_.ClearToEmpty();
  ::memset(&_impl_.id_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.memory_ratio_) -
      reinterpret_cast<char*>(&_impl_.id_)) + sizeof(_impl_.memory_ratio_));
  _internal_metadata_.Clear<std::string>();
}

const char* ModelProto_Node::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 id = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string name = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool autotune = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.autotune_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 buffered_bytes = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.buffered_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 buffered_elements = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.buffered_elements_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 bytes_consumed = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
          _impl_.bytes_consumed_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 bytes_produced = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.bytes_produced_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 num_elements = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          _impl_.num_elements_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 processing_time = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.processing_time_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool record_metrics = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 80)) {
          _impl_.record_metrics_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11;
      case 11:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 90)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_parameters(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<90>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // double input_processing_time_sum = 12;
      case 12:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 97)) {
          _impl_.input_processing_time_sum_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 input_processing_time_count = 13;
      case 13:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 104)) {
          _impl_.input_processing_time_count_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated int64 inputs = 14;
      case 14:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 114)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(_internal_mutable_inputs(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 112) {
          _internal_add_inputs(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.data.model.NodeClass node_class = 15;
      case 15:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 120)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_node_class(static_cast<::tensorflow::data::model::NodeClass>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // double ratio = 16;
      case 16:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 129)) {
          _impl_.ratio_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // double memory_ratio = 17;
      case 17:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 137)) {
          _impl_.memory_ratio_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* ModelProto_Node::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.model.ModelProto.Node)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 id = 1;
  if (this->_internal_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_id(), target);
  }

  // string name = 2;
  if (!this->_internal_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.data.model.ModelProto.Node.name");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_name(), target);
  }

  // bool autotune = 3;
  if (this->_internal_autotune() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(3, this->_internal_autotune(), target);
  }

  // int64 buffered_bytes = 4;
  if (this->_internal_buffered_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_buffered_bytes(), target);
  }

  // int64 buffered_elements = 5;
  if (this->_internal_buffered_elements() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_buffered_elements(), target);
  }

  // int64 bytes_consumed = 6;
  if (this->_internal_bytes_consumed() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(6, this->_internal_bytes_consumed(), target);
  }

  // int64 bytes_produced = 7;
  if (this->_internal_bytes_produced() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(7, this->_internal_bytes_produced(), target);
  }

  // int64 num_elements = 8;
  if (this->_internal_num_elements() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(8, this->_internal_num_elements(), target);
  }

  // int64 processing_time = 9;
  if (this->_internal_processing_time() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(9, this->_internal_processing_time(), target);
  }

  // bool record_metrics = 10;
  if (this->_internal_record_metrics() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(10, this->_internal_record_metrics(), target);
  }

  // repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_parameters_size()); i < n; i++) {
    const auto& repfield = this->_internal_parameters(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(11, repfield, repfield.GetCachedSize(), target, stream);
  }

  // double input_processing_time_sum = 12;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_input_processing_time_sum = this->_internal_input_processing_time_sum();
  ::uint64_t raw_input_processing_time_sum;
  memcpy(&raw_input_processing_time_sum, &tmp_input_processing_time_sum, sizeof(tmp_input_processing_time_sum));
  if (raw_input_processing_time_sum != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(12, this->_internal_input_processing_time_sum(), target);
  }

  // int64 input_processing_time_count = 13;
  if (this->_internal_input_processing_time_count() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(13, this->_internal_input_processing_time_count(), target);
  }

  // repeated int64 inputs = 14;
  {
    int byte_size = _impl_._inputs_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt64Packed(
          14, _internal_inputs(), byte_size, target);
    }
  }
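  // The packed length written above reuses _inputs_cached_byte_size_, which is
  // computed and stored by ByteSizeLong(); the serializer relies on
  // ByteSizeLong() having run first, as it does on the normal serialization
  // path.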

  // .tensorflow.data.model.NodeClass node_class = 15;
  if (this->_internal_node_class() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        15, this->_internal_node_class(), target);
  }

  // double ratio = 16;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_ratio = this->_internal_ratio();
  ::uint64_t raw_ratio;
  memcpy(&raw_ratio, &tmp_ratio, sizeof(tmp_ratio));
  if (raw_ratio != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(16, this->_internal_ratio(), target);
  }

  // double memory_ratio = 17;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_memory_ratio = this->_internal_memory_ratio();
  ::uint64_t raw_memory_ratio;
  memcpy(&raw_memory_ratio, &tmp_memory_ratio, sizeof(tmp_memory_ratio));
  if (raw_memory_ratio != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(17, this->_internal_memory_ratio(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.model.ModelProto.Node)
  return target;
}

size_t ModelProto_Node::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.data.model.ModelProto.Node)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.data.model.ModelProto.Node.Parameter parameters = 11;
  total_size += 1UL * this->_internal_parameters_size();
  for (const auto& msg : this->_impl_.parameters_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // repeated int64 inputs = 14;
  {
    size_t data_size = ::_pbi::WireFormatLite::
      Int64Size(this->_impl_.inputs_);
    if (data_size > 0) {
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    int cached_size = ::_pbi::ToCachedSize(data_size);
    _impl_._inputs_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  // string name = 2;
  if (!this->_internal_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_name());
  }

  // int64 id = 1;
  if (this->_internal_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_id());
  }

  // int64 buffered_bytes = 4;
  if (this->_internal_buffered_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_buffered_bytes());
  }

  // int64 buffered_elements = 5;
  if (this->_internal_buffered_elements() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_buffered_elements());
  }

  // int64 bytes_consumed = 6;
  if (this->_internal_bytes_consumed() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_bytes_consumed());
  }

  // int64 bytes_produced = 7;
  if (this->_internal_bytes_produced() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_bytes_produced());
  }

  // int64 num_elements = 8;
  if (this->_internal_num_elements() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_num_elements());
  }

  // int64 processing_time = 9;
  if (this->_internal_processing_time() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_processing_time());
  }

  // bool autotune = 3;
  if (this->_internal_autotune() != 0) {
    total_size += 1 + 1;
  }

  // bool record_metrics = 10;
  if (this->_internal_record_metrics() != 0) {
    total_size += 1 + 1;
  }

  // .tensorflow.data.model.NodeClass node_class = 15;
  if (this->_internal_node_class() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_node_class());
  }

  // double input_processing_time_sum = 12;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_input_processing_time_sum = this->_internal_input_processing_time_sum();
  ::uint64_t raw_input_processing_time_sum;
  memcpy(&raw_input_processing_time_sum, &tmp_input_processing_time_sum, sizeof(tmp_input_processing_time_sum));
  if (raw_input_processing_time_sum != 0) {
    total_size += 1 + 8;
  }

  // int64 input_processing_time_count = 13;
  if (this->_internal_input_processing_time_count() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_input_processing_time_count());
  }

  // double ratio = 16;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_ratio = this->_internal_ratio();
  ::uint64_t raw_ratio;
  memcpy(&raw_ratio, &tmp_ratio, sizeof(tmp_ratio));
  if (raw_ratio != 0) {
    total_size += 2 + 8;
  }

  // double memory_ratio = 17;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_memory_ratio = this->_internal_memory_ratio();
  ::uint64_t raw_memory_ratio;
  memcpy(&raw_memory_ratio, &tmp_memory_ratio, sizeof(tmp_memory_ratio));
  if (raw_memory_ratio != 0) {
    total_size += 2 + 8;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void ModelProto_Node::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ModelProto_Node*>(
      &from));
}

void ModelProto_Node::MergeFrom(const ModelProto_Node& from) {
  ModelProto_Node* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.model.ModelProto.Node)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.parameters_.MergeFrom(from._impl_.parameters_);
  _this->_impl_.inputs_.MergeFrom(from._impl_.inputs_);
  if (!from._internal_name().empty()) {
    _this->_internal_set_name(from._internal_name());
  }
  if (from._internal_id() != 0) {
    _this->_internal_set_id(from._internal_id());
  }
  if (from._internal_buffered_bytes() != 0) {
    _this->_internal_set_buffered_bytes(from._internal_buffered_bytes());
  }
  if (from._internal_buffered_elements() != 0) {
    _this->_internal_set_buffered_elements(from._internal_buffered_elements());
  }
  if (from._internal_bytes_consumed() != 0) {
    _this->_internal_set_bytes_consumed(from._internal_bytes_consumed());
  }
  if (from._internal_bytes_produced() != 0) {
    _this->_internal_set_bytes_produced(from._internal_bytes_produced());
  }
  if (from._internal_num_elements() != 0) {
    _this->_internal_set_num_elements(from._internal_num_elements());
  }
  if (from._internal_processing_time() != 0) {
    _this->_internal_set_processing_time(from._internal_processing_time());
  }
  if (from._internal_autotune() != 0) {
    _this->_internal_set_autotune(from._internal_autotune());
  }
  if (from._internal_record_metrics() != 0) {
    _this->_internal_set_record_metrics(from._internal_record_metrics());
  }
  if (from._internal_node_class() != 0) {
    _this->_internal_set_node_class(from._internal_node_class());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_input_processing_time_sum = from._internal_input_processing_time_sum();
  ::uint64_t raw_input_processing_time_sum;
  memcpy(&raw_input_processing_time_sum, &tmp_input_processing_time_sum, sizeof(tmp_input_processing_time_sum));
  if (raw_input_processing_time_sum != 0) {
    _this->_internal_set_input_processing_time_sum(from._internal_input_processing_time_sum());
  }
  if (from._internal_input_processing_time_count() != 0) {
    _this->_internal_set_input_processing_time_count(from._internal_input_processing_time_count());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_ratio = from._internal_ratio();
  ::uint64_t raw_ratio;
  memcpy(&raw_ratio, &tmp_ratio, sizeof(tmp_ratio));
  if (raw_ratio != 0) {
    _this->_internal_set_ratio(from._internal_ratio());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_memory_ratio = from._internal_memory_ratio();
  ::uint64_t raw_memory_ratio;
  memcpy(&raw_memory_ratio, &tmp_memory_ratio, sizeof(tmp_memory_ratio));
  if (raw_memory_ratio != 0) {
    _this->_internal_set_memory_ratio(from._internal_memory_ratio());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void ModelProto_Node::CopyFrom(const ModelProto_Node& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.model.ModelProto.Node)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ModelProto_Node::IsInitialized() const {
  return true;
}

void ModelProto_Node::InternalSwap(ModelProto_Node* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.parameters_.InternalSwap(&other->_impl_.parameters_);
  _impl_.inputs_.InternalSwap(&other->_impl_.inputs_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.name_, lhs_arena,
      &other->_impl_.name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(ModelProto_Node, _impl_.memory_ratio_)
      + sizeof(ModelProto_Node::_impl_.memory_ratio_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(ModelProto_Node, _impl_.id_)>(
          reinterpret_cast<char*>(&_impl_.id_),
          reinterpret_cast<char*>(&other->_impl_.id_));
}

std::string ModelProto_Node::GetTypeName() const {
  return "tensorflow.data.model.ModelProto.Node";
}


// ===================================================================

ModelProto_NodesEntry_DoNotUse::ModelProto_NodesEntry_DoNotUse() {}
ModelProto_NodesEntry_DoNotUse::ModelProto_NodesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
    : SuperType(arena) {}
void ModelProto_NodesEntry_DoNotUse::MergeFrom(const ModelProto_NodesEntry_DoNotUse& other) {
  MergeFromInternal(other);
}

// ===================================================================

class ModelProto_OptimizationParams::_Internal {
 public:
};

ModelProto_OptimizationParams::ModelProto_OptimizationParams(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                                             bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.model.ModelProto.OptimizationParams)
}
ModelProto_OptimizationParams::ModelProto_OptimizationParams(const ModelProto_OptimizationParams& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ModelProto_OptimizationParams* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.cpu_budget_){}
    , decltype(_impl_.ram_budget_){}
    , decltype(_impl_.model_input_time_){}
    , decltype(_impl_.algorithm_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.cpu_budget_, &from._impl_.cpu_budget_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.algorithm_) -
    reinterpret_cast<char*>(&_impl_.cpu_budget_)) + sizeof(_impl_.algorithm_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.model.ModelProto.OptimizationParams)
}

inline void ModelProto_OptimizationParams::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.cpu_budget_){::int64_t{0}}
    , decltype(_impl_.ram_budget_){::int64_t{0}}
    , decltype(_impl_.model_input_time_){0}
    , decltype(_impl_.algorithm_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

ModelProto_OptimizationParams::~ModelProto_OptimizationParams() {
  // @@protoc_insertion_point(destructor:tensorflow.data.model.ModelProto.OptimizationParams)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void ModelProto_OptimizationParams::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

void ModelProto_OptimizationParams::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ModelProto_OptimizationParams::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.data.model.ModelProto.OptimizationParams)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&_impl_.cpu_budget_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.algorithm_) -
      reinterpret_cast<char*>(&_impl_.cpu_budget_)) + sizeof(_impl_.algorithm_));
  _internal_metadata_.Clear<std::string>();
}
1411
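// Lite-runtime parse loop: for each tag, `tag >> 3` is the field number and
// the low three bits are the wire type, so the byte checks below match the
// expected encodings (field 1 varint = 8, field 2 varint = 16, field 3
// varint = 24, field 4 fixed64 = 33).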
const char* ModelProto_OptimizationParams::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.data.model.AutotuneAlgorithm algorithm = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_algorithm(static_cast<::tensorflow::data::model::AutotuneAlgorithm>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 cpu_budget = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.cpu_budget_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 ram_budget = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.ram_budget_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // double model_input_time = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 33)) {
          _impl_.model_input_time_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* ModelProto_OptimizationParams::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.model.ModelProto.OptimizationParams)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.data.model.AutotuneAlgorithm algorithm = 1;
  if (this->_internal_algorithm() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      1, this->_internal_algorithm(), target);
  }

  // int64 cpu_budget = 2;
  if (this->_internal_cpu_budget() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_cpu_budget(), target);
  }

  // int64 ram_budget = 3;
  if (this->_internal_ram_budget() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_ram_budget(), target);
  }

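  // The raw bit comparison below (memcpy into a uint64_t) skips only an exact
  // +0.0 default; a stored -0.0 has a non-zero bit pattern and is still
  // written to the wire.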
  // double model_input_time = 4;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_model_input_time = this->_internal_model_input_time();
  ::uint64_t raw_model_input_time;
  memcpy(&raw_model_input_time, &tmp_model_input_time, sizeof(tmp_model_input_time));
  if (raw_model_input_time != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(4, this->_internal_model_input_time(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.model.ModelProto.OptimizationParams)
  return target;
}

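// ByteSizeLong() totals the encoded size of every non-default field (one tag
// byte plus payload for each field here) and caches the result via
// SetCachedSize() so a following serialization pass can reuse it.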
size_t ModelProto_OptimizationParams::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.data.model.ModelProto.OptimizationParams)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // int64 cpu_budget = 2;
  if (this->_internal_cpu_budget() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_cpu_budget());
  }

  // int64 ram_budget = 3;
  if (this->_internal_ram_budget() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_ram_budget());
  }

  // double model_input_time = 4;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_model_input_time = this->_internal_model_input_time();
  ::uint64_t raw_model_input_time;
  memcpy(&raw_model_input_time, &tmp_model_input_time, sizeof(tmp_model_input_time));
  if (raw_model_input_time != 0) {
    total_size += 1 + 8;
  }

  // .tensorflow.data.model.AutotuneAlgorithm algorithm = 1;
  if (this->_internal_algorithm() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_algorithm());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void ModelProto_OptimizationParams::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ModelProto_OptimizationParams*>(
      &from));
}

void ModelProto_OptimizationParams::MergeFrom(const ModelProto_OptimizationParams& from) {
  ModelProto_OptimizationParams* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.model.ModelProto.OptimizationParams)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_cpu_budget() != 0) {
    _this->_internal_set_cpu_budget(from._internal_cpu_budget());
  }
  if (from._internal_ram_budget() != 0) {
    _this->_internal_set_ram_budget(from._internal_ram_budget());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_model_input_time = from._internal_model_input_time();
  ::uint64_t raw_model_input_time;
  memcpy(&raw_model_input_time, &tmp_model_input_time, sizeof(tmp_model_input_time));
  if (raw_model_input_time != 0) {
    _this->_internal_set_model_input_time(from._internal_model_input_time());
  }
  if (from._internal_algorithm() != 0) {
    _this->_internal_set_algorithm(from._internal_algorithm());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void ModelProto_OptimizationParams::CopyFrom(const ModelProto_OptimizationParams& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.model.ModelProto.OptimizationParams)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ModelProto_OptimizationParams::IsInitialized() const {
  return true;
}

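// InternalSwap() exchanges the contiguous block of trivially-copyable fields
// between cpu_budget_ and algorithm_ with a single memswap call; the template
// argument is the block size computed from the two field offsets.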
void ModelProto_OptimizationParams::InternalSwap(ModelProto_OptimizationParams* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(ModelProto_OptimizationParams, _impl_.algorithm_)
      + sizeof(ModelProto_OptimizationParams::_impl_.algorithm_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(ModelProto_OptimizationParams, _impl_.cpu_budget_)>(
          reinterpret_cast<char*>(&_impl_.cpu_budget_),
          reinterpret_cast<char*>(&other->_impl_.cpu_budget_));
}

std::string ModelProto_OptimizationParams::GetTypeName() const {
  return "tensorflow.data.model.ModelProto.OptimizationParams";
}


// ===================================================================

class ModelProto::_Internal {
 public:
  static const ::tensorflow::data::model::ModelProto_OptimizationParams& optimization_params(const ModelProto* msg);
};

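// Helper used by serialization to dereference the optimization_params_
// submessage pointer directly; it is only called after
// _internal_has_optimization_params() has been checked.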
const ::tensorflow::data::model::ModelProto_OptimizationParams&
ModelProto::_Internal::optimization_params(const ModelProto* msg) {
  return *msg->_impl_.optimization_params_;
}
ModelProto::ModelProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.model.ModelProto)
}
ModelProto::ModelProto(const ModelProto& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ModelProto* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      /*decltype(_impl_.nodes_)*/{}
    , decltype(_impl_.optimization_params_){nullptr}
    , decltype(_impl_.output_){}
    , decltype(_impl_.id_counter_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.nodes_.MergeFrom(from._impl_.nodes_);
  if (from._internal_has_optimization_params()) {
    _this->_impl_.optimization_params_ = new ::tensorflow::data::model::ModelProto_OptimizationParams(*from._impl_.optimization_params_);
  }
  ::memcpy(&_impl_.output_, &from._impl_.output_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.id_counter_) -
    reinterpret_cast<char*>(&_impl_.output_)) + sizeof(_impl_.id_counter_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.model.ModelProto)
}

inline void ModelProto::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      /*decltype(_impl_.nodes_)*/{::_pbi::ArenaInitialized(), arena}
    , decltype(_impl_.optimization_params_){nullptr}
    , decltype(_impl_.output_){::int64_t{0}}
    , decltype(_impl_.id_counter_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

ModelProto::~ModelProto() {
  // @@protoc_insertion_point(destructor:tensorflow.data.model.ModelProto)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void ModelProto::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.nodes_.Destruct();
  _impl_.nodes_.~MapFieldLite();
  if (this != internal_default_instance()) delete _impl_.optimization_params_;
}

void ModelProto::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ModelProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.data.model.ModelProto)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.nodes_.Clear();
  if (GetArenaForAllocation() == nullptr && _impl_.optimization_params_ != nullptr) {
    delete _impl_.optimization_params_;
  }
  _impl_.optimization_params_ = nullptr;
  ::memset(&_impl_.output_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.id_counter_) -
      reinterpret_cast<char*>(&_impl_.output_)) + sizeof(_impl_.id_counter_));
  _internal_metadata_.Clear<std::string>();
}

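// The parse loop below mirrors the one for OptimizationParams; field 1
// (`nodes`) is a map, so consecutive length-delimited entries (tag byte 10)
// are consumed in an inner loop for as long as the next tag matches.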
const char* ModelProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(&_impl_.nodes_, ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 output = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.output_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 id_counter = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.id_counter_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
          ptr = ctx->ParseMessage(_internal_mutable_optimization_params(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* ModelProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.model.ModelProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

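  // When deterministic serialization is requested, map entries are first
  // sorted by key via MapSorterFlat so the byte output is stable across runs;
  // otherwise entries are written in the map's (unspecified) iteration order.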
  // map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1;
  if (!this->_internal_nodes().empty()) {
    using MapType = ::_pb::Map<::int64_t, ::tensorflow::data::model::ModelProto_Node>;
    using WireHelper = ModelProto_NodesEntry_DoNotUse::Funcs;
    const auto& map_field = this->_internal_nodes();

    if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
      for (const auto& entry : ::_pbi::MapSorterFlat<MapType>(map_field)) {
        target = WireHelper::InternalSerialize(1, entry.first, entry.second, target, stream);
      }
    } else {
      for (const auto& entry : map_field) {
        target = WireHelper::InternalSerialize(1, entry.first, entry.second, target, stream);
      }
    }
  }

  // int64 output = 2;
  if (this->_internal_output() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_output(), target);
  }

  // int64 id_counter = 3;
  if (this->_internal_id_counter() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_id_counter(), target);
  }

  // .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5;
  if (this->_internal_has_optimization_params()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(5, _Internal::optimization_params(this),
        _Internal::optimization_params(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.model.ModelProto)
  return target;
}

size_t ModelProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.data.model.ModelProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_nodes_size());
  for (::PROTOBUF_NAMESPACE_ID::Map< ::int64_t, ::tensorflow::data::model::ModelProto_Node >::const_iterator
      it = this->_internal_nodes().begin();
      it != this->_internal_nodes().end(); ++it) {
    total_size += ModelProto_NodesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
  }

  // .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5;
  if (this->_internal_has_optimization_params()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.optimization_params_);
  }

  // int64 output = 2;
  if (this->_internal_output() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_output());
  }

  // int64 id_counter = 3;
  if (this->_internal_id_counter() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_id_counter());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void ModelProto::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ModelProto*>(
      &from));
}

void ModelProto::MergeFrom(const ModelProto& from) {
  ModelProto* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.model.ModelProto)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.nodes_.MergeFrom(from._impl_.nodes_);
  if (from._internal_has_optimization_params()) {
    _this->_internal_mutable_optimization_params()->::tensorflow::data::model::ModelProto_OptimizationParams::MergeFrom(
        from._internal_optimization_params());
  }
  if (from._internal_output() != 0) {
    _this->_internal_set_output(from._internal_output());
  }
  if (from._internal_id_counter() != 0) {
    _this->_internal_set_id_counter(from._internal_id_counter());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void ModelProto::CopyFrom(const ModelProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.model.ModelProto)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ModelProto::IsInitialized() const {
  return true;
}

void ModelProto::InternalSwap(ModelProto* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.nodes_.InternalSwap(&other->_impl_.nodes_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(ModelProto, _impl_.id_counter_)
      + sizeof(ModelProto::_impl_.id_counter_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(ModelProto, _impl_.optimization_params_)>(
          reinterpret_cast<char*>(&_impl_.optimization_params_),
          reinterpret_cast<char*>(&other->_impl_.optimization_params_));
}

std::string ModelProto::GetTypeName() const {
  return "tensorflow.data.model.ModelProto";
}


// @@protoc_insertion_point(namespace_scope)
}  // namespace model
}  // namespace data
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
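// Explicit specializations that let Arena::CreateMaybeMessage construct each
// of these messages either on the given arena or on the heap when the arena
// pointer is null.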
template<> PROTOBUF_NOINLINE ::tensorflow::data::model::ModelProto_Node_Parameter*
Arena::CreateMaybeMessage< ::tensorflow::data::model::ModelProto_Node_Parameter >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::data::model::ModelProto_Node_Parameter >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::data::model::ModelProto_Node*
Arena::CreateMaybeMessage< ::tensorflow::data::model::ModelProto_Node >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::data::model::ModelProto_Node >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::data::model::ModelProto_NodesEntry_DoNotUse*
Arena::CreateMaybeMessage< ::tensorflow::data::model::ModelProto_NodesEntry_DoNotUse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::data::model::ModelProto_NodesEntry_DoNotUse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::data::model::ModelProto_OptimizationParams*
Arena::CreateMaybeMessage< ::tensorflow::data::model::ModelProto_OptimizationParams >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::data::model::ModelProto_OptimizationParams >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::data::model::ModelProto*
Arena::CreateMaybeMessage< ::tensorflow::data::model::ModelProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::data::model::ModelProto >(arena);
}
PROTOBUF_NAMESPACE_CLOSE

// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>