// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <typeinfo>

#include <google/protobuf/arena_impl.h>
#include <google/protobuf/arenaz_sampler.h>
#include <google/protobuf/port.h>

#include <google/protobuf/stubs/mutex.h>
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

// Must be included last.
#include <google/protobuf/port_def.inc>

namespace google {
namespace protobuf {
namespace internal {

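// Allocates the memory for a new arena block. Block sizes double from
// `last_size` up to the policy's max_block_size, and are raised further if
// needed so that `min_bytes` plus the block header still fit.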
static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
                                          size_t last_size, size_t min_bytes) {
  AllocationPolicy policy;  // default policy
  if (policy_ptr) policy = *policy_ptr;
  size_t size;
  if (last_size != 0) {
    // Double the current block size, up to a limit.
    auto max_size = policy.max_block_size;
    size = std::min(2 * last_size, max_size);
  } else {
    size = policy.start_block_size;
  }
  // Verify that min_bytes + kBlockHeaderSize won't overflow.
  GOOGLE_CHECK_LE(min_bytes,
                  std::numeric_limits<size_t>::max() - SerialArena::kBlockHeaderSize);
  size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);

  void* mem;
  if (policy.block_alloc == nullptr) {
    mem = ::operator new(size);
  } else {
    mem = policy.block_alloc(size);
  }
  return {mem, size};
}

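// Deallocation functor passed to SerialArena::Free() and
// ThreadSafeArena::Free(). Returns a block either to the policy's custom
// block_dealloc or to sized operator delete, and accumulates the number of
// bytes released into *space_allocated.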
class GetDeallocator {
 public:
  GetDeallocator(const AllocationPolicy* policy, size_t* space_allocated)
      : dealloc_(policy ? policy->block_dealloc : nullptr),
        space_allocated_(space_allocated) {}

  void operator()(SerialArena::Memory mem) const {
#ifdef ADDRESS_SANITIZER
    // This memory was provided by the underlying allocator as unpoisoned,
    // so return it in an unpoisoned state.
    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
#endif  // ADDRESS_SANITIZER
    if (dealloc_) {
      dealloc_(mem.ptr, mem.size);
    } else {
      internal::SizedDelete(mem.ptr, mem.size);
    }
    *space_allocated_ += mem.size;
  }

 private:
  void (*dealloc_)(void*, size_t);
  size_t* space_allocated_;
};

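// Note: the SerialArena object itself is placement-constructed inside the
// first block, right after the block header (see SerialArena::New() below),
// so ptr_ starts past both the header and the SerialArena.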
SerialArena::SerialArena(Block* b, void* owner, ThreadSafeArenaStats* stats)
    : space_allocated_(b->size) {
  owner_ = owner;
  head_ = b;
  ptr_ = b->Pointer(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize);
  limit_ = b->Pointer(b->size & static_cast<size_t>(-8));
  arena_stats_ = stats;
}

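// Creates a SerialArena inside `mem`, laid out as
// [block header][SerialArena][usable space].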
SerialArena* SerialArena::New(Memory mem, void* owner,
                              ThreadSafeArenaStats* stats) {
  GOOGLE_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.size);
  ThreadSafeArenaStats::RecordAllocateStats(
      stats, /*requested=*/mem.size, /*allocated=*/mem.size, /*wasted=*/0);
  auto b = new (mem.ptr) Block{nullptr, mem.size};
  return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, owner, stats);
}

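// Frees every block except the last one in the chain (the initial block,
// which may be user-owned) and returns that remaining block to the caller.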
template <typename Deallocator>
SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
  Block* b = head_;
  Memory mem = {b, b->size};
  while (b->next) {
    b = b->next;  // We must first advance before deleting this block
    deallocator(mem);
    mem = {b, b->size};
  }
  return mem;
}

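// Slow paths taken when the current block cannot satisfy the request:
// allocate a new block, then allocate from it.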
PROTOBUF_NOINLINE
std::pair<void*, SerialArena::CleanupNode*>
SerialArena::AllocateAlignedWithCleanupFallback(
    size_t n, const AllocationPolicy* policy) {
  AllocateNewBlock(n + kCleanupSize, policy);
  return AllocateFromExistingWithCleanupFallback(n);
}

PROTOBUF_NOINLINE
void* SerialArena::AllocateAlignedFallback(size_t n,
                                           const AllocationPolicy* policy) {
  AllocateNewBlock(n, policy);
  return AllocateFromExisting(n);
}

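// Retires the current head block (recording how much of it was used and
// syncing its cleanup-node boundary) and installs a newly allocated block as
// the new head.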
void SerialArena::AllocateNewBlock(size_t n, const AllocationPolicy* policy) {
  // Sync limit to the block so CleanupList() can find its cleanup nodes.
  head_->start = reinterpret_cast<CleanupNode*>(limit_);

  // Record how much of this block was used.
  size_t used = ptr_ - head_->Pointer(kBlockHeaderSize);
  size_t wasted = head_->size - used;
  space_used_ += used;

  // TODO(sbenza): Evaluate if pushing unused space into the cached blocks is a
  // win. Preliminary testing showed increased memory savings as expected, but
  // with a CPU regression. The regression might have been an artifact of the
  // microbenchmark.

  auto mem = AllocateMemory(policy, head_->size, n);
  // We don't want to emit an expensive RMW instruction that requires
  // exclusive access to a cacheline. Hence we write it in terms of a
  // regular add.
  auto relaxed = std::memory_order_relaxed;
  space_allocated_.store(space_allocated_.load(relaxed) + mem.size, relaxed);
  ThreadSafeArenaStats::RecordAllocateStats(arena_stats_, /*requested=*/n,
                                            /*allocated=*/mem.size, wasted);
  head_ = new (mem.ptr) Block{head_, mem.size};
  ptr_ = head_->Pointer(kBlockHeaderSize);
  limit_ = head_->Pointer(head_->size);

#ifdef ADDRESS_SANITIZER
  ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif  // ADDRESS_SANITIZER
}

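// Bytes handed out from this SerialArena, excluding block headers and the
// space occupied by the SerialArena object itself.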
uint64_t SerialArena::SpaceUsed() const {
  uint64_t space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
  space_used += space_used_;
  // Remove the overhead of the SerialArena itself.
  space_used -= ThreadSafeArena::kSerialArenaSize;
  return space_used;
}

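// Runs every cleanup function registered with this SerialArena. Cleanup nodes
// occupy the tail of each block, between `start` and the block's 8-byte
// aligned end.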
void SerialArena::CleanupList() {
  Block* b = head_;
  b->start = reinterpret_cast<CleanupNode*>(limit_);
  do {
    auto* limit = reinterpret_cast<CleanupNode*>(
        b->Pointer(b->size & static_cast<size_t>(-8)));
    auto it = b->start;
    auto num = limit - it;
    if (num > 0) {
      for (; it < limit; it++) {
        it->cleanup(it->elem);
      }
    }
    b = b->next;
  } while (b);
}


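// Definition of the per-thread cache that lets a thread find its SerialArena
// without scanning the arena's thread list. The representation depends on
// thread-local storage support and on whether protobuf is built as a DLL.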
ThreadSafeArena::CacheAlignedLifecycleIdGenerator
    ThreadSafeArena::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
  static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_ = {
      0, static_cast<LifecycleIdAtomic>(-1), nullptr};
  return thread_cache_;
}
#else
PROTOBUF_THREAD_LOCAL ThreadSafeArena::ThreadCache
    ThreadSafeArena::thread_cache_ = {0, static_cast<LifecycleIdAtomic>(-1),
                                      nullptr};
#endif

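// Initialization used when no explicit AllocationPolicy is supplied; `mem`
// may be a user-provided initial block.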
void ThreadSafeArena::InitializeFrom(void* mem, size_t size) {
  GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
  GOOGLE_DCHECK(!AllocPolicy());  // Reset should call InitializeWithPolicy instead.
  Init();

  // Ignore initial block if it is too small.
  if (mem != nullptr && size >= kBlockHeaderSize + kSerialArenaSize) {
    alloc_policy_.set_is_user_owned_initial_block(true);
    SetInitialBlock(mem, size);
  }
}

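// Initialization used when an AllocationPolicy is supplied. The policy is
// copied into the first block so it stays valid for the arena's lifetime.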
void ThreadSafeArena::InitializeWithPolicy(void* mem, size_t size,
                                           AllocationPolicy policy) {
#ifndef NDEBUG
  const uint64_t old_alloc_policy = alloc_policy_.get_raw();
  // If there was a policy (e.g., in Reset()), make sure flags were preserved.
#define GOOGLE_DCHECK_POLICY_FLAGS_() \
  if (old_alloc_policy > 3)           \
    GOOGLE_CHECK_EQ(old_alloc_policy & 3, alloc_policy_.get_raw() & 3)
#else
#define GOOGLE_DCHECK_POLICY_FLAGS_()
#endif  // NDEBUG

  if (policy.IsDefault()) {
    // Legacy code doesn't use the API above, but provides the initial block
    // through ArenaOptions. I suspect most do not touch the allocation
    // policy parameters.
    InitializeFrom(mem, size);
    GOOGLE_DCHECK_POLICY_FLAGS_();
    return;
  }
  GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
  Init();

  // Ignore initial block if it is too small. We include an optional
  // AllocationPolicy in this check, so that this can be allocated on the
  // first block.
  constexpr size_t kAPSize = internal::AlignUpTo8(sizeof(AllocationPolicy));
  constexpr size_t kMinimumSize = kBlockHeaderSize + kSerialArenaSize + kAPSize;

  // The value for alloc_policy_ stores whether or not allocations should be
  // recorded.
  alloc_policy_.set_should_record_allocs(
      policy.metrics_collector != nullptr &&
      policy.metrics_collector->RecordAllocs());
  // Make sure we have an initial block to store the AllocationPolicy.
  if (mem != nullptr && size >= kMinimumSize) {
    alloc_policy_.set_is_user_owned_initial_block(true);
  } else {
    auto tmp = AllocateMemory(&policy, 0, kMinimumSize);
    mem = tmp.ptr;
    size = tmp.size;
  }
  SetInitialBlock(mem, size);

  auto sa = threads_.load(std::memory_order_relaxed);
  // We ensured enough space so this cannot fail.
  void* p;
  if (!sa || !sa->MaybeAllocateAligned(kAPSize, &p)) {
    GOOGLE_LOG(FATAL) << "MaybeAllocateAligned cannot fail here.";
    return;
  }
  new (p) AllocationPolicy{policy};
  // Low bits store flags, so they mustn't be overwritten.
  GOOGLE_DCHECK_EQ(0, reinterpret_cast<uintptr_t>(p) & 3);
  alloc_policy_.set_policy(reinterpret_cast<AllocationPolicy*>(p));
  GOOGLE_DCHECK_POLICY_FLAGS_();

#undef GOOGLE_DCHECK_POLICY_FLAGS_
}

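// Assigns this arena a fresh lifecycle id from the calling thread's cached
// range, refilling the range from the global generator when it is exhausted.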
void ThreadSafeArena::Init() {
#ifndef NDEBUG
  const bool was_message_owned = IsMessageOwned();
#endif  // NDEBUG
  ThreadCache& tc = thread_cache();
  auto id = tc.next_lifecycle_id;
  // We increment lifecycle ids in multiples of two so we can use bit 0 as
  // a tag.
  constexpr uint64_t kDelta = 2;
  constexpr uint64_t kInc = ThreadCache::kPerThreadIds * kDelta;
  if (PROTOBUF_PREDICT_FALSE((id & (kInc - 1)) == 0)) {
    constexpr auto relaxed = std::memory_order_relaxed;
    // On platforms that don't support uint64_t atomics we can't afford to
    // increment by large intervals and expect uniqueness due to wrapping,
    // hence we only add by 1.
    id = lifecycle_id_generator_.id.fetch_add(1, relaxed) * kInc;
  }
  tc.next_lifecycle_id = id + kDelta;
  // Message ownership is stored in tag_and_id_, and is set in the constructor.
  // This flag bit must be preserved, even across calls to Reset().
  tag_and_id_ = id | (tag_and_id_ & kMessageOwnedArena);
  hint_.store(nullptr, std::memory_order_relaxed);
  threads_.store(nullptr, std::memory_order_relaxed);
#ifndef NDEBUG
  GOOGLE_CHECK_EQ(was_message_owned, IsMessageOwned());
#endif  // NDEBUG
  arena_stats_ = Sample();
}

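// Wraps the initial block in a SerialArena owned by the current thread and
// publishes it as the head of the thread list.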
void ThreadSafeArena::SetInitialBlock(void* mem, size_t size) {
  SerialArena* serial = SerialArena::New({mem, size}, &thread_cache(),
                                         arena_stats_.MutableStats());
  serial->set_next(nullptr);
  threads_.store(serial, std::memory_order_relaxed);
  CacheSerialArena(serial);
}

ThreadSafeArena::~ThreadSafeArena() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();

  size_t space_allocated = 0;
  auto mem = Free(&space_allocated);

  // Policy is about to get deleted.
  auto* p = alloc_policy_.get();
  ArenaMetricsCollector* collector = p ? p->metrics_collector : nullptr;

  if (alloc_policy_.is_user_owned_initial_block()) {
#ifdef ADDRESS_SANITIZER
    // Unpoison the initial block, now that it's going back to the user.
    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
#endif  // ADDRESS_SANITIZER
    space_allocated += mem.size;
  } else {
    GetDeallocator(alloc_policy_.get(), &space_allocated)(mem);
  }

  if (collector) collector->OnDestroy(space_allocated);
}

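// Frees the blocks of every SerialArena except for the arena's very first
// block, which is returned to the caller (it may be user-owned).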
SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
  SerialArena::Memory mem = {nullptr, 0};
  auto deallocator = GetDeallocator(alloc_policy_.get(), space_allocated);
  PerSerialArena([deallocator, &mem](SerialArena* a) {
    if (mem.ptr) deallocator(mem);
    mem = a->Free(deallocator);
  });
  return mem;
}

uint64_t ThreadSafeArena::Reset() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();

  // Discard all blocks except the user-owned initial block (if present).
  size_t space_allocated = 0;
  auto mem = Free(&space_allocated);
  arena_stats_.RecordReset();

  AllocationPolicy* policy = alloc_policy_.get();
  if (policy) {
    auto saved_policy = *policy;
    if (alloc_policy_.is_user_owned_initial_block()) {
      space_allocated += mem.size;
    } else {
      GetDeallocator(alloc_policy_.get(), &space_allocated)(mem);
      mem.ptr = nullptr;
      mem.size = 0;
    }
    ArenaMetricsCollector* collector = saved_policy.metrics_collector;
    if (collector) collector->OnReset(space_allocated);
    InitializeWithPolicy(mem.ptr, mem.size, saved_policy);
  } else {
    GOOGLE_DCHECK(!alloc_policy_.should_record_allocs());
    // Nullptr policy
    if (alloc_policy_.is_user_owned_initial_block()) {
      space_allocated += mem.size;
      InitializeFrom(mem.ptr, mem.size);
    } else {
      GetDeallocator(alloc_policy_.get(), &space_allocated)(mem);
      Init();
    }
  }

  return space_allocated;
}

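// Fast path: if allocations don't need to be recorded and the calling thread
// already has a SerialArena, allocate from it directly; otherwise take the
// fallback path.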
std::pair<void*, SerialArena::CleanupNode*>
ThreadSafeArena::AllocateAlignedWithCleanup(size_t n,
                                            const std::type_info* type) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(!alloc_policy_.should_record_allocs() &&
                            GetSerialArenaFast(&arena))) {
    return arena->AllocateAlignedWithCleanup(n, alloc_policy_.get());
  } else {
    return AllocateAlignedWithCleanupFallback(n, type);
  }
}

void ThreadSafeArena::AddCleanup(void* elem, void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_FALSE(!GetSerialArenaFast(&arena))) {
    arena = GetSerialArenaFallback(&thread_cache());
  }
  arena->AddCleanup(elem, cleanup, AllocPolicy());
}

PROTOBUF_NOINLINE
void* ThreadSafeArena::AllocateAlignedFallback(size_t n,
                                               const std::type_info* type) {
  if (alloc_policy_.should_record_allocs()) {
    alloc_policy_.RecordAlloc(type, n);
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
      return arena->AllocateAligned(n, alloc_policy_.get());
    }
  }
  return GetSerialArenaFallback(&thread_cache())
      ->AllocateAligned(n, alloc_policy_.get());
}

PROTOBUF_NOINLINE
std::pair<void*, SerialArena::CleanupNode*>
ThreadSafeArena::AllocateAlignedWithCleanupFallback(
    size_t n, const std::type_info* type) {
  if (alloc_policy_.should_record_allocs()) {
    alloc_policy_.RecordAlloc(type, n);
    SerialArena* arena;
    if (GetSerialArenaFast(&arena)) {
      return arena->AllocateAlignedWithCleanup(n, alloc_policy_.get());
    }
  }
  return GetSerialArenaFallback(&thread_cache())
      ->AllocateAlignedWithCleanup(n, alloc_policy_.get());
}

uint64_t ThreadSafeArena::SpaceAllocated() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64_t res = 0;
  for (; serial; serial = serial->next()) {
    res += serial->SpaceAllocated();
  }
  return res;
}

uint64_t ThreadSafeArena::SpaceUsed() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64_t space_used = 0;
  for (; serial; serial = serial->next()) {
    space_used += serial->SpaceUsed();
  }
  return space_used - (alloc_policy_.get() ? sizeof(AllocationPolicy) : 0);
}

void ThreadSafeArena::CleanupList() {
  PerSerialArena([](SerialArena* a) { a->CleanupList(); });
}

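// Slow path of GetSerialArena(): walk the thread list looking for a
// SerialArena owned by `me`, creating and registering a new one if none
// exists, then cache it for subsequent lookups.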
PROTOBUF_NOINLINE
SerialArena* ThreadSafeArena::GetSerialArenaFallback(void* me) {
  // Look for this SerialArena in our linked list.
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  for (; serial; serial = serial->next()) {
    if (serial->owner() == me) {
      break;
    }
  }

  if (!serial) {
    // This thread doesn't have any SerialArena, which also means it doesn't
    // have any blocks yet. So we'll allocate its first block now.
    serial = SerialArena::New(
        AllocateMemory(alloc_policy_.get(), 0, kSerialArenaSize), me,
        arena_stats_.MutableStats());

    SerialArena* head = threads_.load(std::memory_order_relaxed);
    do {
      serial->set_next(head);
    } while (!threads_.compare_exchange_weak(
        head, serial, std::memory_order_release, std::memory_order_relaxed));
  }

  CacheSerialArena(serial);
  return serial;
}

}  // namespace internal

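// Out-of-line Arena entry points that forward to the ThreadSafeArena
// implementation. They are aligned to 32 bytes, presumably to keep these hot
// functions friendly to the instruction cache.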
PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedNoHook(size_t n) {
  return impl_.AllocateAligned(n, nullptr);
}

PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedWithHook(size_t n, const std::type_info* type) {
  return impl_.AllocateAligned(n, type);
}

PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedWithHookForArray(size_t n,
                                             const std::type_info* type) {
  return impl_.AllocateAligned<internal::AllocationClient::kArray>(n, type);
}

PROTOBUF_FUNC_ALIGN(32)
std::pair<void*, internal::SerialArena::CleanupNode*>
Arena::AllocateAlignedWithCleanup(size_t n, const std::type_info* type) {
  return impl_.AllocateAlignedWithCleanup(n, type);
}

}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>