1 //
2 //
3 // Copyright 2015 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18
19 #include <grpc/support/port_platform.h>
20
21 #include <grpc/impl/grpc_types.h>
22
23 #include "src/core/lib/iomgr/exec_ctx.h"
24 #include "src/core/lib/iomgr/port.h"
25
26 #ifdef GRPC_POSIX_SOCKET_TCP
27
28 #include <errno.h>
29 #include <limits.h>
30 #include <netinet/in.h>
31 #include <netinet/tcp.h>
32 #include <stdbool.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <string.h>
36 #include <sys/socket.h>
37 #include <sys/types.h>
38 #include <unistd.h>
39
40 #include <algorithm>
41 #include <unordered_map>
42
43 #include <grpc/slice.h>
44 #include <grpc/support/alloc.h>
45 #include <grpc/support/log.h>
46 #include <grpc/support/string_util.h>
47 #include <grpc/support/sync.h>
48 #include <grpc/support/time.h>
49
50 #include "src/core/lib/address_utils/sockaddr_utils.h"
51 #include "src/core/lib/debug/event_log.h"
52 #include "src/core/lib/debug/stats.h"
53 #include "src/core/lib/debug/stats_data.h"
54 #include "src/core/lib/debug/trace.h"
55 #include "src/core/lib/experiments/experiments.h"
56 #include "src/core/lib/gpr/string.h"
57 #include "src/core/lib/gpr/useful.h"
58 #include "src/core/lib/gprpp/crash.h"
59 #include "src/core/lib/gprpp/strerror.h"
60 #include "src/core/lib/gprpp/sync.h"
61 #include "src/core/lib/iomgr/buffer_list.h"
62 #include "src/core/lib/iomgr/ev_posix.h"
63 #include "src/core/lib/iomgr/event_engine_shims/endpoint.h"
64 #include "src/core/lib/iomgr/executor.h"
65 #include "src/core/lib/iomgr/socket_utils_posix.h"
66 #include "src/core/lib/iomgr/tcp_posix.h"
67 #include "src/core/lib/resource_quota/api.h"
68 #include "src/core/lib/resource_quota/memory_quota.h"
69 #include "src/core/lib/resource_quota/trace.h"
70 #include "src/core/lib/slice/slice_internal.h"
71 #include "src/core/lib/slice/slice_string_helpers.h"
72
73 #ifndef SOL_TCP
74 #define SOL_TCP IPPROTO_TCP
75 #endif
76
77 #ifndef TCP_INQ
78 #define TCP_INQ 36
79 #define TCP_CM_INQ TCP_INQ
80 #endif
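
// Illustrative sketch (an assumption about the kernel contract, not code used
// directly at this point): TCP_INQ is a Linux socket option (kernel 4.18+).
// A caller opts in once per socket, e.g.
//   int one = 1;
//   setsockopt(fd, SOL_TCP, TCP_INQ, &one, sizeof(one));
// after which recvmsg() may attach a control message with cmsg_level SOL_TCP
// and cmsg_type TCP_CM_INQ whose int payload is the number of bytes still
// queued in the kernel receive buffer. tcp_do_read() below parses exactly
// this cmsg to decide whether to keep reading before re-arming the poller.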
81
82 #ifdef GRPC_HAVE_MSG_NOSIGNAL
83 #define SENDMSG_FLAGS MSG_NOSIGNAL
84 #else
85 #define SENDMSG_FLAGS 0
86 #endif
87
88 // TCP zero copy sendmsg flag.
89 // NB: We define this here as a fallback in case we're using an older set of
90 // library headers that has not defined MSG_ZEROCOPY. Since this constant is
91 // part of the kernel, we are guaranteed it will never change/disagree so
92 // defining it here is safe.
93 #ifndef MSG_ZEROCOPY
94 #define MSG_ZEROCOPY 0x4000000
95 #endif
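
// Illustrative sketch of the zerocopy kernel contract assumed below (not code
// invoked from this file as-is): zerocopy transmission is opted into per
// socket and requested per call,
//   int one = 1;
//   setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
//   sendmsg(fd, &msg, MSG_ZEROCOPY);
// and the kernel later reports completion on the socket error queue (read via
// recvmsg() with MSG_ERRQUEUE) as a sock_extended_err with ee_origin ==
// SO_EE_ORIGIN_ZEROCOPY and an [ee_info, ee_data] range of completed send
// sequence numbers; the sent buffers may only be reused after that
// notification. TcpZerocopySendCtx/TcpZerocopySendRecord below track this.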
96
97 #ifdef GRPC_MSG_IOVLEN_TYPE
98 typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
99 #else
100 typedef size_t msg_iovlen_type;
101 #endif
102
103 extern grpc_core::TraceFlag grpc_tcp_trace;
104
105 namespace grpc_core {
106
107 class TcpZerocopySendRecord {
108 public:
109 TcpZerocopySendRecord() { grpc_slice_buffer_init(&buf_); }
110
111 ~TcpZerocopySendRecord() {
112 AssertEmpty();
113 grpc_slice_buffer_destroy(&buf_);
114 }
115
116 // Given the slices that we wish to send, and the current offset into the
117 // slice buffer (indicating which have already been sent), populate an iovec
118 // array that will be used for a zerocopy enabled sendmsg().
119 msg_iovlen_type PopulateIovs(size_t* unwind_slice_idx,
120 size_t* unwind_byte_idx, size_t* sending_length,
121 iovec* iov);
122
123 // A sendmsg() may not be able to send the bytes that we requested at this
124 // time, returning EAGAIN (possibly due to backpressure). In this case,
125 // unwind the offset into the slice buffer so we retry sending these bytes.
126 void UnwindIfThrottled(size_t unwind_slice_idx, size_t unwind_byte_idx) {
127 out_offset_.byte_idx = unwind_byte_idx;
128 out_offset_.slice_idx = unwind_slice_idx;
129 }
130
131 // Update the offset into the slice buffer based on how much we wanted to send
132 // vs. what sendmsg() actually sent (which may be lower, possibly due to
133 // backpressure).
134 void UpdateOffsetForBytesSent(size_t sending_length, size_t actually_sent);
135
136 // Indicates whether all underlying data has been sent or not.
137 bool AllSlicesSent() { return out_offset_.slice_idx == buf_.count; }
138
139 // Reset this structure for a new tcp_write() with zerocopy.
140 void PrepareForSends(grpc_slice_buffer* slices_to_send) {
141 AssertEmpty();
142 out_offset_.slice_idx = 0;
143 out_offset_.byte_idx = 0;
144 grpc_slice_buffer_swap(slices_to_send, &buf_);
145 Ref();
146 }
147
148 // References: 1 reference per sendmsg(), and 1 for the tcp_write().
149 void Ref() { ref_.fetch_add(1, std::memory_order_relaxed); }
150
151 // Unref: called when we get an error queue notification for a sendmsg(), when
152 // a sendmsg() fails, or when tcp_write() is done.
153 bool Unref() {
154 const intptr_t prior = ref_.fetch_sub(1, std::memory_order_acq_rel);
155 GPR_DEBUG_ASSERT(prior > 0);
156 if (prior == 1) {
157 AllSendsComplete();
158 return true;
159 }
160 return false;
161 }
162
163 private:
164 struct OutgoingOffset {
165 size_t slice_idx = 0;
166 size_t byte_idx = 0;
167 };
168
169 void AssertEmpty() {
170 GPR_DEBUG_ASSERT(buf_.count == 0);
171 GPR_DEBUG_ASSERT(buf_.length == 0);
172 GPR_DEBUG_ASSERT(ref_.load(std::memory_order_relaxed) == 0);
173 }
174
175 // When all sendmsg() calls associated with this tcp_write() have been
176 // completed (ie. we have received the notifications for each sequence number
177 // for each sendmsg()) and all reference counts have been dropped, drop our
178 // reference to the underlying data since we no longer need it.
179 void AllSendsComplete() {
180 GPR_DEBUG_ASSERT(ref_.load(std::memory_order_relaxed) == 0);
181 grpc_slice_buffer_reset_and_unref(&buf_);
182 }
183
184 grpc_slice_buffer buf_;
185 std::atomic<intptr_t> ref_{0};
186 OutgoingOffset out_offset_;
187 };
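
// Illustrative lifecycle of a TcpZerocopySendRecord (a hedged sketch of how
// the rest of this file is expected to drive it, not code executed here):
//   record->PrepareForSends(&slices);   // takes the tcp_write() reference
//   iovec iov[MAX_WRITE_IOVEC];
//   size_t unwind_slice, unwind_byte, len = 0;
//   msg_iovlen_type n =
//       record->PopulateIovs(&unwind_slice, &unwind_byte, &len, iov);
//   // per successful sendmsg(): the context takes one more ref (NoteSend);
//   // on EAGAIN: record->UnwindIfThrottled(unwind_slice, unwind_byte);
//   // on a partial send: record->UpdateOffsetForBytesSent(len, sent);
//   // each error-queue completion and the end of tcp_write() call Unref();
//   // the final Unref() drops the payload via AllSendsComplete().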
188
189 class TcpZerocopySendCtx {
190 public:
191 static constexpr int kDefaultMaxSends = 4;
192 static constexpr size_t kDefaultSendBytesThreshold = 16 * 1024; // 16KB
193
194 explicit TcpZerocopySendCtx(
195 int max_sends = kDefaultMaxSends,
196 size_t send_bytes_threshold = kDefaultSendBytesThreshold)
197 : max_sends_(max_sends),
198 free_send_records_size_(max_sends),
199 threshold_bytes_(send_bytes_threshold) {
200 send_records_ = static_cast<TcpZerocopySendRecord*>(
201 gpr_malloc(max_sends * sizeof(*send_records_)));
202 free_send_records_ = static_cast<TcpZerocopySendRecord**>(
203 gpr_malloc(max_sends * sizeof(*free_send_records_)));
204 if (send_records_ == nullptr || free_send_records_ == nullptr) {
205 gpr_free(send_records_);
206 gpr_free(free_send_records_);
207 gpr_log(GPR_INFO, "Disabling TCP TX zerocopy due to memory pressure.\n");
208 memory_limited_ = true;
209 } else {
210 for (int idx = 0; idx < max_sends_; ++idx) {
211 new (send_records_ + idx) TcpZerocopySendRecord();
212 free_send_records_[idx] = send_records_ + idx;
213 }
214 }
215 }
216
217 ~TcpZerocopySendCtx() {
218 if (send_records_ != nullptr) {
219 for (int idx = 0; idx < max_sends_; ++idx) {
220 send_records_[idx].~TcpZerocopySendRecord();
221 }
222 }
223 gpr_free(send_records_);
224 gpr_free(free_send_records_);
225 }
226
227 // True if we were unable to allocate the various bookkeeping structures at
228 // transport initialization time. If memory limited, we do not zerocopy.
229 bool memory_limited() const { return memory_limited_; }
230
231 // TCP send zerocopy maintains an implicit sequence number for every
232 // successful sendmsg() with zerocopy enabled; the kernel later gives us an
233 // error queue notification with this sequence number indicating that the
234 // underlying data buffers that we sent can now be released. Once that
235 // notification is received, we can release the buffers associated with this
236 // zerocopy send record. Here, we associate the sequence number with the data
237 // buffers that were sent with the corresponding call to sendmsg().
238 void NoteSend(TcpZerocopySendRecord* record) {
239 record->Ref();
240 {
241 MutexLock guard(&lock_);
242 is_in_write_ = true;
243 AssociateSeqWithSendRecordLocked(last_send_, record);
244 }
245 ++last_send_;
246 }
247
248 // If sendmsg() actually failed, though, we need to revert the sequence number
249 // that we speculatively bumped before calling sendmsg(). Note that we bump
250 // this sequence number and perform relevant bookkeeping (see: NoteSend())
251 // *before* calling sendmsg() since, if we called it *after* sendmsg(), then
252 // there is a possible race with the release notification which could occur on
253 // another thread before we do the necessary bookkeeping. Hence, calling
254 // NoteSend() *before* sendmsg() and implementing an undo function is needed.
255 void UndoSend() {
256 --last_send_;
257 if (ReleaseSendRecord(last_send_)->Unref()) {
258 // We should still be holding the ref taken by tcp_write().
259 GPR_DEBUG_ASSERT(0);
260 }
261 }
262
263 // Simply associate this send record (and the underlying sent data buffers)
264 // with the implicit sequence number for this zerocopy sendmsg().
265 void AssociateSeqWithSendRecordLocked(uint32_t seq,
266 TcpZerocopySendRecord* record) {
267 ctx_lookup_.emplace(seq, record);
268 }
269
270 // Get a send record for a send that we wish to do with zerocopy.
271 TcpZerocopySendRecord* GetSendRecord() {
272 MutexLock guard(&lock_);
273 return TryGetSendRecordLocked();
274 }
275
276 // A given send record corresponds to a single tcp_write() with zerocopy
277 // enabled. This can result in several sendmsg() calls to flush all of the
278 // data to wire. Each sendmsg() takes a reference on the
279 // TcpZerocopySendRecord, and corresponds to a single sequence number.
280 // ReleaseSendRecord releases a reference on TcpZerocopySendRecord for a
281 // single sequence number. This is called either when the relevant error queue
282 // notification (saying that we can discard the underlying buffers for this
283 // sendmsg()) is received from the kernel, or when the sendmsg() was
284 // unsuccessful to begin with.
285 TcpZerocopySendRecord* ReleaseSendRecord(uint32_t seq) {
286 MutexLock guard(&lock_);
287 return ReleaseSendRecordLocked(seq);
288 }
289
290 // After all the references to a TcpZerocopySendRecord are released, we can
291 // add it back to the pool (of size max_sends_). Note that we can only have
292 // max_sends_ tcp_write() instances with zerocopy enabled in flight at the
293 // same time.
294 void PutSendRecord(TcpZerocopySendRecord* record) {
295 GPR_DEBUG_ASSERT(record >= send_records_ &&
296 record < send_records_ + max_sends_);
297 MutexLock guard(&lock_);
298 PutSendRecordLocked(record);
299 }
300
301 // Indicate that we are disposing of this zerocopy context. This indicator
302 // will prevent new zerocopy writes from being issued.
303 void Shutdown() { shutdown_.store(true, std::memory_order_release); }
304
305 // Indicates that there are no inflight tcp_write() instances with zerocopy
306 // enabled.
307 bool AllSendRecordsEmpty() {
308 MutexLock guard(&lock_);
309 return free_send_records_size_ == max_sends_;
310 }
311
312 bool enabled() const { return enabled_; }
313
314 void set_enabled(bool enabled) {
315 GPR_DEBUG_ASSERT(!enabled || !memory_limited());
316 enabled_ = enabled;
317 }
318
319 // Only use zerocopy if we are sending at least this many bytes. The
320 // additional overhead of reading the error queue for notifications means that
321 // zerocopy is not useful for small transfers.
322 size_t threshold_bytes() const { return threshold_bytes_; }
323
324 // Expected to be called by the handler reading messages from the error queue.
325 // It is used to indicate that some OMem memory is now available. It returns
326 // true to tell the caller to mark the file descriptor as immediately
327 // writable.
328 //
329 // If a write is currently in progress on the socket (ie. we have issued a
330 // sendmsg() and are about to check its return value) then we set omem state
331 // to CHECK to let the sending thread know that some tcp_omem was
332 // concurrently freed even if sendmsg() returns ENOBUFS. In this case, since
333 // there is already an active send thread, we do not need to mark the
334 // socket writeable, so we return false.
335 //
336 // If there was no write in progress on the socket, and the socket was not
337 // marked as FULL, then we need not mark the socket writeable now that some
338 // tcp_omem memory is freed since it was not considered as blocked on
339 // tcp_omem to begin with. So in this case, return false.
340 //
341 // But, if a write was not in progress and the omem state was FULL, then we
342 // need to mark the socket writeable since it is no longer blocked by
343 // tcp_omem. In this case, return true.
344 //
345 // Please refer to the STATE TRANSITION DIAGRAM below for more details.
346 //
347 bool UpdateZeroCopyOMemStateAfterFree() {
348 MutexLock guard(&lock_);
349 if (is_in_write_) {
350 zcopy_enobuf_state_ = OMemState::CHECK;
351 return false;
352 }
353 GPR_DEBUG_ASSERT(zcopy_enobuf_state_ != OMemState::CHECK);
354 if (zcopy_enobuf_state_ == OMemState::FULL) {
355 // A previous sendmsg attempt was blocked by ENOBUFS. Return true to
356 // mark the fd as writable so the next write attempt could be made.
357 zcopy_enobuf_state_ = OMemState::OPEN;
358 return true;
359 } else if (zcopy_enobuf_state_ == OMemState::OPEN) {
360 // No need to mark the fd as writable because the previous write
361 // attempt did not encounter ENOBUFS.
362 return false;
363 } else {
364 // This state should never be reached because it implies that the previous
365 // state was CHECK and is_in_write is false. This means that after the
366 // previous sendmsg returned and set is_in_write to false, it did
367 // not update the z-copy change from CHECK to OPEN.
368 Crash("OMem state error!");
369 }
370 }
371
372 // Expected to be called by the thread calling sendmsg after the syscall
373 // invocation is complete. If ENOBUFS is seen, it checks whether the error
374 // handler (Tx0cp completions) has already run and freed up some OMem, and
375 // returns true to indicate that the write can be attempted again immediately.
376 // If ENOBUFS was seen but no Tx0cp completions have been received between the
377 // sendmsg() and us taking this lock, then tcp_omem is still full from our
378 // point of view, so we do not signal that the socket is writable with respect
379 // to the availability of tcp_omem, and the function returns false. This
380 // indicates that another write should not be attempted immediately and the
381 // calling thread should wait until the socket is writable again. If ENOBUFS
382 // was not seen, then again return false because the next write should be
383 // attempted only when the socket is writable again.
384 //
385 // Please refer to the STATE TRANSITION DIAGRAM below for more details.
386 //
387 bool UpdateZeroCopyOMemStateAfterSend(bool seen_enobuf) {
388 MutexLock guard(&lock_);
389 is_in_write_ = false;
390 if (seen_enobuf) {
391 if (zcopy_enobuf_state_ == OMemState::CHECK) {
392 zcopy_enobuf_state_ = OMemState::OPEN;
393 return true;
394 } else {
395 zcopy_enobuf_state_ = OMemState::FULL;
396 }
397 } else if (zcopy_enobuf_state_ != OMemState::OPEN) {
398 zcopy_enobuf_state_ = OMemState::OPEN;
399 }
400 return false;
401 }
402
403 private:
404 // STATE TRANSITION DIAGRAM
405 //
406 // sendmsg succeeds       Tx-zero copy succeeds and there is no active sendmsg
407 //     ----<<--+  +-----<<------------------------+
408 //     |       |  |                               |
409 //     |       |  v   sendmsg returns ENOBUFS     |
410 //     +-----> OPEN ---------->>---------------> FULL
411 //               ^                                |
412 //               |                                |
413 //               | sendmsg completes              |
414 //               +----<<---------- CHECK <-----<<-+
415 //                                  Tx-zero copy succeeds and there is
416 //                                  an active sendmsg
417 //
418 enum class OMemState : int8_t {
419 OPEN, // Everything is clear and omem is not full.
420 FULL, // The last sendmsg() has returned with an errno of ENOBUFS.
421 CHECK, // Error queue is read while is_in_write_ was true, so we should
422 // check this state after the sendmsg.
423 };
424
425 TcpZerocopySendRecord* ReleaseSendRecordLocked(uint32_t seq) {
426 auto iter = ctx_lookup_.find(seq);
427 GPR_DEBUG_ASSERT(iter != ctx_lookup_.end());
428 TcpZerocopySendRecord* record = iter->second;
429 ctx_lookup_.erase(iter);
430 return record;
431 }
432
433 TcpZerocopySendRecord* TryGetSendRecordLocked() {
434 if (shutdown_.load(std::memory_order_acquire)) {
435 return nullptr;
436 }
437 if (free_send_records_size_ == 0) {
438 return nullptr;
439 }
440 free_send_records_size_--;
441 return free_send_records_[free_send_records_size_];
442 }
443
444 void PutSendRecordLocked(TcpZerocopySendRecord* record) {
445 GPR_DEBUG_ASSERT(free_send_records_size_ < max_sends_);
446 free_send_records_[free_send_records_size_] = record;
447 free_send_records_size_++;
448 }
449
450 TcpZerocopySendRecord* send_records_;
451 TcpZerocopySendRecord** free_send_records_;
452 int max_sends_;
453 int free_send_records_size_;
454 Mutex lock_;
455 uint32_t last_send_ = 0;
456 std::atomic<bool> shutdown_{false};
457 bool enabled_ = false;
458 size_t threshold_bytes_ = kDefaultSendBytesThreshold;
459 std::unordered_map<uint32_t, TcpZerocopySendRecord*> ctx_lookup_;
460 bool memory_limited_ = false;
461 bool is_in_write_ = false;
462 OMemState zcopy_enobuf_state_;
463 };
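
// Hedged sketch of how the send path and the error-queue handler are expected
// to drive this context (illustrative only; the real call sites appear later
// in this file):
//   // zerocopy write path, per sendmsg():
//   ctx.NoteSend(record);                       // speculative seq + extra ref
//   ssize_t sent = sendmsg(fd, &msg, MSG_ZEROCOPY);
//   if (sent < 0 && errno != EAGAIN && errno != ENOBUFS) ctx.UndoSend();
//   bool retry_now = ctx.UpdateZeroCopyOMemStateAfterSend(errno == ENOBUFS);
//   // error-queue handler (see process_zerocopy() below), per sequence number:
//   TcpZerocopySendRecord* rec = ctx.ReleaseSendRecord(seq);
//   if (rec->Unref()) ctx.PutSendRecord(rec);   // back to the free pool
//   if (ctx.UpdateZeroCopyOMemStateAfterFree()) grpc_fd_set_writable(em_fd);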
464
465 } // namespace grpc_core
466
467 using grpc_core::TcpZerocopySendCtx;
468 using grpc_core::TcpZerocopySendRecord;
469
470 namespace {
471
472 struct grpc_tcp {
473 explicit grpc_tcp(const grpc_core::PosixTcpOptions& tcp_options)
474 : min_read_chunk_size(tcp_options.tcp_min_read_chunk_size),
475 max_read_chunk_size(tcp_options.tcp_max_read_chunk_size),
476 tcp_zerocopy_send_ctx(
477 tcp_options.tcp_tx_zerocopy_max_simultaneous_sends,
478 tcp_options.tcp_tx_zerocopy_send_bytes_threshold) {}
479 grpc_endpoint base;
480 grpc_fd* em_fd;
481 int fd;
482 // Used by the endpoint read function to distinguish the very first read call
483 // from the rest
484 bool is_first_read;
485 bool has_posted_reclaimer ABSL_GUARDED_BY(read_mu) = false;
486 double target_length;
487 double bytes_read_this_round;
488 grpc_core::RefCount refcount;
489 gpr_atm shutdown_count;
490
491 int min_read_chunk_size;
492 int max_read_chunk_size;
493 int set_rcvlowat = 0;
494
495 // garbage after the last read
496 grpc_slice_buffer last_read_buffer;
497
498 grpc_core::Mutex read_mu;
499 grpc_slice_buffer* incoming_buffer ABSL_GUARDED_BY(read_mu) = nullptr;
500 int inq; // bytes pending on the socket from the last read.
501 bool inq_capable; // cache whether kernel supports inq
502
503 grpc_slice_buffer* outgoing_buffer;
504 // byte within outgoing_buffer->slices[0] to write next
505 size_t outgoing_byte_idx;
506
507 grpc_closure* read_cb;
508 grpc_closure* write_cb;
509 grpc_closure* release_fd_cb;
510 int* release_fd;
511
512 grpc_closure read_done_closure;
513 grpc_closure write_done_closure;
514 grpc_closure error_closure;
515
516 std::string peer_string;
517 std::string local_address;
518
519 grpc_core::MemoryOwner memory_owner;
520 grpc_core::MemoryAllocator::Reservation self_reservation;
521
522 grpc_core::TracedBufferList tb_list; // List of traced buffers
523
524 // grpc_endpoint_write takes an argument which if non-null means that the
525 // transport layer wants the TCP layer to collect timestamps for this write.
526 // This arg is forwarded to the timestamps callback function when the ACK
527 // timestamp is received from the kernel. This arg is a (void *) which allows
528 // users of this API to pass in a pointer to any kind of structure. This
529 // structure could actually be a tag or any book-keeping object that the user
530 // can use to distinguish between different traced writes. The only
531 // requirement from the TCP endpoint layer is that this arg should be non-null
532 // if the user wants timestamps for the write.
533 void* outgoing_buffer_arg;
534 // A counter which starts at 0. It is initialized the first time the socket
535 // options for collecting timestamps are set, and is incremented with each
536 // byte sent.
537 int bytes_counter;
538 bool socket_ts_enabled; // True if timestamping options are set on the socket
539 //
540 bool ts_capable; // Cache whether we can set timestamping options
541 gpr_atm stop_error_notification; // Set to 1 if we do not want to be notified
542 // on errors anymore
543 TcpZerocopySendCtx tcp_zerocopy_send_ctx;
544 TcpZerocopySendRecord* current_zerocopy_send = nullptr;
545
546 int min_progress_size; // A hint from upper layers specifying the minimum
547 // number of bytes that need to be read to make
548 // meaningful progress
549 };
550
551 struct backup_poller {
552 gpr_mu* pollset_mu;
553 grpc_closure run_poller;
554 };
555
556 } // namespace
557
558 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp);
559
560 #define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
561
562 static grpc_core::Mutex* g_backup_poller_mu = nullptr;
563 static int g_uncovered_notifications_pending
564 ABSL_GUARDED_BY(g_backup_poller_mu);
565 static backup_poller* g_backup_poller ABSL_GUARDED_BY(g_backup_poller_mu);
566
567 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
568 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
569 static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
570 grpc_error_handle error);
571
572 static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
573 backup_poller* p = static_cast<backup_poller*>(bp);
574 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
575 gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
576 }
577 grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
578 gpr_free(p);
579 }
580
581 static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
582 backup_poller* p = static_cast<backup_poller*>(bp);
583 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
584 gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
585 }
586 gpr_mu_lock(p->pollset_mu);
587 grpc_core::Timestamp deadline =
588 grpc_core::Timestamp::Now() + grpc_core::Duration::Seconds(10);
589 GRPC_LOG_IF_ERROR(
590 "backup_poller:pollset_work",
591 grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
592 gpr_mu_unlock(p->pollset_mu);
593 g_backup_poller_mu->Lock();
594 // last "uncovered" notification is the ref that keeps us polling
595 if (g_uncovered_notifications_pending == 1) {
596 GPR_ASSERT(g_backup_poller == p);
597 g_backup_poller = nullptr;
598 g_uncovered_notifications_pending = 0;
599 g_backup_poller_mu->Unlock();
600 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
601 gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
602 }
603 grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
604 GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
605 grpc_schedule_on_exec_ctx));
606 } else {
607 g_backup_poller_mu->Unlock();
608 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
609 gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
610 }
611 grpc_core::Executor::Run(&p->run_poller, absl::OkStatus(),
612 grpc_core::ExecutorType::DEFAULT,
613 grpc_core::ExecutorJobType::LONG);
614 }
615 }
616
617 static void drop_uncovered(grpc_tcp* /*tcp*/) {
618 int old_count;
619 backup_poller* p;
620 g_backup_poller_mu->Lock();
621 p = g_backup_poller;
622 old_count = g_uncovered_notifications_pending--;
623 g_backup_poller_mu->Unlock();
624 GPR_ASSERT(old_count > 1);
625 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
626 gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p, old_count,
627 old_count - 1);
628 }
629 }
630
631 // gRPC API considers a Write operation to be done the moment it clears ‘flow
632 // control’ i.e., not necessarily sent on the wire. This means that the
633 // application MIGHT not call `grpc_completion_queue_next/pluck` in a timely
634 // manner when its `Write()` API is acked.
635 //
636 // We need to ensure that the fd is 'covered' (i.e being monitored by some
637 // polling thread and progress is made) and hence add it to a backup poller here
638 static void cover_self(grpc_tcp* tcp) {
639 backup_poller* p;
640 g_backup_poller_mu->Lock();
641 int old_count = 0;
642 if (g_uncovered_notifications_pending == 0) {
643 g_uncovered_notifications_pending = 2;
644 p = static_cast<backup_poller*>(
645 gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
646 g_backup_poller = p;
647 grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
648 g_backup_poller_mu->Unlock();
649 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
650 gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
651 }
652 grpc_core::Executor::Run(
653 GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
654 absl::OkStatus(), grpc_core::ExecutorType::DEFAULT,
655 grpc_core::ExecutorJobType::LONG);
656 } else {
657 old_count = g_uncovered_notifications_pending++;
658 p = g_backup_poller;
659 g_backup_poller_mu->Unlock();
660 }
661 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
662 gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p cnt %d->%d", p, tcp,
663 old_count - 1, old_count);
664 }
665 grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
666 }
667
668 static void notify_on_read(grpc_tcp* tcp) {
669 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
670 gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
671 }
672 grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
673 }
674
675 static void notify_on_write(grpc_tcp* tcp) {
676 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
677 gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
678 }
679 if (!grpc_event_engine_run_in_background()) {
680 cover_self(tcp);
681 }
682 grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
683 }
684
685 static void tcp_drop_uncovered_then_handle_write(void* arg,
686 grpc_error_handle error) {
687 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
688 gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg,
689 grpc_core::StatusToString(error).c_str());
690 }
691 drop_uncovered(static_cast<grpc_tcp*>(arg));
692 tcp_handle_write(arg, error);
693 }
694
695 static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
696 tcp->bytes_read_this_round += static_cast<double>(bytes);
697 }
698
699 static void finish_estimate(grpc_tcp* tcp) {
700 // If we read >80% of the target buffer in one read loop, increase the size
701 // of the target buffer to either the amount read, or twice its previous
702 // value
703 if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
704 tcp->target_length =
705 std::max(2 * tcp->target_length, tcp->bytes_read_this_round);
706 } else {
707 tcp->target_length =
708 0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
709 }
710 tcp->bytes_read_this_round = 0;
711 }
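
// Worked example of the estimator above (illustrative numbers): with
// target_length = 8192, a round that reads 7000 bytes exceeds the 80%
// threshold (6553.6) and doubles the target to 16384, while a round that
// reads 1000 bytes only nudges it to 0.99 * 8192 + 0.01 * 1000 ~= 8120, so
// the target grows quickly under load and decays slowly otherwise.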
712
713 static grpc_error_handle tcp_annotate_error(grpc_error_handle src_error,
714 grpc_tcp* tcp) {
715 return grpc_error_set_str(
716 grpc_error_set_int(
717 grpc_error_set_int(src_error, grpc_core::StatusIntProperty::kFd,
718 tcp->fd),
719 // All tcp errors are marked with UNAVAILABLE so that application may
720 // choose to retry.
721 grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE),
722 grpc_core::StatusStrProperty::kTargetAddress, tcp->peer_string);
723 }
724
725 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
726 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
727
728 static void tcp_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
729 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
730 ZerocopyDisableAndWaitForRemaining(tcp);
731 grpc_fd_shutdown(tcp->em_fd, why);
732 }
733
734 static void tcp_free(grpc_tcp* tcp) {
735 grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
736 "tcp_unref_orphan");
737 grpc_slice_buffer_destroy(&tcp->last_read_buffer);
738 tcp->tb_list.Shutdown(tcp->outgoing_buffer_arg,
739 GRPC_ERROR_CREATE("endpoint destroyed"));
740 tcp->outgoing_buffer_arg = nullptr;
741 delete tcp;
742 }
743
744 #ifndef NDEBUG
745 #define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), DEBUG_LOCATION)
746 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), DEBUG_LOCATION)
747 static void tcp_unref(grpc_tcp* tcp, const char* reason,
748 const grpc_core::DebugLocation& debug_location) {
749 if (GPR_UNLIKELY(tcp->refcount.Unref(debug_location, reason))) {
750 tcp_free(tcp);
751 }
752 }
753
754 static void tcp_ref(grpc_tcp* tcp, const char* reason,
755 const grpc_core::DebugLocation& debug_location) {
756 tcp->refcount.Ref(debug_location, reason);
757 }
758 #else
759 #define TCP_UNREF(tcp, reason) tcp_unref((tcp))
760 #define TCP_REF(tcp, reason) tcp_ref((tcp))
761 static void tcp_unref(grpc_tcp* tcp) {
762 if (GPR_UNLIKELY(tcp->refcount.Unref())) {
763 tcp_free(tcp);
764 }
765 }
766
767 static void tcp_ref(grpc_tcp* tcp) { tcp->refcount.Ref(); }
768 #endif
769
770 static void tcp_destroy(grpc_endpoint* ep) {
771 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
772 grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
773 if (grpc_event_engine_can_track_errors()) {
774 ZerocopyDisableAndWaitForRemaining(tcp);
775 gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
776 grpc_fd_set_error(tcp->em_fd);
777 }
778 TCP_UNREF(tcp, "destroy");
779 }
780
781 static void perform_reclamation(grpc_tcp* tcp)
782 ABSL_LOCKS_EXCLUDED(tcp->read_mu) {
783 if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
784 gpr_log(GPR_INFO, "TCP: benign reclamation to free memory");
785 }
786 tcp->read_mu.Lock();
787 if (tcp->incoming_buffer != nullptr) {
788 grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
789 }
790 tcp->has_posted_reclaimer = false;
791 tcp->read_mu.Unlock();
792 }
793
794 static void maybe_post_reclaimer(grpc_tcp* tcp)
795 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
796 if (!tcp->has_posted_reclaimer) {
797 tcp->has_posted_reclaimer = true;
798 tcp->memory_owner.PostReclaimer(
799 grpc_core::ReclamationPass::kBenign,
800 [tcp](absl::optional<grpc_core::ReclamationSweep> sweep) {
801 if (!sweep.has_value()) return;
802 perform_reclamation(tcp);
803 });
804 }
805 }
806
807 static void tcp_trace_read(grpc_tcp* tcp, grpc_error_handle error)
808 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
809 grpc_closure* cb = tcp->read_cb;
810 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
811 gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
812 size_t i;
813 gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp,
814 tcp->peer_string.c_str(), grpc_core::StatusToString(error).c_str());
815 if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
816 for (i = 0; i < tcp->incoming_buffer->count; i++) {
817 char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
818 GPR_DUMP_HEX | GPR_DUMP_ASCII);
819 gpr_log(GPR_DEBUG, "READ DATA: %s", dump);
820 gpr_free(dump);
821 }
822 }
823 }
824 }
825
826 static void update_rcvlowat(grpc_tcp* tcp)
827 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
828 if (!grpc_core::IsTcpRcvLowatEnabled()) return;
829
830 // TODO(ctiller): Check if supported by OS.
831 // TODO(ctiller): Allow some adjustments instead of hardcoding things.
832
833 static constexpr int kRcvLowatMax = 16 * 1024 * 1024;
834 static constexpr int kRcvLowatThreshold = 16 * 1024;
835
836 int remaining = std::min(static_cast<int>(tcp->incoming_buffer->length),
837 tcp->min_progress_size);
838
839 remaining = std::min(remaining, kRcvLowatMax);
840
841 // Setting SO_RCVLOWAT for small quantities does not save on CPU.
842 if (remaining < 2 * kRcvLowatThreshold) {
843 remaining = 0;
844 }
845
846 // Decrement remaining by kRcvLowatThreshold. This would have the effect of
847 // waking up a little early. It would help with latency because some bytes
848 // may arrive while we execute the recvmsg syscall after waking up.
849 if (remaining > 0) {
850 remaining -= kRcvLowatThreshold;
851 }
852
853 // We still do not know the RPC size. Do not set SO_RCVLOWAT.
854 if (tcp->set_rcvlowat <= 1 && remaining <= 1) return;
855
856 // Previous value is still valid. No change needed in SO_RCVLOWAT.
857 if (tcp->set_rcvlowat == remaining) {
858 return;
859 }
860 if (setsockopt(tcp->fd, SOL_SOCKET, SO_RCVLOWAT, &remaining,
861 sizeof(remaining)) != 0) {
862 gpr_log(GPR_ERROR, "%s",
863 absl::StrCat("Cannot set SO_RCVLOWAT on fd=", tcp->fd,
864 " err=", grpc_core::StrError(errno).c_str())
865 .c_str());
866 return;
867 }
868 tcp->set_rcvlowat = remaining;
869 }
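
// Worked example of the computation above (illustrative numbers): with
// min_progress_size = 65536 and a larger incoming buffer, remaining starts at
// 65536 (below kRcvLowatMax), is not zeroed since it exceeds
// 2 * kRcvLowatThreshold, and is then reduced to 65536 - 16384 = 49152, so
// SO_RCVLOWAT is set to 49152 and the poller wakes this endpoint only once
// roughly that many bytes are queued.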
870
871 // Returns true if data available to read or error other than EAGAIN.
872 #define MAX_READ_IOVEC 64
873 static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
874 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
875 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
876 gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
877 }
878 struct msghdr msg;
879 struct iovec iov[MAX_READ_IOVEC];
880 ssize_t read_bytes;
881 size_t total_read_bytes = 0;
882 size_t iov_len =
883 std::min<size_t>(MAX_READ_IOVEC, tcp->incoming_buffer->count);
884 #ifdef GRPC_LINUX_ERRQUEUE
885 constexpr size_t cmsg_alloc_space =
886 CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) + CMSG_SPACE(sizeof(int));
887 #else
888 constexpr size_t cmsg_alloc_space = 24 /* CMSG_SPACE(sizeof(int)) */;
889 #endif // GRPC_LINUX_ERRQUEUE
890 char cmsgbuf[cmsg_alloc_space];
891 for (size_t i = 0; i < iov_len; i++) {
892 iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
893 iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
894 }
895
896 GPR_ASSERT(tcp->incoming_buffer->length != 0);
897 GPR_DEBUG_ASSERT(tcp->min_progress_size > 0);
898
899 do {
900 // Assume there is something on the queue. If we receive TCP_INQ from
901 // kernel, we will update this value, otherwise, we have to assume there is
902 // always something to read until we get EAGAIN.
903 tcp->inq = 1;
904
905 msg.msg_name = nullptr;
906 msg.msg_namelen = 0;
907 msg.msg_iov = iov;
908 msg.msg_iovlen = static_cast<msg_iovlen_type>(iov_len);
909 if (tcp->inq_capable) {
910 msg.msg_control = cmsgbuf;
911 msg.msg_controllen = sizeof(cmsgbuf);
912 } else {
913 msg.msg_control = nullptr;
914 msg.msg_controllen = 0;
915 }
916 msg.msg_flags = 0;
917
918 grpc_core::global_stats().IncrementTcpReadOffer(
919 tcp->incoming_buffer->length);
920 grpc_core::global_stats().IncrementTcpReadOfferIovSize(
921 tcp->incoming_buffer->count);
922
923 do {
924 grpc_core::global_stats().IncrementSyscallRead();
925 read_bytes = recvmsg(tcp->fd, &msg, 0);
926 } while (read_bytes < 0 && errno == EINTR);
927
928 if (read_bytes < 0 && errno == EAGAIN) {
929 // NB: After calling call_read_cb a parallel call of the read handler may
930 // be running.
931 if (total_read_bytes > 0) {
932 break;
933 }
934 finish_estimate(tcp);
935 tcp->inq = 0;
936 return false;
937 }
938
939 // We have read something in previous reads. We need to deliver those
940 // bytes to the upper layer.
941 if (read_bytes <= 0 && total_read_bytes >= 1) {
942 tcp->inq = 1;
943 break;
944 }
945
946 if (read_bytes <= 0) {
947 // 0 read size ==> end of stream
948 grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
949 if (read_bytes == 0) {
950 *error = tcp_annotate_error(absl::InternalError("Socket closed"), tcp);
951 } else {
952 *error =
953 tcp_annotate_error(absl::InternalError(absl::StrCat(
954 "recvmsg:", grpc_core::StrError(errno))),
955 tcp);
956 }
957 return true;
958 }
959
960 grpc_core::global_stats().IncrementTcpReadSize(read_bytes);
961 add_to_estimate(tcp, static_cast<size_t>(read_bytes));
962 GPR_DEBUG_ASSERT((size_t)read_bytes <=
963 tcp->incoming_buffer->length - total_read_bytes);
964
965 #ifdef GRPC_HAVE_TCP_INQ
966 if (tcp->inq_capable) {
967 GPR_DEBUG_ASSERT(!(msg.msg_flags & MSG_CTRUNC));
968 struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
969 for (; cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
970 if (cmsg->cmsg_level == SOL_TCP && cmsg->cmsg_type == TCP_CM_INQ &&
971 cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
972 tcp->inq = *reinterpret_cast<int*>(CMSG_DATA(cmsg));
973 break;
974 }
975 }
976 }
977 #endif // GRPC_HAVE_TCP_INQ
978
979 total_read_bytes += read_bytes;
980 if (tcp->inq == 0 || total_read_bytes == tcp->incoming_buffer->length) {
981 break;
982 }
983
984 // We had a partial read, and still have space to read more data.
985 // So, adjust IOVs and try to read more.
986 size_t remaining = read_bytes;
987 size_t j = 0;
988 for (size_t i = 0; i < iov_len; i++) {
989 if (remaining >= iov[i].iov_len) {
990 remaining -= iov[i].iov_len;
991 continue;
992 }
993 if (remaining > 0) {
994 iov[j].iov_base = static_cast<char*>(iov[i].iov_base) + remaining;
995 iov[j].iov_len = iov[i].iov_len - remaining;
996 remaining = 0;
997 } else {
998 iov[j].iov_base = iov[i].iov_base;
999 iov[j].iov_len = iov[i].iov_len;
1000 }
1001 ++j;
1002 }
1003 iov_len = j;
1004 } while (true);
1005
1006 if (tcp->inq == 0) {
1007 finish_estimate(tcp);
1008 }
1009
1010 GPR_DEBUG_ASSERT(total_read_bytes > 0);
1011 *error = absl::OkStatus();
1012 if (grpc_core::IsTcpFrameSizeTuningEnabled()) {
1013 // Update min progress size based on the total number of bytes read in
1014 // this round.
1015 tcp->min_progress_size -= total_read_bytes;
1016 if (tcp->min_progress_size > 0) {
1017 // There are still some bytes left to be read before we can signal
1018 // the read as complete. Append the bytes read so far into
1019 // last_read_buffer which serves as a staging buffer. Return false
1020 // to indicate tcp_handle_read needs to be scheduled again.
1021 grpc_slice_buffer_move_first(tcp->incoming_buffer, total_read_bytes,
1022 &tcp->last_read_buffer);
1023 return false;
1024 } else {
1025 // The required number of bytes have been read. Append the bytes
1026 // read in this round into last_read_buffer. Then swap last_read_buffer
1027 // and incoming_buffer. Now incoming buffer contains all the bytes
1028 // read since the start of the last tcp_read operation. last_read_buffer
1029 // would contain any spare space left in the incoming buffer. This
1030 // space will be used in the next tcp_read operation.
1031 tcp->min_progress_size = 1;
1032 grpc_slice_buffer_move_first(tcp->incoming_buffer, total_read_bytes,
1033 &tcp->last_read_buffer);
1034 grpc_slice_buffer_swap(&tcp->last_read_buffer, tcp->incoming_buffer);
1035 return true;
1036 }
1037 }
1038 if (total_read_bytes < tcp->incoming_buffer->length) {
1039 grpc_slice_buffer_trim_end(tcp->incoming_buffer,
1040 tcp->incoming_buffer->length - total_read_bytes,
1041 &tcp->last_read_buffer);
1042 }
1043 return true;
1044 }
1045
1046 static void maybe_make_read_slices(grpc_tcp* tcp)
1047 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
1048 static const int kBigAlloc = 64 * 1024;
1049 static const int kSmallAlloc = 8 * 1024;
1050 if (tcp->incoming_buffer->length <
1051 static_cast<size_t>(tcp->min_progress_size)) {
1052 size_t allocate_length = tcp->min_progress_size;
1053 const size_t target_length = static_cast<size_t>(tcp->target_length);
1054 // If memory pressure is low and we think there will be more than
1055 // min_progress_size bytes to read, allocate a bit more.
1056 const bool low_memory_pressure =
1057 tcp->memory_owner.GetPressureInfo().pressure_control_value < 0.8;
1058 if (low_memory_pressure && target_length > allocate_length) {
1059 allocate_length = target_length;
1060 }
1061 int extra_wanted =
1062 allocate_length - static_cast<int>(tcp->incoming_buffer->length);
1063 if (extra_wanted >=
1064 (low_memory_pressure ? kSmallAlloc * 3 / 2 : kBigAlloc)) {
1065 while (extra_wanted > 0) {
1066 extra_wanted -= kBigAlloc;
1067 grpc_slice_buffer_add_indexed(tcp->incoming_buffer,
1068 tcp->memory_owner.MakeSlice(kBigAlloc));
1069 grpc_core::global_stats().IncrementTcpReadAlloc64k();
1070 }
1071 } else {
1072 while (extra_wanted > 0) {
1073 extra_wanted -= kSmallAlloc;
1074 grpc_slice_buffer_add_indexed(tcp->incoming_buffer,
1075 tcp->memory_owner.MakeSlice(kSmallAlloc));
1076 grpc_core::global_stats().IncrementTcpReadAlloc8k();
1077 }
1078 }
1079 maybe_post_reclaimer(tcp);
1080 }
1081 }
1082
1083 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
1084 grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1085 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1086 gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp,
1087 grpc_core::StatusToString(error).c_str());
1088 }
1089 tcp->read_mu.Lock();
1090 grpc_error_handle tcp_read_error;
1091 if (GPR_LIKELY(error.ok())) {
1092 maybe_make_read_slices(tcp);
1093 if (!tcp_do_read(tcp, &tcp_read_error)) {
1094 // Maybe update rcv lowat value based on the number of bytes read in this
1095 // round.
1096 update_rcvlowat(tcp);
1097 tcp->read_mu.Unlock();
1098 // We've consumed the edge, request a new one
1099 notify_on_read(tcp);
1100 return;
1101 }
1102 tcp_trace_read(tcp, tcp_read_error);
1103 } else {
1104 tcp_read_error = error;
1105 grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
1106 grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
1107 }
1108 // Update rcv lowat needs to be called at the end of the current read
1109 // operation to ensure the right SO_RCVLOWAT value is set for the next read.
1110 // Otherwise the next endpoint read operation may get stuck indefinitely
1111 // because the previously set rcv lowat value will persist and the socket may
1112 // be erroneously considered to not be ready for read.
1113 update_rcvlowat(tcp);
1114 grpc_closure* cb = tcp->read_cb;
1115 tcp->read_cb = nullptr;
1116 tcp->incoming_buffer = nullptr;
1117 tcp->read_mu.Unlock();
1118 grpc_core::Closure::Run(DEBUG_LOCATION, cb, tcp_read_error);
1119 TCP_UNREF(tcp, "read");
1120 }
1121
1122 static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
1123 grpc_closure* cb, bool urgent, int min_progress_size) {
1124 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1125 GPR_ASSERT(tcp->read_cb == nullptr);
1126 tcp->read_cb = cb;
1127 tcp->read_mu.Lock();
1128 tcp->incoming_buffer = incoming_buffer;
1129 tcp->min_progress_size = grpc_core::IsTcpFrameSizeTuningEnabled()
1130 ? std::max(min_progress_size, 1)
1131 : 1;
1132 grpc_slice_buffer_reset_and_unref(incoming_buffer);
1133 grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
1134 TCP_REF(tcp, "read");
1135 if (tcp->is_first_read) {
1136 tcp->read_mu.Unlock();
1137 // Endpoint read called for the very first time. Register read callback with
1138 // the polling engine
1139 tcp->is_first_read = false;
1140 notify_on_read(tcp);
1141 } else if (!urgent && tcp->inq == 0) {
1142 tcp->read_mu.Unlock();
1143 // Upper layer asked to read more but we know there is no pending data
1144 // to read from previous reads. So, wait for POLLIN.
1145 //
1146 notify_on_read(tcp);
1147 } else {
1148 tcp->read_mu.Unlock();
1149 // Not the first time. We may or may not have more bytes available. In any
1150 // case call tcp->read_done_closure (i.e tcp_handle_read()) which does the
1151 // right thing (i.e calls tcp_do_read() which either reads the available
1152 // bytes or calls notify_on_read() to be notified when new bytes become
1153 // available
1154 grpc_core::Closure::Run(DEBUG_LOCATION, &tcp->read_done_closure,
1155 absl::OkStatus());
1156 }
1157 }
1158
1159 // A wrapper around sendmsg. It sends \a msg over \a fd and returns the number
1160 // of bytes sent.
1161 ssize_t tcp_send(int fd, const struct msghdr* msg, int* saved_errno,
1162 int additional_flags = 0) {
1163 ssize_t sent_length;
1164 do {
1165 // TODO(klempner): Cork if this is a partial write
1166 grpc_core::global_stats().IncrementSyscallWrite();
1167 sent_length = sendmsg(fd, msg, SENDMSG_FLAGS | additional_flags);
1168 } while (sent_length < 0 && (*saved_errno = errno) == EINTR);
1169 return sent_length;
1170 }
1171
1172 /// This is to be called if outgoing_buffer_arg is not null. On linux platforms,
1173 /// this will call sendmsg with socket options set to collect timestamps inside
1174 /// the kernel. On return, sent_length is set to the return value of the sendmsg
1175 /// call. Returns false if setting the socket options failed. This is not
1176 /// implemented for non-linux platforms currently, and crashes out.
1177 ///
1178 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
1179 size_t sending_length,
1180 ssize_t* sent_length, int* saved_errno,
1181 int additional_flags = 0);
1182
1183 /// The callback function to be invoked when we get an error on the socket.
1184 static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error_handle error);
1185
1186 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1187 grpc_tcp* tcp, grpc_slice_buffer* buf);
1188
1189 #ifdef GRPC_LINUX_ERRQUEUE
1190 static bool process_errors(grpc_tcp* tcp);
1191
1192 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1193 grpc_tcp* tcp, grpc_slice_buffer* buf) {
1194 TcpZerocopySendRecord* zerocopy_send_record = nullptr;
1195 const bool use_zerocopy =
1196 tcp->tcp_zerocopy_send_ctx.enabled() &&
1197 tcp->tcp_zerocopy_send_ctx.threshold_bytes() < buf->length;
1198 if (use_zerocopy) {
1199 zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
1200 if (zerocopy_send_record == nullptr) {
1201 process_errors(tcp);
1202 zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
1203 }
1204 if (zerocopy_send_record != nullptr) {
1205 zerocopy_send_record->PrepareForSends(buf);
1206 GPR_DEBUG_ASSERT(buf->count == 0);
1207 GPR_DEBUG_ASSERT(buf->length == 0);
1208 tcp->outgoing_byte_idx = 0;
1209 tcp->outgoing_buffer = nullptr;
1210 }
1211 }
1212 return zerocopy_send_record;
1213 }
1214
1215 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp) {
1216 tcp->tcp_zerocopy_send_ctx.Shutdown();
1217 while (!tcp->tcp_zerocopy_send_ctx.AllSendRecordsEmpty()) {
1218 process_errors(tcp);
1219 }
1220 }
1221
1222 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
1223 size_t sending_length,
1224 ssize_t* sent_length, int* saved_errno,
1225 int additional_flags) {
1226 if (!tcp->socket_ts_enabled) {
1227 uint32_t opt = grpc_core::kTimestampingSocketOptions;
1228 if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
1229 static_cast<void*>(&opt), sizeof(opt)) != 0) {
1230 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1231 gpr_log(GPR_ERROR, "Failed to set timestamping options on the socket.");
1232 }
1233 return false;
1234 }
1235 tcp->bytes_counter = -1;
1236 tcp->socket_ts_enabled = true;
1237 }
1238 // Set control message to indicate that you want timestamps.
1239 union {
1240 char cmsg_buf[CMSG_SPACE(sizeof(uint32_t))];
1241 struct cmsghdr align;
1242 } u;
1243 cmsghdr* cmsg = reinterpret_cast<cmsghdr*>(u.cmsg_buf);
1244 cmsg->cmsg_level = SOL_SOCKET;
1245 cmsg->cmsg_type = SO_TIMESTAMPING;
1246 cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
1247 *reinterpret_cast<int*>(CMSG_DATA(cmsg)) =
1248 grpc_core::kTimestampingRecordingOptions;
1249 msg->msg_control = u.cmsg_buf;
1250 msg->msg_controllen = CMSG_SPACE(sizeof(uint32_t));
1251
1252 // If there was an error on sendmsg the logic in tcp_flush will handle it.
1253 ssize_t length = tcp_send(tcp->fd, msg, saved_errno, additional_flags);
1254 *sent_length = length;
1255 // Only save timestamps if all the bytes were taken by sendmsg.
1256 if (sending_length == static_cast<size_t>(length)) {
1257 tcp->tb_list.AddNewEntry(static_cast<uint32_t>(tcp->bytes_counter + length),
1258 tcp->fd, tcp->outgoing_buffer_arg);
1259 tcp->outgoing_buffer_arg = nullptr;
1260 }
1261 return true;
1262 }
1263
1264 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1265 TcpZerocopySendRecord* record,
1266 uint32_t seq, const char* tag);
1267 // Reads \a cmsg to process zerocopy control messages.
1268 static void process_zerocopy(grpc_tcp* tcp, struct cmsghdr* cmsg) {
1269 GPR_DEBUG_ASSERT(cmsg);
1270 auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(cmsg));
1271 GPR_DEBUG_ASSERT(serr->ee_errno == 0);
1272 GPR_DEBUG_ASSERT(serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY);
1273 const uint32_t lo = serr->ee_info;
1274 const uint32_t hi = serr->ee_data;
1275 for (uint32_t seq = lo; seq <= hi; ++seq) {
1276 // TODO(arjunroy): It's likely that lo and hi refer to zerocopy sequence
1277 // numbers that are generated by a single call to grpc_endpoint_write; ie.
1278 // we can batch the unref operation. So, check if record is the same for
1279 // both; if so, batch the unref/put.
1280 TcpZerocopySendRecord* record =
1281 tcp->tcp_zerocopy_send_ctx.ReleaseSendRecord(seq);
1282 GPR_DEBUG_ASSERT(record);
1283 UnrefMaybePutZerocopySendRecord(tcp, record, seq, "CALLBACK RCVD");
1284 }
1285 if (tcp->tcp_zerocopy_send_ctx.UpdateZeroCopyOMemStateAfterFree()) {
1286 grpc_fd_set_writable(tcp->em_fd);
1287 }
1288 }
1289
1290 // Whether the cmsg received from error queue is of the IPv4 or IPv6 levels.
1291 static bool CmsgIsIpLevel(const cmsghdr& cmsg) {
1292 return (cmsg.cmsg_level == SOL_IPV6 && cmsg.cmsg_type == IPV6_RECVERR) ||
1293 (cmsg.cmsg_level == SOL_IP && cmsg.cmsg_type == IP_RECVERR);
1294 }
1295
1296 static bool CmsgIsZeroCopy(const cmsghdr& cmsg) {
1297 if (!CmsgIsIpLevel(cmsg)) {
1298 return false;
1299 }
1300 auto serr = reinterpret_cast<const sock_extended_err*> CMSG_DATA(&cmsg);
1301 return serr->ee_errno == 0 && serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY;
1302 }
1303
1304 /// Reads \a cmsg to derive timestamps from the control messages. If a valid
1305 /// timestamp is found, the traced buffer list is updated with this timestamp.
1306 /// The caller of this function should be looping on the control messages found
1307 /// in \a msg. \a cmsg should point to the control message that the caller wants
1308 /// processed.
1309 /// On return, a pointer to a control message is returned. On the next
1310 /// iteration, CMSG_NXTHDR(msg, ret_val) should be passed as \a cmsg.
1311 struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
1312 struct cmsghdr* cmsg) {
1313 auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
1314 cmsghdr* opt_stats = nullptr;
1315 if (next_cmsg == nullptr) {
1316 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1317 gpr_log(GPR_ERROR, "Received timestamp without extended error");
1318 }
1319 return cmsg;
1320 }
1321
1322 // Check if next_cmsg is an OPT_STATS msg
1323 if (next_cmsg->cmsg_level == SOL_SOCKET &&
1324 next_cmsg->cmsg_type == SCM_TIMESTAMPING_OPT_STATS) {
1325 opt_stats = next_cmsg;
1326 next_cmsg = CMSG_NXTHDR(msg, opt_stats);
1327 if (next_cmsg == nullptr) {
1328 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1329 gpr_log(GPR_ERROR, "Received timestamp without extended error");
1330 }
1331 return opt_stats;
1332 }
1333 }
1334
1335 if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
1336 !(next_cmsg->cmsg_type == IP_RECVERR ||
1337 next_cmsg->cmsg_type == IPV6_RECVERR)) {
1338 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1339 gpr_log(GPR_ERROR, "Unexpected control message");
1340 }
1341 return cmsg;
1342 }
1343
1344 auto tss =
1345 reinterpret_cast<struct grpc_core::scm_timestamping*>(CMSG_DATA(cmsg));
1346 auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(next_cmsg));
1347 if (serr->ee_errno != ENOMSG ||
1348 serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
1349 gpr_log(GPR_ERROR, "Unexpected control message");
1350 return cmsg;
1351 }
1352 tcp->tb_list.ProcessTimestamp(serr, opt_stats, tss);
1353 return next_cmsg;
1354 }
1355
1356 /// For linux platforms, reads the socket's error queue and processes error
1357 /// messages from the queue.
1358 ///
1359 static bool process_errors(grpc_tcp* tcp) {
1360 bool processed_err = false;
1361 struct iovec iov;
1362 iov.iov_base = nullptr;
1363 iov.iov_len = 0;
1364 struct msghdr msg;
1365 msg.msg_name = nullptr;
1366 msg.msg_namelen = 0;
1367 msg.msg_iov = &iov;
1368 msg.msg_iovlen = 0;
1369 msg.msg_flags = 0;
1370 // Allocate enough space so we don't need to keep increasing this as size
1371 // of OPT_STATS increase
1372 constexpr size_t cmsg_alloc_space =
1373 CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) +
1374 CMSG_SPACE(sizeof(sock_extended_err) + sizeof(sockaddr_in)) +
1375 CMSG_SPACE(32 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)));
1376 // Allocate aligned space for cmsgs received along with timestamps
1377 union {
1378 char rbuf[cmsg_alloc_space];
1379 struct cmsghdr align;
1380 } aligned_buf;
1381 msg.msg_control = aligned_buf.rbuf;
1382 int r, saved_errno;
1383 while (true) {
1384 msg.msg_controllen = sizeof(aligned_buf.rbuf);
1385 do {
1386 r = recvmsg(tcp->fd, &msg, MSG_ERRQUEUE);
1387 saved_errno = errno;
1388 } while (r < 0 && saved_errno == EINTR);
1389
1390 if (r == -1 && saved_errno == EAGAIN) {
1391 return processed_err; // No more errors to process
1392 }
1393 if (r == -1) {
1394 return processed_err;
1395 }
1396 if (GPR_UNLIKELY((msg.msg_flags & MSG_CTRUNC) != 0)) {
1397 gpr_log(GPR_ERROR, "Error message was truncated.");
1398 }
1399
1400 if (msg.msg_controllen == 0) {
1401 // There was no control message found. It was probably spurious.
1402 return processed_err;
1403 }
1404 bool seen = false;
1405 for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
1406 cmsg = CMSG_NXTHDR(&msg, cmsg)) {
1407 if (CmsgIsZeroCopy(*cmsg)) {
1408 process_zerocopy(tcp, cmsg);
1409 seen = true;
1410 processed_err = true;
1411 } else if (cmsg->cmsg_level == SOL_SOCKET &&
1412 cmsg->cmsg_type == SCM_TIMESTAMPING) {
1413 cmsg = process_timestamp(tcp, &msg, cmsg);
1414 seen = true;
1415 processed_err = true;
1416 } else {
1417 // Got a control message that is not a timestamp or zerocopy. Don't know
1418 // how to handle this.
1419 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1420 gpr_log(GPR_INFO,
1421 "unknown control message cmsg_level:%d cmsg_type:%d",
1422 cmsg->cmsg_level, cmsg->cmsg_type);
1423 }
1424 return processed_err;
1425 }
1426 }
1427 if (!seen) {
1428 return processed_err;
1429 }
1430 }
1431 }
1432
1433 static void tcp_handle_error(void* arg /* grpc_tcp */,
1434 grpc_error_handle error) {
1435 grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1436 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1437 gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp,
1438 grpc_core::StatusToString(error).c_str());
1439 }
1440
1441 if (!error.ok() ||
1442 static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
1443     // We aren't going to register for error notifications anymore, so it is
1444     // safe to unref.
1445 TCP_UNREF(tcp, "error-tracking");
1446 return;
1447 }
1448
1449 // We are still interested in collecting timestamps, so let's try reading
1450 // them.
1451 bool processed = process_errors(tcp);
1452   // This might not be a timestamp error. Set the read and write closures to
1453   // be ready.
1454 if (!processed) {
1455 grpc_fd_set_readable(tcp->em_fd);
1456 grpc_fd_set_writable(tcp->em_fd);
1457 }
1458 grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
1459 }
1460
1461 #else // GRPC_LINUX_ERRQUEUE
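// Stub implementations for platforms without MSG_ERRQUEUE support: zerocopy
// sends and timestamp-based error handling are unavailable here, so these
// either return "not supported" defaults or abort.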
1462 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1463 grpc_tcp* /*tcp*/, grpc_slice_buffer* /*buf*/) {
1464 return nullptr;
1465 }
1466
1467 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* /*tcp*/) {}
1468
1469 static bool tcp_write_with_timestamps(grpc_tcp* /*tcp*/, struct msghdr* /*msg*/,
1470 size_t /*sending_length*/,
1471 ssize_t* /*sent_length*/,
1472 int* /* saved_errno */,
1473 int /*additional_flags*/) {
1474 gpr_log(GPR_ERROR, "Write with timestamps not supported for this platform");
1475 GPR_ASSERT(0);
1476 return false;
1477 }
1478
1479 static void tcp_handle_error(void* /*arg*/ /* grpc_tcp */,
1480 grpc_error_handle /*error*/) {
1481 gpr_log(GPR_ERROR, "Error handling is not supported for this platform");
1482 GPR_ASSERT(0);
1483 }
1484 #endif // GRPC_LINUX_ERRQUEUE
1485
1486 // If outgoing_buffer_arg is set, shuts down the traced-buffer list early so
1487 // that any pending release operations can be performed on the arg.
1488 void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
1489 if (tcp->outgoing_buffer_arg) {
1490 tcp->tb_list.Shutdown(tcp->outgoing_buffer_arg,
1491 GRPC_ERROR_CREATE("TracedBuffer list shutdown"));
1492 tcp->outgoing_buffer_arg = nullptr;
1493 }
1494 }
1495
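// Maximum number of iovec entries passed to a single sendmsg() call, capped
// by the platform's IOV_MAX when that is smaller than 260.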
1496 #if defined(IOV_MAX) && IOV_MAX < 260
1497 #define MAX_WRITE_IOVEC IOV_MAX
1498 #else
1499 #define MAX_WRITE_IOVEC 260
1500 #endif
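// Fills `iov` from the record's slice buffer starting at the current send
// offset, saving the starting offset so a throttled send can be unwound.
// Accumulates the referenced byte count into *sending_length and returns the
// number of entries populated (at most MAX_WRITE_IOVEC).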
1501 msg_iovlen_type TcpZerocopySendRecord::PopulateIovs(size_t* unwind_slice_idx,
1502 size_t* unwind_byte_idx,
1503 size_t* sending_length,
1504 iovec* iov) {
1505 msg_iovlen_type iov_size;
1506 *unwind_slice_idx = out_offset_.slice_idx;
1507 *unwind_byte_idx = out_offset_.byte_idx;
1508 for (iov_size = 0;
1509 out_offset_.slice_idx != buf_.count && iov_size != MAX_WRITE_IOVEC;
1510 iov_size++) {
1511 iov[iov_size].iov_base =
1512 GRPC_SLICE_START_PTR(buf_.slices[out_offset_.slice_idx]) +
1513 out_offset_.byte_idx;
1514 iov[iov_size].iov_len =
1515 GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]) -
1516 out_offset_.byte_idx;
1517 *sending_length += iov[iov_size].iov_len;
1518 ++(out_offset_.slice_idx);
1519 out_offset_.byte_idx = 0;
1520 }
1521 GPR_DEBUG_ASSERT(iov_size > 0);
1522 return iov_size;
1523 }
1524
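// After a partial send, walks the offset back over the trailing
// (sending_length - actually_sent) bytes that the kernel did not accept so
// the next flush resumes where this one stopped.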
1525 void TcpZerocopySendRecord::UpdateOffsetForBytesSent(size_t sending_length,
1526 size_t actually_sent) {
1527 size_t trailing = sending_length - actually_sent;
1528 while (trailing > 0) {
1529 size_t slice_length;
1530 out_offset_.slice_idx--;
1531 slice_length = GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]);
1532 if (slice_length > trailing) {
1533 out_offset_.byte_idx = slice_length - trailing;
1534 break;
1535 } else {
1536 trailing -= slice_length;
1537 }
1538 }
1539 }
1540
1541 // returns true if done, false if pending; if returning true, *error is set
1542 static bool do_tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
1543 grpc_error_handle* error) {
1544 msg_iovlen_type iov_size;
1545 ssize_t sent_length = 0;
1546 size_t sending_length;
1547 size_t unwind_slice_idx;
1548 size_t unwind_byte_idx;
1549 bool tried_sending_message;
1550 int saved_errno;
1551 msghdr msg;
1552   // iov consumes a lot of stack space. Keep it as the last item on the stack
1553   // to improve locality; in most cases only its first few elements are
1554   // populated.
1555 iovec iov[MAX_WRITE_IOVEC];
1556 while (true) {
1557 sending_length = 0;
1558 iov_size = record->PopulateIovs(&unwind_slice_idx, &unwind_byte_idx,
1559 &sending_length, iov);
1560 msg.msg_name = nullptr;
1561 msg.msg_namelen = 0;
1562 msg.msg_iov = iov;
1563 msg.msg_iovlen = iov_size;
1564 msg.msg_flags = 0;
1565 tried_sending_message = false;
1566     // Before calling sendmsg (with or without timestamps), take a single ref
1567     // on the zerocopy send record.
1568 tcp->tcp_zerocopy_send_ctx.NoteSend(record);
1569 saved_errno = 0;
1570 if (tcp->outgoing_buffer_arg != nullptr) {
1571 if (!tcp->ts_capable ||
1572 !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
1573 &saved_errno, MSG_ZEROCOPY)) {
1574         // We could not set socket options to collect Fathom timestamps.
1575         // Fall back to writing without timestamps.
1576 tcp->ts_capable = false;
1577 tcp_shutdown_buffer_list(tcp);
1578 } else {
1579 tried_sending_message = true;
1580 }
1581 }
1582 if (!tried_sending_message) {
1583 msg.msg_control = nullptr;
1584 msg.msg_controllen = 0;
1585 grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
1586 grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);
1587 sent_length = tcp_send(tcp->fd, &msg, &saved_errno, MSG_ZEROCOPY);
1588 }
1589 if (tcp->tcp_zerocopy_send_ctx.UpdateZeroCopyOMemStateAfterSend(
1590 saved_errno == ENOBUFS)) {
1591 grpc_fd_set_writable(tcp->em_fd);
1592 }
1593 if (sent_length < 0) {
1594 // If this particular send failed, drop ref taken earlier in this method.
1595 tcp->tcp_zerocopy_send_ctx.UndoSend();
1596 if (saved_errno == EAGAIN || saved_errno == ENOBUFS) {
1597 record->UnwindIfThrottled(unwind_slice_idx, unwind_byte_idx);
1598 return false;
1599 } else if (saved_errno == EPIPE) {
1600 *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1601 tcp_shutdown_buffer_list(tcp);
1602 return true;
1603 } else {
1604 *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1605 tcp_shutdown_buffer_list(tcp);
1606 return true;
1607 }
1608 }
1609 grpc_core::EventLog::Append("tcp-write-outstanding", -sent_length);
1610 tcp->bytes_counter += sent_length;
1611 record->UpdateOffsetForBytesSent(sending_length,
1612 static_cast<size_t>(sent_length));
1613 if (record->AllSlicesSent()) {
1614 *error = absl::OkStatus();
1615 return true;
1616 }
1617 }
1618 }
1619
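// Drops one ref on the zerocopy send record and, when the last ref is
// released, returns the record to the context's free pool.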
1620 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1621 TcpZerocopySendRecord* record,
1622 uint32_t /*seq*/,
1623 const char* /*tag*/) {
1624 if (record->Unref()) {
1625 tcp->tcp_zerocopy_send_ctx.PutSendRecord(record);
1626 }
1627 }
1628
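// Zerocopy counterpart of tcp_flush(): flushes `record` and, once the send is
// done (successfully or with a permanent error), releases this path's ref on
// the record.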
1629 static bool tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
1630 grpc_error_handle* error) {
1631 bool done = do_tcp_flush_zerocopy(tcp, record, error);
1632 if (done) {
1633 // Either we encountered an error, or we successfully sent all the bytes.
1634 // In either case, we're done with this record.
1635 UnrefMaybePutZerocopySendRecord(tcp, record, 0, "flush_done");
1636 }
1637 return done;
1638 }
1639
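// Non-zerocopy write path: sends tcp->outgoing_buffer via sendmsg(). Returns
// false if the socket would block (after trimming the slices already fully
// written), or true with *error set to the final status.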
1640 static bool tcp_flush(grpc_tcp* tcp, grpc_error_handle* error) {
1641 struct msghdr msg;
1642 struct iovec iov[MAX_WRITE_IOVEC];
1643 msg_iovlen_type iov_size;
1644 ssize_t sent_length = 0;
1645 size_t sending_length;
1646 size_t trailing;
1647 size_t unwind_slice_idx;
1648 size_t unwind_byte_idx;
1649 int saved_errno;
1650
1651 // We always start at zero, because we eagerly unref and trim the slice
1652 // buffer as we write
1653 size_t outgoing_slice_idx = 0;
1654
1655 while (true) {
1656 sending_length = 0;
1657 unwind_slice_idx = outgoing_slice_idx;
1658 unwind_byte_idx = tcp->outgoing_byte_idx;
1659 for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count &&
1660 iov_size != MAX_WRITE_IOVEC;
1661 iov_size++) {
1662 iov[iov_size].iov_base =
1663 GRPC_SLICE_START_PTR(
1664 tcp->outgoing_buffer->slices[outgoing_slice_idx]) +
1665 tcp->outgoing_byte_idx;
1666 iov[iov_size].iov_len =
1667 GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) -
1668 tcp->outgoing_byte_idx;
1669 sending_length += iov[iov_size].iov_len;
1670 outgoing_slice_idx++;
1671 tcp->outgoing_byte_idx = 0;
1672 }
1673 GPR_ASSERT(iov_size > 0);
1674
1675 msg.msg_name = nullptr;
1676 msg.msg_namelen = 0;
1677 msg.msg_iov = iov;
1678 msg.msg_iovlen = iov_size;
1679 msg.msg_flags = 0;
1680 bool tried_sending_message = false;
1681 saved_errno = 0;
1682 if (tcp->outgoing_buffer_arg != nullptr) {
1683 if (!tcp->ts_capable ||
1684 !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
1685 &saved_errno)) {
1686         // We could not set socket options to collect Fathom timestamps.
1687         // Fall back to writing without timestamps.
1688 tcp->ts_capable = false;
1689 tcp_shutdown_buffer_list(tcp);
1690 } else {
1691 tried_sending_message = true;
1692 }
1693 }
1694 if (!tried_sending_message) {
1695 msg.msg_control = nullptr;
1696 msg.msg_controllen = 0;
1697
1698 grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
1699 grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);
1700
1701 sent_length = tcp_send(tcp->fd, &msg, &saved_errno);
1702 }
1703
1704 if (sent_length < 0) {
1705 if (saved_errno == EAGAIN || saved_errno == ENOBUFS) {
1706 tcp->outgoing_byte_idx = unwind_byte_idx;
1707         // Unref and forget about all slices that have been fully written up to
1708         // this point.
1709 for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
1710 grpc_slice_buffer_remove_first(tcp->outgoing_buffer);
1711 }
1712 return false;
1713 } else if (saved_errno == EPIPE) {
1714 *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1715 grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
1716 tcp_shutdown_buffer_list(tcp);
1717 return true;
1718 } else {
1719 *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1720 grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
1721 tcp_shutdown_buffer_list(tcp);
1722 return true;
1723 }
1724 }
1725
1726 GPR_ASSERT(tcp->outgoing_byte_idx == 0);
1727 grpc_core::EventLog::Append("tcp-write-outstanding", -sent_length);
1728 tcp->bytes_counter += sent_length;
1729 trailing = sending_length - static_cast<size_t>(sent_length);
1730 while (trailing > 0) {
1731 size_t slice_length;
1732
1733 outgoing_slice_idx--;
1734 slice_length =
1735 GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]);
1736 if (slice_length > trailing) {
1737 tcp->outgoing_byte_idx = slice_length - trailing;
1738 break;
1739 } else {
1740 trailing -= slice_length;
1741 }
1742 }
1743 if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
1744 *error = absl::OkStatus();
1745 grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
1746 return true;
1747 }
1748 }
1749 }
1750
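// Closure run when the fd becomes writable (or the pending write is aborted
// with an error): resumes the in-progress flush and, once it completes,
// invokes the user's write callback and drops the "write" ref.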
1751 static void tcp_handle_write(void* arg /* grpc_tcp */,
1752 grpc_error_handle error) {
1753 grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1754 grpc_closure* cb;
1755
1756 if (!error.ok()) {
1757 cb = tcp->write_cb;
1758 tcp->write_cb = nullptr;
1759 if (tcp->current_zerocopy_send != nullptr) {
1760 UnrefMaybePutZerocopySendRecord(tcp, tcp->current_zerocopy_send, 0,
1761 "handle_write_err");
1762 tcp->current_zerocopy_send = nullptr;
1763 }
1764 grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1765 TCP_UNREF(tcp, "write");
1766 return;
1767 }
1768 bool flush_result =
1769 tcp->current_zerocopy_send != nullptr
1770 ? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
1771 : tcp_flush(tcp, &error);
1772 if (!flush_result) {
1773 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1774 gpr_log(GPR_INFO, "write: delayed");
1775 }
1776 notify_on_write(tcp);
1777 // tcp_flush does not populate error if it has returned false.
1778 GPR_DEBUG_ASSERT(error.ok());
1779 } else {
1780 cb = tcp->write_cb;
1781 tcp->write_cb = nullptr;
1782 tcp->current_zerocopy_send = nullptr;
1783 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1784 gpr_log(GPR_INFO, "write: %s", grpc_core::StatusToString(error).c_str());
1785 }
1786 // No need to take a ref on error since tcp_flush provides a ref.
1787 grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1788 TCP_UNREF(tcp, "write");
1789 }
1790 }
1791
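// Endpoint write entry point: stages `buf` (using a zerocopy send record when
// eligible) and attempts an immediate flush. If the socket would block, a
// "write" ref is taken and `cb` is invoked later from tcp_handle_write;
// otherwise `cb` runs right away with the result.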
1792 static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
1793 grpc_closure* cb, void* arg, int /*max_frame_size*/) {
1794 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1795 grpc_error_handle error;
1796 TcpZerocopySendRecord* zerocopy_send_record = nullptr;
1797
1798 grpc_core::EventLog::Append("tcp-write-outstanding", buf->length);
1799
1800 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1801 size_t i;
1802
1803 for (i = 0; i < buf->count; i++) {
1804 gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string.c_str());
1805 if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
1806 char* data =
1807 grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
1808 gpr_log(GPR_DEBUG, "WRITE DATA: %s", data);
1809 gpr_free(data);
1810 }
1811 }
1812 }
1813
1814 GPR_ASSERT(tcp->write_cb == nullptr);
1815 GPR_DEBUG_ASSERT(tcp->current_zerocopy_send == nullptr);
1816
1817 if (buf->length == 0) {
1818 grpc_core::Closure::Run(
1819 DEBUG_LOCATION, cb,
1820 grpc_fd_is_shutdown(tcp->em_fd)
1821 ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
1822 : absl::OkStatus());
1823 tcp_shutdown_buffer_list(tcp);
1824 return;
1825 }
1826
1827 zerocopy_send_record = tcp_get_send_zerocopy_record(tcp, buf);
1828 if (zerocopy_send_record == nullptr) {
1829 // Either not enough bytes, or couldn't allocate a zerocopy context.
1830 tcp->outgoing_buffer = buf;
1831 tcp->outgoing_byte_idx = 0;
1832 }
1833 tcp->outgoing_buffer_arg = arg;
1834 if (arg) {
1835 GPR_ASSERT(grpc_event_engine_can_track_errors());
1836 }
1837
1838 bool flush_result =
1839 zerocopy_send_record != nullptr
1840 ? tcp_flush_zerocopy(tcp, zerocopy_send_record, &error)
1841 : tcp_flush(tcp, &error);
1842 if (!flush_result) {
1843 TCP_REF(tcp, "write");
1844 tcp->write_cb = cb;
1845 tcp->current_zerocopy_send = zerocopy_send_record;
1846 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1847 gpr_log(GPR_INFO, "write: delayed");
1848 }
1849 notify_on_write(tcp);
1850 } else {
1851 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1852 gpr_log(GPR_INFO, "write: %s", grpc_core::StatusToString(error).c_str());
1853 }
1854 grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1855 }
1856 }
1857
1858 static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
1859 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1860 grpc_pollset_add_fd(pollset, tcp->em_fd);
1861 }
1862
1863 static void tcp_add_to_pollset_set(grpc_endpoint* ep,
1864 grpc_pollset_set* pollset_set) {
1865 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1866 grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
1867 }
1868
1869 static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
1870 grpc_pollset_set* pollset_set) {
1871 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1872 grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
1873 }
1874
1875 static absl::string_view tcp_get_peer(grpc_endpoint* ep) {
1876 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1877 return tcp->peer_string;
1878 }
1879
1880 static absl::string_view tcp_get_local_address(grpc_endpoint* ep) {
1881 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1882 return tcp->local_address;
1883 }
1884
1885 static int tcp_get_fd(grpc_endpoint* ep) {
1886 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1887 return tcp->fd;
1888 }
1889
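// Error tracking (timestamps and zerocopy completions) is only supported when
// the event engine can track errors and the fd is an AF_INET/AF_INET6 socket.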
1890 static bool tcp_can_track_err(grpc_endpoint* ep) {
1891 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1892 if (!grpc_event_engine_can_track_errors()) {
1893 return false;
1894 }
1895 struct sockaddr addr;
1896 socklen_t len = sizeof(addr);
1897 if (getsockname(tcp->fd, &addr, &len) < 0) {
1898 return false;
1899 }
1900 return addr.sa_family == AF_INET || addr.sa_family == AF_INET6;
1901 }
1902
1903 static const grpc_endpoint_vtable vtable = {tcp_read,
1904 tcp_write,
1905 tcp_add_to_pollset,
1906 tcp_add_to_pollset_set,
1907 tcp_delete_from_pollset_set,
1908 tcp_shutdown,
1909 tcp_destroy,
1910 tcp_get_peer,
1911 tcp_get_local_address,
1912 tcp_get_fd,
1913 tcp_can_track_err};
1914
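// Wraps an already-connected fd in a grpc_endpoint: sets up memory-quota
// accounting, resolves the local address, optionally enables SO_ZEROCOPY and
// TCP_INQ, initializes the read/write closures, and registers for error-queue
// notifications when the event engine can track errors.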
1915 grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
1916 const grpc_core::PosixTcpOptions& options,
1917 absl::string_view peer_string) {
1918 grpc_tcp* tcp = new grpc_tcp(options);
1919 tcp->base.vtable = &vtable;
1920 tcp->peer_string = std::string(peer_string);
1921 tcp->fd = grpc_fd_wrapped_fd(em_fd);
1922 GPR_ASSERT(options.resource_quota != nullptr);
1923 tcp->memory_owner =
1924 options.resource_quota->memory_quota()->CreateMemoryOwner(peer_string);
1925 tcp->self_reservation = tcp->memory_owner.MakeReservation(sizeof(grpc_tcp));
1926 grpc_resolved_address resolved_local_addr;
1927 memset(&resolved_local_addr, 0, sizeof(resolved_local_addr));
1928 resolved_local_addr.len = sizeof(resolved_local_addr.addr);
1929 absl::StatusOr<std::string> addr_uri;
1930 if (getsockname(tcp->fd,
1931 reinterpret_cast<sockaddr*>(resolved_local_addr.addr),
1932 &resolved_local_addr.len) < 0 ||
1933 !(addr_uri = grpc_sockaddr_to_uri(&resolved_local_addr)).ok()) {
1934 tcp->local_address = "";
1935 } else {
1936 tcp->local_address = addr_uri.value();
1937 }
1938 tcp->read_cb = nullptr;
1939 tcp->write_cb = nullptr;
1940 tcp->current_zerocopy_send = nullptr;
1941 tcp->release_fd_cb = nullptr;
1942 tcp->release_fd = nullptr;
1943 tcp->target_length = static_cast<double>(options.tcp_read_chunk_size);
1944 tcp->bytes_read_this_round = 0;
1945 // Will be set to false by the very first endpoint read function
1946 tcp->is_first_read = true;
1947 tcp->bytes_counter = -1;
1948 tcp->socket_ts_enabled = false;
1949 tcp->ts_capable = true;
1950 tcp->outgoing_buffer_arg = nullptr;
1951 tcp->min_progress_size = 1;
1952 if (options.tcp_tx_zero_copy_enabled &&
1953 !tcp->tcp_zerocopy_send_ctx.memory_limited()) {
1954 #ifdef GRPC_LINUX_ERRQUEUE
1955 const int enable = 1;
1956 auto err =
1957 setsockopt(tcp->fd, SOL_SOCKET, SO_ZEROCOPY, &enable, sizeof(enable));
1958 if (err == 0) {
1959 tcp->tcp_zerocopy_send_ctx.set_enabled(true);
1960 } else {
1961 gpr_log(GPR_ERROR, "Failed to set zerocopy options on the socket.");
1962 }
1963 #endif
1964 }
1965 // paired with unref in grpc_tcp_destroy
1966 new (&tcp->refcount) grpc_core::RefCount(
1967 1, GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace) ? "tcp" : nullptr);
1968 gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
1969 tcp->em_fd = em_fd;
1970 grpc_slice_buffer_init(&tcp->last_read_buffer);
1971 GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
1972 grpc_schedule_on_exec_ctx);
1973 if (grpc_event_engine_run_in_background()) {
1974 // If there is a polling engine always running in the background, there is
1975 // no need to run the backup poller.
1976 GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
1977 grpc_schedule_on_exec_ctx);
1978 } else {
1979 GRPC_CLOSURE_INIT(&tcp->write_done_closure,
1980 tcp_drop_uncovered_then_handle_write, tcp,
1981 grpc_schedule_on_exec_ctx);
1982 }
1983 // Always assume there is something on the queue to read.
1984 tcp->inq = 1;
1985 #ifdef GRPC_HAVE_TCP_INQ
1986 int one = 1;
1987 if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
1988 tcp->inq_capable = true;
1989 } else {
1990 gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
1991 tcp->inq_capable = false;
1992 }
1993 #else
1994 tcp->inq_capable = false;
1995 #endif // GRPC_HAVE_TCP_INQ
1996 // Start being notified on errors if event engine can track errors.
1997 if (grpc_event_engine_can_track_errors()) {
1998 // Grab a ref to tcp so that we can safely access the tcp struct when
1999 // processing errors. We unref when we no longer want to track errors
2000 // separately.
2001 TCP_REF(tcp, "error-tracking");
2002 gpr_atm_rel_store(&tcp->stop_error_notification, 0);
2003 GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
2004 grpc_schedule_on_exec_ctx);
2005 grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
2006 }
2007
2008 return &tcp->base;
2009 }
2010
2011 int grpc_tcp_fd(grpc_endpoint* ep) {
2012 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
2013 GPR_ASSERT(ep->vtable == &vtable);
2014 return grpc_fd_wrapped_fd(tcp->em_fd);
2015 }
2016
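// Destroys the endpoint while handing the underlying fd back to the caller:
// once pending operations drain, *fd receives the fd and `done` is scheduled.
// EventEngine-based endpoints delegate to the EventEngine shim instead.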
2017 void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
2018 grpc_closure* done) {
2019 if (grpc_event_engine::experimental::grpc_is_event_engine_endpoint(ep)) {
2020 return grpc_event_engine::experimental::
2021 grpc_event_engine_endpoint_destroy_and_release_fd(ep, fd, done);
2022 }
2023 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
2024 GPR_ASSERT(ep->vtable == &vtable);
2025 tcp->release_fd = fd;
2026 tcp->release_fd_cb = done;
2027 grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
2028 if (grpc_event_engine_can_track_errors()) {
2029     // Stop error notifications.
2030 ZerocopyDisableAndWaitForRemaining(tcp);
2031 gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
2032 grpc_fd_set_error(tcp->em_fd);
2033 }
2034 TCP_UNREF(tcp, "destroy");
2035 }
2036
2037 void grpc_tcp_posix_init() { g_backup_poller_mu = new grpc_core::Mutex; }
2038
2039 void grpc_tcp_posix_shutdown() {
2040 delete g_backup_poller_mu;
2041 g_backup_poller_mu = nullptr;
2042 }
2043
2044 #endif // GRPC_POSIX_SOCKET_TCP
2045