//
// Copyright 2015-2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include <grpc/support/port_platform.h>

#include "src/core/lib/surface/server.h"

#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <atomic>
#include <initializer_list>
#include <list>
#include <new>
#include <queue>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"

#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/status.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_args_preconditioning.h"
#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/match.h"
#include "src/core/lib/gprpp/mpscq.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/detail/basic_join.h"
#include "src/core/lib/promise/detail/basic_seq.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/promise.h"
#include "src/core/lib/promise/try_join.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"

namespace grpc_core {

TraceFlag grpc_server_channel_trace(false, "server_channel");

//
// Server::RequestedCall
//

struct Server::RequestedCall {
  enum class Type { BATCH_CALL, REGISTERED_CALL };

  RequestedCall(void* tag_arg, grpc_completion_queue* call_cq,
                grpc_call** call_arg, grpc_metadata_array* initial_md,
                grpc_call_details* details)
      : type(Type::BATCH_CALL),
        tag(tag_arg),
        cq_bound_to_call(call_cq),
        call(call_arg),
        initial_metadata(initial_md) {
    data.batch.details = details;
  }

  RequestedCall(void* tag_arg, grpc_completion_queue* call_cq,
                grpc_call** call_arg, grpc_metadata_array* initial_md,
                RegisteredMethod* rm, gpr_timespec* deadline,
                grpc_byte_buffer** optional_payload)
      : type(Type::REGISTERED_CALL),
        tag(tag_arg),
        cq_bound_to_call(call_cq),
        call(call_arg),
        initial_metadata(initial_md) {
    data.registered.method = rm;
    data.registered.deadline = deadline;
    data.registered.optional_payload = optional_payload;
  }

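  // Intrusive link used to queue this request on a per-CQ MPSC queue until it
  // is matched against an incoming RPC (see RequestCallWithPossiblePublish()).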
  MultiProducerSingleConsumerQueue::Node mpscq_node;
  const Type type;
  void* const tag;
  grpc_completion_queue* const cq_bound_to_call;
  grpc_call** const call;
  grpc_cq_completion completion;
  grpc_metadata_array* const initial_metadata;
  union {
    struct {
      grpc_call_details* details;
    } batch;
    struct {
      RegisteredMethod* method;
      gpr_timespec* deadline;
      grpc_byte_buffer** optional_payload;
    } registered;
  } data;
};

//
// Server::RegisteredMethod
//

struct Server::RegisteredMethod {
  RegisteredMethod(
      const char* method_arg, const char* host_arg,
      grpc_server_register_method_payload_handling payload_handling_arg,
      uint32_t flags_arg)
      : method(method_arg == nullptr ? "" : method_arg),
        host(host_arg == nullptr ? "" : host_arg),
        payload_handling(payload_handling_arg),
        flags(flags_arg) {}

  ~RegisteredMethod() = default;

  const std::string method;
  const std::string host;
  const grpc_server_register_method_payload_handling payload_handling;
  const uint32_t flags;
  // One request matcher per method.
  std::unique_ptr<RequestMatcherInterface> matcher;
};

//
// Server::RequestMatcherInterface
//

// RPCs that come in from the transport must be matched against RPC requests
// from the application. An incoming request from the application can be
// matched to an RPC that has already arrived or can be queued up for later
// use. Likewise, an RPC coming in from the transport can either be matched to
// a request that already arrived from the application or can be queued up for
// later use (marked pending). If there is a match, the request's tag is posted
// on the request's notification CQ.
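//
// For example, an application typically calls grpc_server_request_call() (or
// grpc_server_request_registered_call()) to supply a slot for the next
// incoming RPC; when a new RPC arrives on a transport, it is either handed to
// one of those waiting slots or parked on the pending list until a slot
// becomes available.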
//
// RequestMatcherInterface is the base class to provide this functionality.
class Server::RequestMatcherInterface {
 public:
  virtual ~RequestMatcherInterface() {}

  // Unref the calls associated with any incoming RPCs in the pending queue
  // (not yet matched to an application-requested RPC).
  virtual void ZombifyPending() = 0;

  // Mark all application-requested RPCs failed if they have not been matched
  // to an incoming RPC. The error parameter indicates why the RPCs are being
  // failed (always server shutdown in all current implementations).
  virtual void KillRequests(grpc_error_handle error) = 0;

  // How many request queues are supported by this matcher. This is an
  // abstract concept that essentially maps to gRPC completion queues.
  virtual size_t request_queue_count() const = 0;

  // This function is invoked when the application requests a new RPC whose
  // information is in the call parameter. The request_queue_index marks the
  // queue onto which to place this RPC, and is typically associated with a
  // gRPC CQ. If there are pending RPCs waiting to be matched, publish one
  // (match it and notify the CQ).
  virtual void RequestCallWithPossiblePublish(size_t request_queue_index,
                                              RequestedCall* call) = 0;

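  // The result of matching an incoming RPC to an application-requested call.
  // MatchResult owns the RequestedCall until TakeCall() is invoked; if it is
  // destroyed without the call being taken, the requested call is failed with
  // a CANCELLED status.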
  class MatchResult {
   public:
    MatchResult(Server* server, size_t cq_idx, RequestedCall* requested_call)
        : server_(server), cq_idx_(cq_idx), requested_call_(requested_call) {}
    ~MatchResult() {
      if (requested_call_ != nullptr) {
        server_->FailCall(cq_idx_, requested_call_, absl::CancelledError());
      }
    }

    MatchResult(const MatchResult&) = delete;
    MatchResult& operator=(const MatchResult&) = delete;

    MatchResult(MatchResult&& other) noexcept
        : server_(other.server_),
          cq_idx_(other.cq_idx_),
          requested_call_(std::exchange(other.requested_call_, nullptr)) {}

    RequestedCall* TakeCall() {
      return std::exchange(requested_call_, nullptr);
    }

    grpc_completion_queue* cq() const { return server_->cqs_[cq_idx_]; }
    size_t cq_idx() const { return cq_idx_; }

   private:
    Server* server_;
    size_t cq_idx_;
    RequestedCall* requested_call_;
  };

  // This function is invoked on an incoming promise based RPC.
  // The RequestMatcher will try to match it against an application-requested
  // RPC if possible or will place it in the pending queue otherwise. To enable
  // some measure of fairness between server CQs, the match is done starting at
  // the start_request_queue_index parameter in a cyclic order rather than
  // always starting at 0.
  virtual ArenaPromise<absl::StatusOr<MatchResult>> MatchRequest(
      size_t start_request_queue_index) = 0;

  // This function is invoked on an incoming RPC, represented by the calld
  // object. The RequestMatcher will try to match it against an
  // application-requested RPC if possible or will place it in the pending
  // queue otherwise. To enable some measure of fairness between server CQs,
  // the match is done starting at the start_request_queue_index parameter in a
  // cyclic order rather than always starting at 0.
  virtual void MatchOrQueue(size_t start_request_queue_index,
                            CallData* calld) = 0;

  // Returns the server associated with this request matcher
  virtual Server* server() const = 0;
};

// The RealRequestMatcher is an implementation of RequestMatcherInterface that
// actually uses all the features of RequestMatcherInterface: expecting the
// application to explicitly request RPCs and then matching those to incoming
// RPCs, along with a slow path by which incoming RPCs are put on a locked
// pending list if they aren't able to be matched to an application request.
class Server::RealRequestMatcher : public RequestMatcherInterface {
 public:
  explicit RealRequestMatcher(Server* server)
      : server_(server), requests_per_cq_(server->cqs_.size()) {}

  ~RealRequestMatcher() override {
    for (LockedMultiProducerSingleConsumerQueue& queue : requests_per_cq_) {
      GPR_ASSERT(queue.Pop() == nullptr);
    }
  }

  void ZombifyPending() override {
    while (!pending_.empty()) {
      Match(
          pending_.front(),
          [](CallData* calld) {
            calld->SetState(CallData::CallState::ZOMBIED);
            calld->KillZombie();
          },
          [](const std::shared_ptr<ActivityWaiter>& w) {
            w->Finish(absl::InternalError("Server closed"));
          });
      pending_.pop();
    }
  }

  void KillRequests(grpc_error_handle error) override {
    for (size_t i = 0; i < requests_per_cq_.size(); i++) {
      RequestedCall* rc;
      while ((rc = reinterpret_cast<RequestedCall*>(
                  requests_per_cq_[i].Pop())) != nullptr) {
        server_->FailCall(i, rc, error);
      }
    }
  }

  size_t request_queue_count() const override {
    return requests_per_cq_.size();
  }

  void RequestCallWithPossiblePublish(size_t request_queue_index,
                                      RequestedCall* call) override {
    if (requests_per_cq_[request_queue_index].Push(&call->mpscq_node)) {
      // this was the first queued request: we need to lock and start
      // matching calls
      struct NextPendingCall {
        RequestedCall* rc = nullptr;
        PendingCall pending;
      };
      auto pop_next_pending = [this, request_queue_index] {
        NextPendingCall pending_call;
        {
          MutexLock lock(&server_->mu_call_);
          if (!pending_.empty()) {
            pending_call.rc = reinterpret_cast<RequestedCall*>(
                requests_per_cq_[request_queue_index].Pop());
            if (pending_call.rc != nullptr) {
              pending_call.pending = std::move(pending_.front());
              pending_.pop();
            }
          }
        }
        return pending_call;
      };
      while (true) {
        NextPendingCall next_pending = pop_next_pending();
        if (next_pending.rc == nullptr) break;
        auto mr = MatchResult(server(), request_queue_index, next_pending.rc);
        Match(
            next_pending.pending,
            [&mr](CallData* calld) {
              if (!calld->MaybeActivate()) {
                // Zombied Call
                calld->KillZombie();
              } else {
                calld->Publish(mr.cq_idx(), mr.TakeCall());
              }
            },
            [&mr](const std::shared_ptr<ActivityWaiter>& w) {
              w->Finish(std::move(mr));
            });
      }
    }
  }

  void MatchOrQueue(size_t start_request_queue_index,
                    CallData* calld) override {
    for (size_t i = 0; i < requests_per_cq_.size(); i++) {
      size_t cq_idx = (start_request_queue_index + i) % requests_per_cq_.size();
      RequestedCall* rc =
          reinterpret_cast<RequestedCall*>(requests_per_cq_[cq_idx].TryPop());
      if (rc != nullptr) {
        calld->SetState(CallData::CallState::ACTIVATED);
        calld->Publish(cq_idx, rc);
        return;
      }
    }
    // No request was found on any CQ; queue the call on the slow (pending)
    // list. We re-check that all the request queues are empty under the
    // server's mu_call_ lock, so that if a request is added to an empty queue
    // concurrently, it blocks until this call is actually on the pending list.
    RequestedCall* rc = nullptr;
    size_t cq_idx = 0;
    size_t loop_count;
    {
      MutexLock lock(&server_->mu_call_);
      for (loop_count = 0; loop_count < requests_per_cq_.size(); loop_count++) {
        cq_idx =
            (start_request_queue_index + loop_count) % requests_per_cq_.size();
        rc = reinterpret_cast<RequestedCall*>(requests_per_cq_[cq_idx].Pop());
        if (rc != nullptr) {
          break;
        }
      }
      if (rc == nullptr) {
        calld->SetState(CallData::CallState::PENDING);
        pending_.push(calld);
        return;
      }
    }
    calld->SetState(CallData::CallState::ACTIVATED);
    calld->Publish(cq_idx, rc);
  }

  ArenaPromise<absl::StatusOr<MatchResult>> MatchRequest(
      size_t start_request_queue_index) override {
    for (size_t i = 0; i < requests_per_cq_.size(); i++) {
      size_t cq_idx = (start_request_queue_index + i) % requests_per_cq_.size();
      RequestedCall* rc =
          reinterpret_cast<RequestedCall*>(requests_per_cq_[cq_idx].TryPop());
      if (rc != nullptr) {
        return Immediate(MatchResult(server(), cq_idx, rc));
      }
    }
    // No request was found on any CQ; queue the call on the slow (pending)
    // list. We re-check that all the request queues are empty under the
    // server's mu_call_ lock, so that if a request is added to an empty queue
    // concurrently, it blocks until this call is actually on the pending list.
    RequestedCall* rc = nullptr;
    size_t cq_idx = 0;
    size_t loop_count;
    {
      MutexLock lock(&server_->mu_call_);
      for (loop_count = 0; loop_count < requests_per_cq_.size(); loop_count++) {
        cq_idx =
            (start_request_queue_index + loop_count) % requests_per_cq_.size();
        rc = reinterpret_cast<RequestedCall*>(requests_per_cq_[cq_idx].Pop());
        if (rc != nullptr) {
          break;
        }
      }
      if (rc == nullptr) {
        auto w = std::make_shared<ActivityWaiter>(
            Activity::current()->MakeOwningWaker());
        pending_.push(w);
        return [w]() -> Poll<absl::StatusOr<MatchResult>> {
          std::unique_ptr<absl::StatusOr<MatchResult>> r(
              w->result.exchange(nullptr, std::memory_order_acq_rel));
          if (r == nullptr) return Pending{};
          return std::move(*r);
        };
      }
    }
    return Immediate(MatchResult(server(), cq_idx, rc));
  }

  Server* server() const final { return server_; }

 private:
  Server* const server_;
  struct ActivityWaiter {
    explicit ActivityWaiter(Waker waker) : waker(std::move(waker)) {}
    ~ActivityWaiter() { delete result.load(std::memory_order_acquire); }
    void Finish(absl::StatusOr<MatchResult> r) {
      result.store(new absl::StatusOr<MatchResult>(std::move(r)),
                   std::memory_order_release);
      waker.Wakeup();
    }
    Waker waker;
    std::atomic<absl::StatusOr<MatchResult>*> result{nullptr};
  };
  using PendingCall = absl::variant<CallData*, std::shared_ptr<ActivityWaiter>>;
  std::queue<PendingCall> pending_;
  std::vector<LockedMultiProducerSingleConsumerQueue> requests_per_cq_;
};

// AllocatingRequestMatchers don't allow the application to request an RPC in
// advance or queue up any incoming RPC for later match. Instead, MatchOrQueue
// will call out to an allocation function passed in at the construction of the
// object. These request matchers are designed for the C++ callback API, so
// they only support 1 completion queue (passed in at the constructor). They
// are also used for the sync API.
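//
// For example, SetBatchMethodAllocator() below installs an
// AllocatingRequestMatcherBatch as the matcher for unregistered methods, so
// each incoming unregistered RPC immediately invokes the allocator instead of
// waiting for a grpc_server_request_call() from the application.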
class Server::AllocatingRequestMatcherBase : public RequestMatcherInterface {
 public:
  AllocatingRequestMatcherBase(Server* server, grpc_completion_queue* cq)
      : server_(server), cq_(cq) {
    size_t idx;
    for (idx = 0; idx < server->cqs_.size(); idx++) {
      if (server->cqs_[idx] == cq) {
        break;
      }
    }
    GPR_ASSERT(idx < server->cqs_.size());
    cq_idx_ = idx;
  }

  void ZombifyPending() override {}

  void KillRequests(grpc_error_handle /*error*/) override {}

  size_t request_queue_count() const override { return 0; }

  void RequestCallWithPossiblePublish(size_t /*request_queue_index*/,
                                      RequestedCall* /*call*/) final {
    Crash("unreachable");
  }

  Server* server() const final { return server_; }

  // Supply the completion queue related to this request matcher
  grpc_completion_queue* cq() const { return cq_; }

  // Supply the completion queue's index relative to the server.
  size_t cq_idx() const { return cq_idx_; }

 private:
  Server* const server_;
  grpc_completion_queue* const cq_;
  size_t cq_idx_;
};

// An allocating request matcher for non-registered methods (used for generic
// API and unimplemented RPCs).
class Server::AllocatingRequestMatcherBatch
    : public AllocatingRequestMatcherBase {
 public:
  AllocatingRequestMatcherBatch(Server* server, grpc_completion_queue* cq,
                                std::function<BatchCallAllocation()> allocator)
      : AllocatingRequestMatcherBase(server, cq),
        allocator_(std::move(allocator)) {}

  void MatchOrQueue(size_t /*start_request_queue_index*/,
                    CallData* calld) override {
    const bool still_running = server()->ShutdownRefOnRequest();
    auto cleanup_ref =
        absl::MakeCleanup([this] { server()->ShutdownUnrefOnRequest(); });
    if (still_running) {
      BatchCallAllocation call_info = allocator_();
      GPR_ASSERT(server()->ValidateServerRequest(
                     cq(), static_cast<void*>(call_info.tag), nullptr,
                     nullptr) == GRPC_CALL_OK);
      RequestedCall* rc = new RequestedCall(
          static_cast<void*>(call_info.tag), call_info.cq, call_info.call,
          call_info.initial_metadata, call_info.details);
      calld->SetState(CallData::CallState::ACTIVATED);
      calld->Publish(cq_idx(), rc);
    } else {
      calld->FailCallCreation();
    }
  }

  ArenaPromise<absl::StatusOr<MatchResult>> MatchRequest(
      size_t /*start_request_queue_index*/) override {
    const bool still_running = server()->ShutdownRefOnRequest();
    auto cleanup_ref =
        absl::MakeCleanup([this] { server()->ShutdownUnrefOnRequest(); });
    if (still_running) {
      BatchCallAllocation call_info = allocator_();
      GPR_ASSERT(server()->ValidateServerRequest(
                     cq(), static_cast<void*>(call_info.tag), nullptr,
                     nullptr) == GRPC_CALL_OK);
      RequestedCall* rc = new RequestedCall(
          static_cast<void*>(call_info.tag), call_info.cq, call_info.call,
          call_info.initial_metadata, call_info.details);
      return Immediate(MatchResult(server(), cq_idx(), rc));
    } else {
      return Immediate(absl::InternalError("Server shutdown"));
    }
  }

 private:
  std::function<BatchCallAllocation()> allocator_;
};

// An allocating request matcher for registered methods.
class Server::AllocatingRequestMatcherRegistered
    : public AllocatingRequestMatcherBase {
 public:
  AllocatingRequestMatcherRegistered(
      Server* server, grpc_completion_queue* cq, RegisteredMethod* rm,
      std::function<RegisteredCallAllocation()> allocator)
      : AllocatingRequestMatcherBase(server, cq),
        registered_method_(rm),
        allocator_(std::move(allocator)) {}

  void MatchOrQueue(size_t /*start_request_queue_index*/,
                    CallData* calld) override {
    auto cleanup_ref =
        absl::MakeCleanup([this] { server()->ShutdownUnrefOnRequest(); });
    if (server()->ShutdownRefOnRequest()) {
      RegisteredCallAllocation call_info = allocator_();
      GPR_ASSERT(server()->ValidateServerRequest(
                     cq(), call_info.tag, call_info.optional_payload,
                     registered_method_) == GRPC_CALL_OK);
      RequestedCall* rc =
          new RequestedCall(call_info.tag, call_info.cq, call_info.call,
                            call_info.initial_metadata, registered_method_,
                            call_info.deadline, call_info.optional_payload);
      calld->SetState(CallData::CallState::ACTIVATED);
      calld->Publish(cq_idx(), rc);
    } else {
      calld->FailCallCreation();
    }
  }

  ArenaPromise<absl::StatusOr<MatchResult>> MatchRequest(
      size_t /*start_request_queue_index*/) override {
    const bool still_running = server()->ShutdownRefOnRequest();
    auto cleanup_ref =
        absl::MakeCleanup([this] { server()->ShutdownUnrefOnRequest(); });
    if (still_running) {
      RegisteredCallAllocation call_info = allocator_();
      GPR_ASSERT(server()->ValidateServerRequest(
                     cq(), call_info.tag, call_info.optional_payload,
                     registered_method_) == GRPC_CALL_OK);
      RequestedCall* rc =
          new RequestedCall(call_info.tag, call_info.cq, call_info.call,
                            call_info.initial_metadata, registered_method_,
                            call_info.deadline, call_info.optional_payload);
      return Immediate(MatchResult(server(), cq_idx(), rc));
    } else {
      return Immediate(absl::InternalError("Server shutdown"));
    }
  }

 private:
  RegisteredMethod* const registered_method_;
  std::function<RegisteredCallAllocation()> allocator_;
};

//
// ChannelBroadcaster
//

namespace {

class ChannelBroadcaster {
 public:
  // This can have an empty constructor and destructor since we want to control
  // when the actual setup and shutdown broadcast take place.
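  //
  // Typical usage (see ShutdownAndNotify() and CancelAllCalls() below):
  // FillChannelsLocked() is called while holding the server's mu_global_, and
  // BroadcastShutdown() is called afterwards, outside the lock.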

  // Copies over the channels from the locked server.
  void FillChannelsLocked(std::vector<RefCountedPtr<Channel>> channels) {
    GPR_DEBUG_ASSERT(channels_.empty());
    channels_ = std::move(channels);
  }

  // Broadcasts a shutdown on each channel.
  void BroadcastShutdown(bool send_goaway,
                         grpc_error_handle force_disconnect) {
    for (const RefCountedPtr<Channel>& channel : channels_) {
      SendShutdown(channel->c_ptr(), send_goaway, force_disconnect);
    }
    channels_.clear();  // just for safety against double broadcast
  }

 private:
  struct ShutdownCleanupArgs {
    grpc_closure closure;
    grpc_slice slice;
  };

  static void ShutdownCleanup(void* arg, grpc_error_handle /*error*/) {
    ShutdownCleanupArgs* a = static_cast<ShutdownCleanupArgs*>(arg);
    CSliceUnref(a->slice);
    delete a;
  }

  static void SendShutdown(grpc_channel* channel, bool send_goaway,
                           grpc_error_handle send_disconnect) {
    ShutdownCleanupArgs* sc = new ShutdownCleanupArgs;
    GRPC_CLOSURE_INIT(&sc->closure, ShutdownCleanup, sc,
                      grpc_schedule_on_exec_ctx);
    grpc_transport_op* op = grpc_make_transport_op(&sc->closure);
    grpc_channel_element* elem;
    op->goaway_error =
        send_goaway
            ? grpc_error_set_int(GRPC_ERROR_CREATE("Server shutdown"),
                                 StatusIntProperty::kRpcStatus, GRPC_STATUS_OK)
            : absl::OkStatus();
    sc->slice = grpc_slice_from_copied_string("Server shutdown");
    op->disconnect_with_error = send_disconnect;
    elem =
        grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
    elem->filter->start_transport_op(elem, op);
  }

  std::vector<RefCountedPtr<Channel>> channels_;
};

}  // namespace

//
// Server
//

const grpc_channel_filter Server::kServerTopFilter = {
    Server::CallData::StartTransportStreamOpBatch,
    Server::ChannelData::MakeCallPromise,
    grpc_channel_next_op,
    sizeof(Server::CallData),
    Server::CallData::InitCallElement,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    Server::CallData::DestroyCallElement,
    sizeof(Server::ChannelData),
    Server::ChannelData::InitChannelElement,
    grpc_channel_stack_no_post_init,
    Server::ChannelData::DestroyChannelElement,
    grpc_channel_next_get_info,
    "server",
};

namespace {

RefCountedPtr<channelz::ServerNode> CreateChannelzNode(
    const ChannelArgs& args) {
  RefCountedPtr<channelz::ServerNode> channelz_node;
  if (args.GetBool(GRPC_ARG_ENABLE_CHANNELZ)
          .value_or(GRPC_ENABLE_CHANNELZ_DEFAULT)) {
    size_t channel_tracer_max_memory = std::max(
        0, args.GetInt(GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE)
               .value_or(GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT));
    channelz_node =
        MakeRefCounted<channelz::ServerNode>(channel_tracer_max_memory);
    channelz_node->AddTraceEvent(
        channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string("Server created"));
  }
  return channelz_node;
}

}  // namespace

Server::Server(const ChannelArgs& args)
    : channel_args_(args), channelz_node_(CreateChannelzNode(args)) {}

Server::~Server() {
  // Remove the cq pollsets from the config_fetcher.
  if (started_ && config_fetcher_ != nullptr &&
      config_fetcher_->interested_parties() != nullptr) {
    for (grpc_pollset* pollset : pollsets_) {
      grpc_pollset_set_del_pollset(config_fetcher_->interested_parties(),
                                   pollset);
    }
  }
  for (size_t i = 0; i < cqs_.size(); i++) {
    GRPC_CQ_INTERNAL_UNREF(cqs_[i], "server");
  }
}

void Server::AddListener(OrphanablePtr<ListenerInterface> listener) {
  channelz::ListenSocketNode* listen_socket_node =
      listener->channelz_listen_socket_node();
  if (listen_socket_node != nullptr && channelz_node_ != nullptr) {
    channelz_node_->AddChildListenSocket(listen_socket_node->Ref());
  }
  listeners_.emplace_back(std::move(listener));
}

void Server::Start() {
  started_ = true;
  for (grpc_completion_queue* cq : cqs_) {
    if (grpc_cq_can_listen(cq)) {
      pollsets_.push_back(grpc_cq_pollset(cq));
    }
  }
  if (unregistered_request_matcher_ == nullptr) {
    unregistered_request_matcher_ = std::make_unique<RealRequestMatcher>(this);
  }
  for (std::unique_ptr<RegisteredMethod>& rm : registered_methods_) {
    if (rm->matcher == nullptr) {
      rm->matcher = std::make_unique<RealRequestMatcher>(this);
    }
  }
  {
    MutexLock lock(&mu_global_);
    starting_ = true;
  }
  // Register the interested parties from the config fetcher with the cq
  // pollsets before starting the listeners, so that the config fetcher is
  // being polled when the listeners start watching it.
  if (config_fetcher_ != nullptr &&
      config_fetcher_->interested_parties() != nullptr) {
    for (grpc_pollset* pollset : pollsets_) {
      grpc_pollset_set_add_pollset(config_fetcher_->interested_parties(),
                                   pollset);
    }
  }
  for (auto& listener : listeners_) {
    listener.listener->Start(this, &pollsets_);
  }
  MutexLock lock(&mu_global_);
  starting_ = false;
  starting_cv_.Signal();
}

grpc_error_handle Server::SetupTransport(
    grpc_transport* transport, grpc_pollset* accepting_pollset,
    const ChannelArgs& args,
    const RefCountedPtr<channelz::SocketNode>& socket_node) {
  // Create channel.
  absl::StatusOr<RefCountedPtr<Channel>> channel =
      Channel::Create(nullptr, args, GRPC_SERVER_CHANNEL, transport);
  if (!channel.ok()) {
    return absl_status_to_grpc_error(channel.status());
  }
  ChannelData* chand = static_cast<ChannelData*>(
      grpc_channel_stack_element((*channel)->channel_stack(), 0)->channel_data);
  // Set up CQs.
  size_t cq_idx;
  for (cq_idx = 0; cq_idx < cqs_.size(); cq_idx++) {
    if (grpc_cq_pollset(cqs_[cq_idx]) == accepting_pollset) break;
  }
  if (cq_idx == cqs_.size()) {
    // Completion queue not found. Pick a random one to publish new calls to.
    cq_idx = static_cast<size_t>(rand()) % cqs_.size();
  }
  // Set up channelz node.
  intptr_t channelz_socket_uuid = 0;
  if (socket_node != nullptr) {
    channelz_socket_uuid = socket_node->uuid();
    channelz_node_->AddChildSocket(socket_node);
  }
  // Initialize chand.
  chand->InitTransport(Ref(), std::move(*channel), cq_idx, transport,
                       channelz_socket_uuid);
  return absl::OkStatus();
}

bool Server::HasOpenConnections() {
  MutexLock lock(&mu_global_);
  return !channels_.empty();
}

void Server::SetRegisteredMethodAllocator(
    grpc_completion_queue* cq, void* method_tag,
    std::function<RegisteredCallAllocation()> allocator) {
  RegisteredMethod* rm = static_cast<RegisteredMethod*>(method_tag);
  rm->matcher = std::make_unique<AllocatingRequestMatcherRegistered>(
      this, cq, rm, std::move(allocator));
}

void Server::SetBatchMethodAllocator(
    grpc_completion_queue* cq, std::function<BatchCallAllocation()> allocator) {
  GPR_DEBUG_ASSERT(unregistered_request_matcher_ == nullptr);
  unregistered_request_matcher_ =
      std::make_unique<AllocatingRequestMatcherBatch>(this, cq,
                                                      std::move(allocator));
}

void Server::RegisterCompletionQueue(grpc_completion_queue* cq) {
  for (grpc_completion_queue* queue : cqs_) {
    if (queue == cq) return;
  }
  GRPC_CQ_INTERNAL_REF(cq, "server");
  cqs_.push_back(cq);
}

namespace {

bool streq(const std::string& a, const char* b) {
  return (a.empty() && b == nullptr) ||
         ((b != nullptr) && !strcmp(a.c_str(), b));
}

}  // namespace

Server::RegisteredMethod* Server::RegisterMethod(
    const char* method, const char* host,
    grpc_server_register_method_payload_handling payload_handling,
    uint32_t flags) {
  if (!method) {
    gpr_log(GPR_ERROR,
            "grpc_server_register_method method string cannot be NULL");
    return nullptr;
  }
  for (std::unique_ptr<RegisteredMethod>& m : registered_methods_) {
    if (streq(m->method, method) && streq(m->host, host)) {
      gpr_log(GPR_ERROR, "duplicate registration for %s@%s", method,
              host ? host : "*");
      return nullptr;
    }
  }
  if (flags != 0) {
    gpr_log(GPR_ERROR, "grpc_server_register_method invalid flags 0x%08x",
            flags);
    return nullptr;
  }
  registered_methods_.emplace_back(std::make_unique<RegisteredMethod>(
      method, host, payload_handling, flags));
  return registered_methods_.back().get();
}

void Server::DoneRequestEvent(void* req, grpc_cq_completion* /*c*/) {
  delete static_cast<RequestedCall*>(req);
}

void Server::FailCall(size_t cq_idx, RequestedCall* rc,
                      grpc_error_handle error) {
  *rc->call = nullptr;
  rc->initial_metadata->count = 0;
  GPR_ASSERT(!error.ok());
  grpc_cq_end_op(cqs_[cq_idx], rc->tag, error, DoneRequestEvent, rc,
                 &rc->completion);
}

// Before calling MaybeFinishShutdown(), we must hold mu_global_ and not
// hold mu_call_.
void Server::MaybeFinishShutdown() {
  if (!ShutdownReady() || shutdown_published_) {
    return;
  }
  {
    MutexLock lock(&mu_call_);
    KillPendingWorkLocked(GRPC_ERROR_CREATE("Server Shutdown"));
  }
  if (!channels_.empty() || listeners_destroyed_ < listeners_.size()) {
    if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
                                  last_shutdown_message_time_),
                     gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
      last_shutdown_message_time_ = gpr_now(GPR_CLOCK_REALTIME);
      gpr_log(GPR_DEBUG,
              "Waiting for %" PRIuPTR " channels and %" PRIuPTR "/%" PRIuPTR
              " listeners to be destroyed before shutting down server",
              channels_.size(), listeners_.size() - listeners_destroyed_,
              listeners_.size());
    }
    return;
  }
  shutdown_published_ = true;
  for (auto& shutdown_tag : shutdown_tags_) {
    Ref().release();
    grpc_cq_end_op(shutdown_tag.cq, shutdown_tag.tag, absl::OkStatus(),
                   DoneShutdownEvent, this, &shutdown_tag.completion);
  }
}

void Server::KillPendingWorkLocked(grpc_error_handle error) {
  if (started_) {
    unregistered_request_matcher_->KillRequests(error);
    unregistered_request_matcher_->ZombifyPending();
    for (std::unique_ptr<RegisteredMethod>& rm : registered_methods_) {
      rm->matcher->KillRequests(error);
      rm->matcher->ZombifyPending();
    }
  }
}

std::vector<RefCountedPtr<Channel>> Server::GetChannelsLocked() const {
  std::vector<RefCountedPtr<Channel>> channels;
  channels.reserve(channels_.size());
  for (const ChannelData* chand : channels_) {
    channels.push_back(chand->channel()->Ref());
  }
  return channels;
}

void Server::ListenerDestroyDone(void* arg, grpc_error_handle /*error*/) {
  Server* server = static_cast<Server*>(arg);
  MutexLock lock(&server->mu_global_);
  server->listeners_destroyed_++;
  server->MaybeFinishShutdown();
}

namespace {

void DonePublishedShutdown(void* /*done_arg*/, grpc_cq_completion* storage) {
  delete storage;
}

}  // namespace

// - Kills all pending requests-for-incoming-RPC-calls (i.e., the requests made
//   via grpc_server_request_call() and grpc_server_request_registered_call()
//   will now be cancelled). See KillPendingWorkLocked().
//
// - Shuts down the listeners (i.e., the server will no longer listen on the
//   port for new incoming channels).
//
// - Iterates through all channels on the server and sends shutdown msg (see
//   ChannelBroadcaster::BroadcastShutdown() for details) to the clients via
//   the transport layer. The transport layer then guarantees the following:
//    -- Sends shutdown to the client (e.g., HTTP2 transport sends GOAWAY).
//    -- If the server has outstanding calls that are in progress, the
//       connection is NOT closed until the server is done with all those
//       calls.
//    -- Once there are no more calls in progress, the channel is closed.
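//
// A minimal sketch of the caller-side shutdown sequence this supports (the
// tag value and queue below are illustrative only):
//
//   grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
//   // Optionally hard-cancel anything still in flight:
//   grpc_server_cancel_all_calls(server);
//   // Drain cq until shutdown_tag is returned, then:
//   grpc_server_destroy(server);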
void Server::ShutdownAndNotify(grpc_completion_queue* cq, void* tag) {
  Notification* await_requests = nullptr;
  ChannelBroadcaster broadcaster;
  {
    // Wait for startup to be finished. Locks mu_global.
    MutexLock lock(&mu_global_);
    while (starting_) {
      starting_cv_.Wait(&mu_global_);
    }
    // Stay locked, and gather up some stuff to do.
    GPR_ASSERT(grpc_cq_begin_op(cq, tag));
    if (shutdown_published_) {
      grpc_cq_end_op(cq, tag, absl::OkStatus(), DonePublishedShutdown, nullptr,
                     new grpc_cq_completion);
      return;
    }
    shutdown_tags_.emplace_back(tag, cq);
    if (ShutdownCalled()) {
      return;
    }
    last_shutdown_message_time_ = gpr_now(GPR_CLOCK_REALTIME);
    broadcaster.FillChannelsLocked(GetChannelsLocked());
    // Collect all unregistered then registered calls.
    {
      MutexLock lock(&mu_call_);
      KillPendingWorkLocked(GRPC_ERROR_CREATE("Server Shutdown"));
    }
    await_requests = ShutdownUnrefOnShutdownCall();
  }
  // We expect no new requests but there can still be requests in-flight.
  // Wait for them to complete before proceeding.
  if (await_requests != nullptr) {
    await_requests->WaitForNotification();
  }
  StopListening();
  broadcaster.BroadcastShutdown(/*send_goaway=*/true, absl::OkStatus());
}

void Server::StopListening() {
  for (auto& listener : listeners_) {
    if (listener.listener == nullptr) continue;
    channelz::ListenSocketNode* channelz_listen_socket_node =
        listener.listener->channelz_listen_socket_node();
    if (channelz_node_ != nullptr && channelz_listen_socket_node != nullptr) {
      channelz_node_->RemoveChildListenSocket(
          channelz_listen_socket_node->uuid());
    }
    GRPC_CLOSURE_INIT(&listener.destroy_done, ListenerDestroyDone, this,
                      grpc_schedule_on_exec_ctx);
    listener.listener->SetOnDestroyDone(&listener.destroy_done);
    listener.listener.reset();
  }
}

void Server::CancelAllCalls() {
  ChannelBroadcaster broadcaster;
  {
    MutexLock lock(&mu_global_);
    broadcaster.FillChannelsLocked(GetChannelsLocked());
  }
  broadcaster.BroadcastShutdown(
      /*send_goaway=*/false, GRPC_ERROR_CREATE("Cancelling all calls"));
}

void Server::SendGoaways() {
  ChannelBroadcaster broadcaster;
  {
    MutexLock lock(&mu_global_);
    broadcaster.FillChannelsLocked(GetChannelsLocked());
  }
  broadcaster.BroadcastShutdown(/*send_goaway=*/true, absl::OkStatus());
}

void Server::Orphan() {
  {
    MutexLock lock(&mu_global_);
    GPR_ASSERT(ShutdownCalled() || listeners_.empty());
    GPR_ASSERT(listeners_destroyed_ == listeners_.size());
  }
  Unref();
}

grpc_call_error Server::ValidateServerRequest(
    grpc_completion_queue* cq_for_notification, void* tag,
    grpc_byte_buffer** optional_payload, RegisteredMethod* rm) {
  if ((rm == nullptr && optional_payload != nullptr) ||
      ((rm != nullptr) && ((optional_payload == nullptr) !=
                           (rm->payload_handling == GRPC_SRM_PAYLOAD_NONE)))) {
    return GRPC_CALL_ERROR_PAYLOAD_TYPE_MISMATCH;
  }
  if (!grpc_cq_begin_op(cq_for_notification, tag)) {
    return GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
  }
  return GRPC_CALL_OK;
}

grpc_call_error Server::ValidateServerRequestAndCq(
    size_t* cq_idx, grpc_completion_queue* cq_for_notification, void* tag,
    grpc_byte_buffer** optional_payload, RegisteredMethod* rm) {
  size_t idx;
  for (idx = 0; idx < cqs_.size(); idx++) {
    if (cqs_[idx] == cq_for_notification) {
      break;
    }
  }
  if (idx == cqs_.size()) {
    return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
  }
  grpc_call_error error =
      ValidateServerRequest(cq_for_notification, tag, optional_payload, rm);
  if (error != GRPC_CALL_OK) {
    return error;
  }
  *cq_idx = idx;
  return GRPC_CALL_OK;
}

grpc_call_error Server::QueueRequestedCall(size_t cq_idx, RequestedCall* rc) {
  if (ShutdownCalled()) {
    FailCall(cq_idx, rc, GRPC_ERROR_CREATE("Server Shutdown"));
    return GRPC_CALL_OK;
  }
  RequestMatcherInterface* rm;
  switch (rc->type) {
    case RequestedCall::Type::BATCH_CALL:
      rm = unregistered_request_matcher_.get();
      break;
    case RequestedCall::Type::REGISTERED_CALL:
      rm = rc->data.registered.method->matcher.get();
      break;
  }
  rm->RequestCallWithPossiblePublish(cq_idx, rc);
  return GRPC_CALL_OK;
}

grpc_call_error Server::RequestCall(grpc_call** call,
                                    grpc_call_details* details,
                                    grpc_metadata_array* request_metadata,
                                    grpc_completion_queue* cq_bound_to_call,
                                    grpc_completion_queue* cq_for_notification,
                                    void* tag) {
  size_t cq_idx;
  grpc_call_error error = ValidateServerRequestAndCq(
      &cq_idx, cq_for_notification, tag, nullptr, nullptr);
  if (error != GRPC_CALL_OK) {
    return error;
  }
  RequestedCall* rc =
      new RequestedCall(tag, cq_bound_to_call, call, request_metadata, details);
  return QueueRequestedCall(cq_idx, rc);
}

grpc_call_error Server::RequestRegisteredCall(
    RegisteredMethod* rm, grpc_call** call, gpr_timespec* deadline,
    grpc_metadata_array* request_metadata, grpc_byte_buffer** optional_payload,
    grpc_completion_queue* cq_bound_to_call,
    grpc_completion_queue* cq_for_notification, void* tag_new) {
  size_t cq_idx;
  grpc_call_error error = ValidateServerRequestAndCq(
      &cq_idx, cq_for_notification, tag_new, optional_payload, rm);
  if (error != GRPC_CALL_OK) {
    return error;
  }
  RequestedCall* rc =
      new RequestedCall(tag_new, cq_bound_to_call, call, request_metadata, rm,
                        deadline, optional_payload);
  return QueueRequestedCall(cq_idx, rc);
}

//
// Server::ChannelData::ConnectivityWatcher
//

class Server::ChannelData::ConnectivityWatcher
    : public AsyncConnectivityStateWatcherInterface {
 public:
  explicit ConnectivityWatcher(ChannelData* chand)
      : chand_(chand), channel_(chand_->channel_->Ref()) {}

 private:
  void OnConnectivityStateChange(grpc_connectivity_state new_state,
                                 const absl::Status& /*status*/) override {
    // Don't do anything until we are being shut down.
    if (new_state != GRPC_CHANNEL_SHUTDOWN) return;
    // Shut down channel.
    MutexLock lock(&chand_->server_->mu_global_);
    chand_->Destroy();
  }

  ChannelData* const chand_;
  const RefCountedPtr<Channel> channel_;
};

//
// Server::ChannelData
//

Server::ChannelData::~ChannelData() {
  registered_methods_.reset();
  if (server_ != nullptr) {
    if (server_->channelz_node_ != nullptr && channelz_socket_uuid_ != 0) {
      server_->channelz_node_->RemoveChildSocket(channelz_socket_uuid_);
    }
    {
      MutexLock lock(&server_->mu_global_);
      if (list_position_.has_value()) {
        server_->channels_.erase(*list_position_);
        list_position_.reset();
      }
      server_->MaybeFinishShutdown();
    }
  }
}

void Server::ChannelData::InitTransport(RefCountedPtr<Server> server,
                                        RefCountedPtr<Channel> channel,
                                        size_t cq_idx,
                                        grpc_transport* transport,
                                        intptr_t channelz_socket_uuid) {
  server_ = std::move(server);
  channel_ = channel;
  cq_idx_ = cq_idx;
  channelz_socket_uuid_ = channelz_socket_uuid;
  // Build a lookup table phrased in terms of mdstrs in this channel's context
  // to quickly find registered methods.
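  // The table is a simple open-addressed hash table sized at twice the number
  // of registered methods; lookups probe linearly, up to
  // registered_method_max_probes_ slots (see GetRegisteredMethod()).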
1179 size_t num_registered_methods = server_->registered_methods_.size();
1180 if (num_registered_methods > 0) {
1181 uint32_t max_probes = 0;
1182 size_t slots = 2 * num_registered_methods;
1183 registered_methods_ =
1184 std::make_unique<std::vector<ChannelRegisteredMethod>>(slots);
1185 for (std::unique_ptr<RegisteredMethod>& rm : server_->registered_methods_) {
1186 Slice host;
1187 Slice method = Slice::FromExternalString(rm->method);
1188 const bool has_host = !rm->host.empty();
1189 if (has_host) {
1190 host = Slice::FromExternalString(rm->host.c_str());
1191 }
1192 uint32_t hash = MixHash32(has_host ? host.Hash() : 0, method.Hash());
1193 uint32_t probes = 0;
1194 for (probes = 0; (*registered_methods_)[(hash + probes) % slots]
1195 .server_registered_method != nullptr;
1196 probes++) {
1197 }
1198 if (probes > max_probes) max_probes = probes;
1199 ChannelRegisteredMethod* crm =
1200 &(*registered_methods_)[(hash + probes) % slots];
1201 crm->server_registered_method = rm.get();
1202 crm->flags = rm->flags;
1203 crm->has_host = has_host;
1204 if (has_host) {
1205 crm->host = std::move(host);
1206 }
1207 crm->method = std::move(method);
1208 }
1209 GPR_ASSERT(slots <= UINT32_MAX);
1210 registered_method_max_probes_ = max_probes;
1211 }
1212 // Publish channel.
1213 {
1214 MutexLock lock(&server_->mu_global_);
1215 server_->channels_.push_front(this);
1216 list_position_ = server_->channels_.begin();
1217 }
1218 // Start accept_stream transport op.
1219 grpc_transport_op* op = grpc_make_transport_op(nullptr);
1220 op->set_accept_stream = true;
1221 op->set_accept_stream_fn = AcceptStream;
1222 op->set_accept_stream_user_data = this;
1223 op->start_connectivity_watch = MakeOrphanable<ConnectivityWatcher>(this);
1224 if (server_->ShutdownCalled()) {
1225 op->disconnect_with_error = GRPC_ERROR_CREATE("Server shutdown");
1226 }
1227 grpc_transport_perform_op(transport, op);
1228 }
1229
GetRegisteredMethod(const grpc_slice & host,const grpc_slice & path)1230 Server::ChannelRegisteredMethod* Server::ChannelData::GetRegisteredMethod(
1231 const grpc_slice& host, const grpc_slice& path) {
1232 if (registered_methods_ == nullptr) return nullptr;
1233 // TODO(ctiller): unify these two searches
1234 // check for an exact match with host
1235 uint32_t hash = MixHash32(grpc_slice_hash(host), grpc_slice_hash(path));
1236 for (size_t i = 0; i <= registered_method_max_probes_; i++) {
1237 ChannelRegisteredMethod* rm =
1238 &(*registered_methods_)[(hash + i) % registered_methods_->size()];
1239 if (rm->server_registered_method == nullptr) break;
1240 if (!rm->has_host) continue;
1241 if (rm->host != host) continue;
1242 if (rm->method != path) continue;
1243 return rm;
1244 }
1245 // check for a wildcard method definition (no host set)
1246 hash = MixHash32(0, grpc_slice_hash(path));
1247 for (size_t i = 0; i <= registered_method_max_probes_; i++) {
1248 ChannelRegisteredMethod* rm =
1249 &(*registered_methods_)[(hash + i) % registered_methods_->size()];
1250 if (rm->server_registered_method == nullptr) break;
1251 if (rm->has_host) continue;
1252 if (rm->method != path) continue;
1253 return rm;
1254 }
1255 return nullptr;
1256 }
1257
AcceptStream(void * arg,grpc_transport *,const void * transport_server_data)1258 void Server::ChannelData::AcceptStream(void* arg, grpc_transport* /*transport*/,
1259 const void* transport_server_data) {
1260 auto* chand = static_cast<Server::ChannelData*>(arg);
1261 // create a call
1262 grpc_call_create_args args;
1263 args.channel = chand->channel_;
1264 args.server = chand->server_.get();
1265 args.parent = nullptr;
1266 args.propagation_mask = 0;
1267 args.cq = nullptr;
1268 args.pollset_set_alternative = nullptr;
1269 args.server_transport_data = transport_server_data;
1270 args.send_deadline = Timestamp::InfFuture();
1271 grpc_call* call;
1272 grpc_error_handle error = grpc_call_create(&args, &call);
1273 grpc_call_stack* call_stack = grpc_call_get_call_stack(call);
1274 if (call_stack == nullptr) { // Promise based calls do not have a call stack
1275 GPR_ASSERT(error.ok());
1276 GPR_ASSERT(IsPromiseBasedServerCallEnabled());
1277 return;
1278 } else {
1279 grpc_call_element* elem = grpc_call_stack_element(call_stack, 0);
1280 auto* calld = static_cast<Server::CallData*>(elem->call_data);
1281 if (!error.ok()) {
1282 calld->FailCallCreation();
1283 return;
1284 }
1285 calld->Start(elem);
1286 }
1287 }
1288
MakeCallPromise(grpc_channel_element * elem,CallArgs call_args,NextPromiseFactory)1289 ArenaPromise<ServerMetadataHandle> Server::ChannelData::MakeCallPromise(
1290 grpc_channel_element* elem, CallArgs call_args, NextPromiseFactory) {
1291 auto* chand = static_cast<Server::ChannelData*>(elem->channel_data);
1292 auto* server = chand->server_.get();
1293 if (server->ShutdownCalled()) {
1294 return [] {
1295 return ServerMetadataFromStatus(absl::InternalError("Server shutdown"));
1296 };
1297 }
1298 absl::optional<Slice> path =
1299 call_args.client_initial_metadata->Take(HttpPathMetadata());
1300 if (!path.has_value()) {
1301 return [] {
1302 return ServerMetadataFromStatus(
1303 absl::InternalError("Missing :path header"));
1304 };
1305 }
1306 auto host_ptr =
1307 call_args.client_initial_metadata->get_pointer(HttpAuthorityMetadata());
1308 if (host_ptr == nullptr) {
1309 return [] {
1310 return ServerMetadataFromStatus(
1311 absl::InternalError("Missing :authority header"));
1312 };
1313 }
1314 Timestamp deadline = GetContext<CallContext>()->deadline();
1315 // Find request matcher.
1316 RequestMatcherInterface* matcher;
1317 ChannelRegisteredMethod* rm =
1318 chand->GetRegisteredMethod(host_ptr->c_slice(), path->c_slice());
1319 ArenaPromise<absl::StatusOr<NextResult<MessageHandle>>>
1320 maybe_read_first_message([] { return NextResult<MessageHandle>(); });
1321 if (rm != nullptr) {
1322 matcher = rm->server_registered_method->matcher.get();
1323 switch (rm->server_registered_method->payload_handling) {
1324 case GRPC_SRM_PAYLOAD_NONE:
1325 break;
1326 case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER:
1327 maybe_read_first_message =
1328 Map(call_args.client_to_server_messages->Next(),
1329 [](NextResult<MessageHandle> msg)
1330 -> absl::StatusOr<NextResult<MessageHandle>> {
1331 return std::move(msg);
1332 });
1333 }
1334 } else {
1335 matcher = server->unregistered_request_matcher_.get();
1336 }
1337 return TrySeq(
1338 TryJoin(matcher->MatchRequest(chand->cq_idx()),
1339 std::move(maybe_read_first_message)),
1340 [path = std::move(*path), host_ptr, deadline,
1341 call_args = std::move(call_args)](
1342 std::tuple<RequestMatcherInterface::MatchResult,
1343 NextResult<MessageHandle>>
1344 match_result_and_payload) mutable {
1345 auto& mr = std::get<0>(match_result_and_payload);
1346 auto& payload = std::get<1>(match_result_and_payload);
1347 auto* rc = mr.TakeCall();
1348 auto* cq_for_new_request = mr.cq();
1349 switch (rc->type) {
1350 case RequestedCall::Type::BATCH_CALL:
1351 GPR_ASSERT(!payload.has_value());
1352 rc->data.batch.details->host = CSliceRef(host_ptr->c_slice());
1353 rc->data.batch.details->method = CSliceRef(path.c_slice());
1354 rc->data.batch.details->deadline =
1355 deadline.as_timespec(GPR_CLOCK_MONOTONIC);
1356 break;
1357 case RequestedCall::Type::REGISTERED_CALL:
1358 *rc->data.registered.deadline =
1359 deadline.as_timespec(GPR_CLOCK_MONOTONIC);
1360 if (rc->data.registered.optional_payload != nullptr) {
1361 if (payload.has_value()) {
1362 auto* sb = payload.value()->payload()->c_slice_buffer();
1363 *rc->data.registered.optional_payload =
1364 grpc_raw_byte_buffer_create(sb->slices, sb->count);
1365 } else {
1366 *rc->data.registered.optional_payload = nullptr;
1367 }
1368 }
1369 break;
1370 default:
1371 GPR_UNREACHABLE_CODE(abort());
1372 }
1373 return GetContext<CallContext>()
1374 ->server_call_context()
1375 ->MakeTopOfServerCallPromise(
1376 std::move(call_args), rc->cq_bound_to_call,
1377 rc->initial_metadata,
1378 [rc, cq_for_new_request](grpc_call* call) {
1379 *rc->call = call;
1380 grpc_cq_end_op(cq_for_new_request, rc->tag, absl::OkStatus(),
1381 Server::DoneRequestEvent, rc, &rc->completion,
1382 true);
1383 });
1384 });
1385 }
1386
FinishDestroy(void * arg,grpc_error_handle)1387 void Server::ChannelData::FinishDestroy(void* arg,
1388 grpc_error_handle /*error*/) {
1389 auto* chand = static_cast<Server::ChannelData*>(arg);
1390 Server* server = chand->server_.get();
1391 auto* channel_stack = chand->channel_->channel_stack();
1392 chand->channel_.reset();
1393 server->Unref();
1394 GRPC_CHANNEL_STACK_UNREF(channel_stack, "Server::ChannelData::Destroy");
1395 }
1396
Destroy()1397 void Server::ChannelData::Destroy() {
1398 if (!list_position_.has_value()) return;
1399 GPR_ASSERT(server_ != nullptr);
1400 server_->channels_.erase(*list_position_);
1401 list_position_.reset();
1402 server_->Ref().release();
1403 server_->MaybeFinishShutdown();
1404 // Unreffed by FinishDestroy
1405 GRPC_CHANNEL_STACK_REF(channel_->channel_stack(),
1406 "Server::ChannelData::Destroy");
1407 GRPC_CLOSURE_INIT(&finish_destroy_channel_closure_, FinishDestroy, this,
1408 grpc_schedule_on_exec_ctx);
1409 if (GRPC_TRACE_FLAG_ENABLED(grpc_server_channel_trace)) {
1410 gpr_log(GPR_INFO, "Disconnected client");
1411 }
1412 grpc_transport_op* op =
1413 grpc_make_transport_op(&finish_destroy_channel_closure_);
1414 op->set_accept_stream = true;
1415 grpc_channel_next_op(grpc_channel_stack_element(channel_->channel_stack(), 0),
1416 op);
1417 }
1418
InitChannelElement(grpc_channel_element * elem,grpc_channel_element_args * args)1419 grpc_error_handle Server::ChannelData::InitChannelElement(
1420 grpc_channel_element* elem, grpc_channel_element_args* args) {
1421 GPR_ASSERT(args->is_first);
1422 GPR_ASSERT(!args->is_last);
1423 new (elem->channel_data) ChannelData();
1424 return absl::OkStatus();
1425 }
1426
DestroyChannelElement(grpc_channel_element * elem)1427 void Server::ChannelData::DestroyChannelElement(grpc_channel_element* elem) {
1428 auto* chand = static_cast<ChannelData*>(elem->channel_data);
1429 chand->~ChannelData();
1430 }
1431
1432 //
1433 // Server::CallData
1434 //
1435
CallData(grpc_call_element * elem,const grpc_call_element_args & args,RefCountedPtr<Server> server)1436 Server::CallData::CallData(grpc_call_element* elem,
1437 const grpc_call_element_args& args,
1438 RefCountedPtr<Server> server)
1439 : server_(std::move(server)),
1440 call_(grpc_call_from_top_element(elem)),
1441 call_combiner_(args.call_combiner) {
1442 GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_, RecvInitialMetadataReady,
1443 elem, grpc_schedule_on_exec_ctx);
1444 GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
1445 elem, grpc_schedule_on_exec_ctx);
1446 }
1447
~CallData()1448 Server::CallData::~CallData() {
1449 GPR_ASSERT(state_.load(std::memory_order_relaxed) != CallState::PENDING);
1450 grpc_metadata_array_destroy(&initial_metadata_);
1451 grpc_byte_buffer_destroy(payload_);
1452 }
1453
SetState(CallState state)1454 void Server::CallData::SetState(CallState state) {
1455 state_.store(state, std::memory_order_relaxed);
1456 }
1457
bool Server::CallData::MaybeActivate() {
  CallState expected = CallState::PENDING;
  return state_.compare_exchange_strong(expected, CallState::ACTIVATED,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed);
}

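// Marks a call that could not be set up as ZOMBIED. A NOT_STARTED call is
// killed immediately; a PENDING call is cleaned up later, when it is removed
// from the pending queue.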
void Server::CallData::FailCallCreation() {
  CallState expected_not_started = CallState::NOT_STARTED;
  CallState expected_pending = CallState::PENDING;
  if (state_.compare_exchange_strong(expected_not_started, CallState::ZOMBIED,
                                     std::memory_order_acq_rel,
                                     std::memory_order_acquire)) {
    KillZombie();
  } else if (state_.compare_exchange_strong(
                 expected_pending, CallState::ZOMBIED,
                 std::memory_order_acq_rel, std::memory_order_relaxed)) {
    // Zombied call will be destroyed when it's removed from the pending
    // queue... later.
  }
}

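// Kicks off processing of a newly accepted call by requesting its initial
// metadata from the transport.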
void Server::CallData::Start(grpc_call_element* elem) {
  grpc_op op;
  op.op = GRPC_OP_RECV_INITIAL_METADATA;
  op.flags = 0;
  op.reserved = nullptr;
  op.data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_;
  GRPC_CLOSURE_INIT(&recv_initial_metadata_batch_complete_,
                    RecvInitialMetadataBatchComplete, elem,
                    grpc_schedule_on_exec_ctx);
  grpc_call_start_batch_and_execute(call_, &op, 1,
                                    &recv_initial_metadata_batch_complete_);
}

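// Pairs this call with an application request: fills in the requested-call
// output fields (call handle, metadata, deadline, details or payload) and
// posts the completion on the notification queue.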
void Server::CallData::Publish(size_t cq_idx, RequestedCall* rc) {
  grpc_call_set_completion_queue(call_, rc->cq_bound_to_call);
  *rc->call = call_;
  cq_new_ = server_->cqs_[cq_idx];
  std::swap(*rc->initial_metadata, initial_metadata_);
  switch (rc->type) {
    case RequestedCall::Type::BATCH_CALL:
      GPR_ASSERT(host_.has_value());
      GPR_ASSERT(path_.has_value());
      rc->data.batch.details->host = CSliceRef(host_->c_slice());
      rc->data.batch.details->method = CSliceRef(path_->c_slice());
      rc->data.batch.details->deadline =
          deadline_.as_timespec(GPR_CLOCK_MONOTONIC);
      break;
    case RequestedCall::Type::REGISTERED_CALL:
      *rc->data.registered.deadline =
          deadline_.as_timespec(GPR_CLOCK_MONOTONIC);
      if (rc->data.registered.optional_payload != nullptr) {
        *rc->data.registered.optional_payload = payload_;
        payload_ = nullptr;
      }
      break;
    default:
      GPR_UNREACHABLE_CODE(return);
  }
  grpc_cq_end_op(cq_new_, rc->tag, absl::OkStatus(), Server::DoneRequestEvent,
                 rc, &rc->completion, true);
}

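// Called once the call is ready to be matched (directly for methods with no
// payload, or after the recv_message op completes): hands the call to its
// request matcher, or zombies it if the read failed or the server is shutting
// down.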
void Server::CallData::PublishNewRpc(void* arg, grpc_error_handle error) {
  grpc_call_element* call_elem = static_cast<grpc_call_element*>(arg);
  auto* calld = static_cast<Server::CallData*>(call_elem->call_data);
  auto* chand = static_cast<Server::ChannelData*>(call_elem->channel_data);
  RequestMatcherInterface* rm = calld->matcher_;
  Server* server = rm->server();
  if (!error.ok() || server->ShutdownCalled()) {
    calld->state_.store(CallState::ZOMBIED, std::memory_order_relaxed);
    calld->KillZombie();
    return;
  }
  rm->MatchOrQueue(chand->cq_idx(), calld);
}

namespace {

void KillZombieClosure(void* call, grpc_error_handle /*error*/) {
  grpc_call_unref(static_cast<grpc_call*>(call));
}

}  // namespace

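// Schedules the zombie call's unref on the ExecCtx so the call is destroyed
// outside of the current call stack.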
void Server::CallData::KillZombie() {
  GRPC_CLOSURE_INIT(&kill_zombie_closure_, KillZombieClosure, call_,
                    grpc_schedule_on_exec_ctx);
  ExecCtx::Run(DEBUG_LOCATION, &kill_zombie_closure_, absl::OkStatus());
}

// If this changes, change MakeCallPromise too.
void Server::CallData::StartNewRpc(grpc_call_element* elem) {
  auto* chand = static_cast<ChannelData*>(elem->channel_data);
  if (server_->ShutdownCalled()) {
    state_.store(CallState::ZOMBIED, std::memory_order_relaxed);
    KillZombie();
    return;
  }
  // Find request matcher.
  matcher_ = server_->unregistered_request_matcher_.get();
  grpc_server_register_method_payload_handling payload_handling =
      GRPC_SRM_PAYLOAD_NONE;
  if (path_.has_value() && host_.has_value()) {
    ChannelRegisteredMethod* rm =
        chand->GetRegisteredMethod(host_->c_slice(), path_->c_slice());
    if (rm != nullptr) {
      matcher_ = rm->server_registered_method->matcher.get();
      payload_handling = rm->server_registered_method->payload_handling;
    }
  }
  // Start recv_message op if needed.
  switch (payload_handling) {
    case GRPC_SRM_PAYLOAD_NONE:
      PublishNewRpc(elem, absl::OkStatus());
      break;
    case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: {
      grpc_op op;
      op.op = GRPC_OP_RECV_MESSAGE;
      op.flags = 0;
      op.reserved = nullptr;
      op.data.recv_message.recv_message = &payload_;
      GRPC_CLOSURE_INIT(&publish_, PublishNewRpc, elem,
                        grpc_schedule_on_exec_ctx);
      grpc_call_start_batch_and_execute(call_, &op, 1, &publish_);
      break;
    }
  }
}

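// Completion of the initial metadata batch started in Start(): either begins
// the new RPC or fails call creation if the batch errored.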
void Server::CallData::RecvInitialMetadataBatchComplete(
    void* arg, grpc_error_handle error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  auto* calld = static_cast<Server::CallData*>(elem->call_data);
  if (!error.ok()) {
    gpr_log(GPR_DEBUG, "Failed call creation: %s",
            StatusToString(error).c_str());
    calld->FailCallCreation();
    return;
  }
  calld->StartNewRpc(elem);
}

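// Hooks the recv_initial_metadata and recv_trailing_metadata completions so
// this filter runs first when they complete, then forwards the batch down the
// stack.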
void Server::CallData::StartTransportStreamOpBatchImpl(
    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  if (batch->recv_initial_metadata) {
    recv_initial_metadata_ =
        batch->payload->recv_initial_metadata.recv_initial_metadata;
    original_recv_initial_metadata_ready_ =
        batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
    batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
        &recv_initial_metadata_ready_;
  }
  if (batch->recv_trailing_metadata) {
    original_recv_trailing_metadata_ready_ =
        batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
    batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
        &recv_trailing_metadata_ready_;
  }
  grpc_call_next_op(elem, batch);
}

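// Extracts :path, :authority and the grpc-timeout deadline from the received
// initial metadata; synthesizes an error if :path or :authority is missing,
// and releases any deferred recv_trailing_metadata_ready closure.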
void Server::CallData::RecvInitialMetadataReady(void* arg,
                                                grpc_error_handle error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  CallData* calld = static_cast<CallData*>(elem->call_data);
  if (error.ok()) {
    calld->path_ = calld->recv_initial_metadata_->Take(HttpPathMetadata());
    auto* host =
        calld->recv_initial_metadata_->get_pointer(HttpAuthorityMetadata());
    if (host != nullptr) calld->host_.emplace(host->Ref());
  }
  auto op_deadline = calld->recv_initial_metadata_->get(GrpcTimeoutMetadata());
  if (op_deadline.has_value()) {
    calld->deadline_ = *op_deadline;
  }
  if (calld->host_.has_value() && calld->path_.has_value()) {
    // do nothing
  } else if (error.ok()) {
    // Pass the error reference to calld->recv_initial_metadata_error
    error = absl::UnknownError("Missing :authority or :path");
    calld->recv_initial_metadata_error_ = error;
  }
  grpc_closure* closure = calld->original_recv_initial_metadata_ready_;
  calld->original_recv_initial_metadata_ready_ = nullptr;
  if (calld->seen_recv_trailing_metadata_ready_) {
    GRPC_CALL_COMBINER_START(calld->call_combiner_,
                             &calld->recv_trailing_metadata_ready_,
                             calld->recv_trailing_metadata_error_,
                             "continue server recv_trailing_metadata_ready");
  }
  Closure::Run(DEBUG_LOCATION, closure, error);
}

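// Defers trailing metadata handling until initial metadata has been processed,
// then folds any initial-metadata error into the trailing metadata error
// before invoking the original callback.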
void Server::CallData::RecvTrailingMetadataReady(void* arg,
                                                 grpc_error_handle error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  CallData* calld = static_cast<CallData*>(elem->call_data);
  if (calld->original_recv_initial_metadata_ready_ != nullptr) {
    calld->recv_trailing_metadata_error_ = error;
    calld->seen_recv_trailing_metadata_ready_ = true;
    GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready_,
                      RecvTrailingMetadataReady, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_STOP(calld->call_combiner_,
                            "deferring server recv_trailing_metadata_ready "
                            "until after recv_initial_metadata_ready");
    return;
  }
  error = grpc_error_add_child(error, calld->recv_initial_metadata_error_);
  Closure::Run(DEBUG_LOCATION, calld->original_recv_trailing_metadata_ready_,
               error);
}

grpc_error_handle Server::CallData::InitCallElement(
    grpc_call_element* elem, const grpc_call_element_args* args) {
  auto* chand = static_cast<ChannelData*>(elem->channel_data);
  new (elem->call_data) Server::CallData(elem, *args, chand->server());
  return absl::OkStatus();
}

void Server::CallData::DestroyCallElement(
    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
    grpc_closure* /*ignored*/) {
  auto* calld = static_cast<CallData*>(elem->call_data);
  calld->~CallData();
}

void Server::CallData::StartTransportStreamOpBatch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  auto* calld = static_cast<CallData*>(elem->call_data);
  calld->StartTransportStreamOpBatchImpl(elem, batch);
}

}  // namespace grpc_core

//
// C-core API
//

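// The functions below form the public C surface. Each sets up an ExecCtx (and,
// where application callbacks may run, an ApplicationCallbackExecCtx), emits
// an API trace, and delegates to the grpc_core::Server instance obtained via
// grpc_core::Server::FromC().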
grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
  grpc_core::Server* server =
      new grpc_core::Server(grpc_core::CoreConfiguration::Get()
                                .channel_args_preconditioning()
                                .PreconditionChannelArgs(args));
  return server->c_ptr();
}

void grpc_server_register_completion_queue(grpc_server* server,
                                           grpc_completion_queue* cq,
                                           void* reserved) {
  GRPC_API_TRACE(
      "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
      (server, cq, reserved));
  GPR_ASSERT(!reserved);
  auto cq_type = grpc_get_cq_completion_type(cq);
  if (cq_type != GRPC_CQ_NEXT && cq_type != GRPC_CQ_CALLBACK) {
    gpr_log(GPR_INFO,
            "Completion queue of type %d is being registered as a "
            "server-completion-queue",
            static_cast<int>(cq_type));
    // Ideally we should log an error and abort but ruby-wrapped-language API
    // calls grpc_completion_queue_pluck() on server completion queues
  }
  grpc_core::Server::FromC(server)->RegisterCompletionQueue(cq);
}

void* grpc_server_register_method(
    grpc_server* server, const char* method, const char* host,
    grpc_server_register_method_payload_handling payload_handling,
    uint32_t flags) {
  GRPC_API_TRACE(
      "grpc_server_register_method(server=%p, method=%s, host=%s, "
      "flags=0x%08x)",
      4, (server, method, host, flags));
  return grpc_core::Server::FromC(server)->RegisterMethod(
      method, host, payload_handling, flags);
}

void grpc_server_start(grpc_server* server) {
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));
  grpc_core::Server::FromC(server)->Start();
}

void grpc_server_shutdown_and_notify(grpc_server* server,
                                     grpc_completion_queue* cq, void* tag) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
                 (server, cq, tag));
  grpc_core::Server::FromC(server)->ShutdownAndNotify(cq, tag);
}

void grpc_server_cancel_all_calls(grpc_server* server) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));
  grpc_core::Server::FromC(server)->CancelAllCalls();
}

void grpc_server_destroy(grpc_server* server) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
  grpc_core::Server::FromC(server)->Orphan();
}

grpc_call_error grpc_server_request_call(
    grpc_server* server, grpc_call** call, grpc_call_details* details,
    grpc_metadata_array* request_metadata,
    grpc_completion_queue* cq_bound_to_call,
    grpc_completion_queue* cq_for_notification, void* tag) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE(
      "grpc_server_request_call("
      "server=%p, call=%p, details=%p, initial_metadata=%p, "
      "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
      7,
      (server, call, details, request_metadata, cq_bound_to_call,
       cq_for_notification, tag));
  return grpc_core::Server::FromC(server)->RequestCall(
      call, details, request_metadata, cq_bound_to_call, cq_for_notification,
      tag);
}

grpc_call_error grpc_server_request_registered_call(
    grpc_server* server, void* registered_method, grpc_call** call,
    gpr_timespec* deadline, grpc_metadata_array* request_metadata,
    grpc_byte_buffer** optional_payload,
    grpc_completion_queue* cq_bound_to_call,
    grpc_completion_queue* cq_for_notification, void* tag_new) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  auto* rm =
      static_cast<grpc_core::Server::RegisteredMethod*>(registered_method);
  GRPC_API_TRACE(
      "grpc_server_request_registered_call("
      "server=%p, registered_method=%p, call=%p, deadline=%p, "
      "request_metadata=%p, "
      "optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
      "tag=%p)",
      9,
      (server, registered_method, call, deadline, request_metadata,
       optional_payload, cq_bound_to_call, cq_for_notification, tag_new));
  return grpc_core::Server::FromC(server)->RequestRegisteredCall(
      rm, call, deadline, request_metadata, optional_payload, cq_bound_to_call,
      cq_for_notification, tag_new);
}

void grpc_server_set_config_fetcher(
    grpc_server* server, grpc_server_config_fetcher* server_config_fetcher) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_set_config_fetcher(server=%p, config_fetcher=%p)",
                 2, (server, server_config_fetcher));
  grpc_core::Server::FromC(server)->set_config_fetcher(
      std::unique_ptr<grpc_server_config_fetcher>(server_config_fetcher));
}

void grpc_server_config_fetcher_destroy(
    grpc_server_config_fetcher* server_config_fetcher) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_config_fetcher_destroy(config_fetcher=%p)", 1,
                 (server_config_fetcher));
  delete server_config_fetcher;
}