1 //
2 // Copyright 2017 gRPC authors.
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 // http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16
#include <memory>

#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "gtest/gtest.h"

#include <grpc/grpc.h>
#include <grpc/impl/channel_arg_names.h>
#include <grpc/status.h>

#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/time.h"
#include "test/core/end2end/end2end_tests.h"
30
31 namespace grpc_core {
32 namespace {
33 // Tests transparent retries when the call was never sent out on the wire.
34 // This is similar to retry_transparent_not_sent_on_wire, except that
35 // instead of simulating the response with a filter, we actually have
36 // the transport behave the right way. We create a server with
37 // MAX_CONCURRENT_STREAMS set to 1. We start a call on the server, and
38 // then start a second call, which will get queued in the transport.
39 // Then, before the first call finishes, the server is shut down and
40 // restarted. The second call will fail in that transport instance and
41 // will be transparently retried after the server starts up again.
CORE_END2END_TEST(RetryHttp2Test, RetryTransparentMaxConcurrentStreams) {
  // Allow only one concurrent stream on the server, and disable overload
  // protection so the second stream is queued in the transport instead of
  // being rejected outright.
  const auto server_args =
      ChannelArgs()
          .Set(GRPC_ARG_MAX_CONCURRENT_STREAMS, 1)
          .Set(GRPC_ARG_MAX_CONCURRENT_STREAMS_OVERLOAD_PROTECTION, false);
  InitServer(server_args);
  InitClient(ChannelArgs());
  // First call: occupies the single available stream on the transport.
  auto c =
      NewClientCall("/service/method").Timeout(Duration::Minutes(1)).Create();
  IncomingStatusOnClient server_status;
  IncomingMetadata server_initial_metadata;
  IncomingMessage server_message;
  c.NewBatch(1)
      .SendInitialMetadata({})
      .SendMessage("foo")
      .SendCloseFromClient()
      .RecvInitialMetadata(server_initial_metadata)
      .RecvMessage(server_message)
      .RecvStatusOnClient(server_status);
  // Server should get a call.
  auto s = RequestCall(101);
  Expect(101, true);
  Step();
  EXPECT_EQ(s.method(), "/service/method");
  // Client starts a second call.
  // We set wait_for_ready for this call, so that if it retries before
  // the server comes back up, it stays pending.
  auto c2 =
      NewClientCall("/service/method").Timeout(Duration::Minutes(1)).Create();
  IncomingStatusOnClient server_status2;
  IncomingMetadata server_initial_metadata2;
  IncomingMessage server_message2;
  c2.NewBatch(2)
      .SendInitialMetadata({}, GRPC_INITIAL_METADATA_WAIT_FOR_READY)
      .SendMessage("bar")
      .SendCloseFromClient()
      .RecvInitialMetadata(server_initial_metadata2)
      .RecvMessage(server_message2)
      .RecvStatusOnClient(server_status2);
  // Start server shutdown.  The second call is still queued in the
  // transport, so it will fail there and become eligible for transparent
  // retry once a new transport is available.
  ShutdownServerAndNotify(102);
  // Server handles the first call.
  IncomingMessage client_message;
  s.NewBatch(103).RecvMessage(client_message);
  Expect(103, true);
  Step();
  IncomingCloseOnServer client_close;
  s.NewBatch(104)
      .RecvCloseOnServer(client_close)
      .SendInitialMetadata({})
      .SendMessage("baz")
      .SendStatusFromServer(GRPC_STATUS_OK, "xyz", {});
  // Server completes first call and shutdown.
  // Client completes first call.
  Expect(104, true);
  Expect(102, true);
  Expect(1, true);
  Step();
  // Clean up from first call.
  EXPECT_EQ(client_message.payload(), "foo");
  EXPECT_FALSE(client_close.was_cancelled());
  EXPECT_EQ(server_message.payload(), "baz");
  EXPECT_EQ(server_status.status(), GRPC_STATUS_OK);
  EXPECT_EQ(server_status.message(), "xyz");
  // Destroy server and then restart it.
  // TODO(hork): hack to solve PosixEventEngine Listener's async shutdown issue.
  absl::SleepFor(absl::Milliseconds(250));
  InitServer(server_args);
  // Server should get the second call, delivered via transparent retry on
  // the new transport.
  auto s2 = RequestCall(201);
  Expect(201, true);
  Step();
  EXPECT_EQ(s2.method(), "/service/method");
  // Make sure the "grpc-previous-rpc-attempts" header was NOT sent, since
  // we don't do that for transparent retries.
  EXPECT_EQ(s2.GetInitialMetadata("grpc-previous-rpc-attempts"), absl::nullopt);
  // Server handles the second call.
  IncomingMessage client_message2;
  IncomingCloseOnServer client_close2;
  s2.NewBatch(202).RecvMessage(client_message2);
  Expect(202, true);
  Step();
  s2.NewBatch(203)
      .RecvCloseOnServer(client_close2)
      .SendInitialMetadata({})
      .SendMessage("qux")
      .SendStatusFromServer(GRPC_STATUS_OK, "xyz", {});
  // Second call completes.
  Expect(203, true);
  Expect(2, true);
  Step();
  // Clean up from second call.
  EXPECT_EQ(client_message2.payload(), "bar");
  // Check the SECOND call's close object here (the original checked
  // client_close again, which was already verified above — copy-paste bug).
  EXPECT_FALSE(client_close2.was_cancelled());
  EXPECT_EQ(server_message2.payload(), "qux");
  EXPECT_EQ(server_status2.status(), GRPC_STATUS_OK);
  EXPECT_EQ(server_status2.message(), "xyz");
}
140 } // namespace
141 } // namespace grpc_core
142