// SPDX-License-Identifier: GPL-2.0-or-later
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Handle an ICMP/ICMP6 error turning up at the tunnel.  Push it through the
 * usual mechanism so that it gets parsed and presented through the UDP
 * socket's error_report().
 */
static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err,
				__be16 port, u32 info, u8 *payload)
{
	if (ip_hdr(skb)->version == IPVERSION)
		return ip_icmp_error(sk, skb, err, port, info, payload);
	if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6))
		return ipv6_icmp_error(sk, skb, err, port, info, payload);
}

/*
 * Set or clear the Don't Fragment flag on a socket.
 */
void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set)
{
	if (set)
		ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DO);
	else
		ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DONT);
}

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

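/*
 * Timer handler for reaping a local endpoint's idle client connections.  The
 * actual reaping is done by the I/O thread.
 */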
static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
{
	struct rxrpc_local *local =
		container_of(timer, struct rxrpc_local, client_conn_reap_timer);

	if (!local->kill_all_client_conns &&
	    test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
		rxrpc_wake_up_io_thread(local);
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct net *net,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	u32 tmp;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
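		/* The new endpoint starts with one reference and one active
		 * user, both owned by the caller.
		 */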
		refcount_set(&local->ref, 1);
		atomic_set(&local->active_users, 1);
		local->net = net;
		local->rxnet = rxrpc_net(net);
		INIT_HLIST_NODE(&local->link);
		init_completion(&local->io_thread_ready);
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
		skb_queue_head_init(&local->rx_delay_queue);
#endif
		skb_queue_head_init(&local->rx_queue);
		INIT_LIST_HEAD(&local->conn_attend_q);
		INIT_LIST_HEAD(&local->call_attend_q);

		local->client_bundles = RB_ROOT;
		spin_lock_init(&local->client_bundles_lock);
		local->kill_all_client_conns = false;
		INIT_LIST_HEAD(&local->idle_client_conns);
		timer_setup(&local->client_conn_reap_timer,
			    rxrpc_client_conn_reap_timeout, 0);

		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		local->srx.srx_service = 0;
		idr_init(&local->conn_ids);
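		/* Start the connection ID cursor at a random non-zero 30-bit
		 * value.
		 */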
		get_random_bytes(&tmp, sizeof(tmp));
		tmp &= 0x3fffffff;
		if (tmp == 0)
			tmp = 1;
		idr_set_cursor(&local->conn_ids, tmp);
		INIT_LIST_HEAD(&local->new_client_calls);
		spin_lock_init(&local->client_call_lock);

		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct sockaddr_rxrpc *srx = &local->srx;
	struct udp_port_cfg udp_conf = {0};
	struct task_struct *io_thread;
	struct sock *usk;
	int ret;

	_enter("%p{%d,%d}",
	       local, srx->transport_type, srx->transport.family);

	udp_conf.family = srx->transport.family;
	udp_conf.use_udp_checksums = true;
	if (udp_conf.family == AF_INET) {
		udp_conf.local_ip = srx->transport.sin.sin_addr;
		udp_conf.local_udp_port = srx->transport.sin.sin_port;
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
	} else {
		udp_conf.local_ip6 = srx->transport.sin6.sin6_addr;
		udp_conf.local_udp_port = srx->transport.sin6.sin6_port;
		udp_conf.use_udp6_tx_checksums = true;
		udp_conf.use_udp6_rx_checksums = true;
#endif
	}
	ret = udp_sock_create(net, &udp_conf, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

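	/* Turn the UDP socket into a tunnel socket so that incoming packets
	 * and ICMP errors are fed straight to rxrpc's encap handlers.
	 */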
	tuncfg.encap_type = UDP_ENCAP_RXRPC;
	tuncfg.encap_rcv = rxrpc_encap_rcv;
	tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
	tuncfg.sk_user_data = local;
	setup_udp_tunnel_sock(net, local->socket, &tuncfg);

	/* set the socket up */
	usk = local->socket->sk;
	usk->sk_error_report = rxrpc_error_report;

	switch (srx->transport.family) {
	case AF_INET6:
		/* we want to receive ICMPv6 errors */
		ip6_sock_set_recverr(usk);

		/* Fall through and set the IPv4 options too, otherwise we
		 * don't get errors from IPv4 packets sent through the IPv6
		 * socket.
		 */
		fallthrough;
	case AF_INET:
		/* we want to receive ICMP errors */
		ip_sock_set_recverr(usk);

		/* we want to set the don't fragment bit */
		rxrpc_local_dont_fragment(local, true);
		break;

	default:
		BUG();
	}

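	/* Start the I/O thread that will service this endpoint. */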
	io_thread = kthread_run(rxrpc_io_thread, local,
				"krxrpcio/%u", ntohs(udp_conf.local_udp_port));
	if (IS_ERR(io_thread)) {
		ret = PTR_ERR(io_thread);
		goto error_sock;
	}

	wait_for_completion(&local->io_thread_ready);
	WRITE_ONCE(local->io_thread, io_thread);
	_leave(" = 0");
	return 0;

error_sock:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct hlist_node *cursor;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	hlist_for_each(cursor, &rxnet->local_endpoints) {
		local = hlist_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff != 0)
			continue;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We want to replace a dying object.
		 * Attempting to bind the transport socket may still fail if
		 * we're attempting to use a local address that the dying
		 * object is still using.
		 */
		if (!rxrpc_use_local(local, rxrpc_local_use_lookup))
			break;

		goto found;
	}

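	/* No reusable endpoint was found, so allocate a new one and open a
	 * transport socket for it.
	 */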
	local = rxrpc_alloc_local(net, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

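	/* If we broke out of the loop at a dying endpoint, replace it in the
	 * list; otherwise add the new endpoint at the head.
	 */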
	if (cursor) {
		hlist_replace_rcu(cursor, &local->link);
		cursor->pprev = NULL;
	} else {
		hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
	}

found:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	if (local)
		call_rcu(&local->rcu, rxrpc_local_rcu);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local,
				    enum rxrpc_local_trace why)
{
	int r, u;

	u = atomic_read(&local->active_users);
	__refcount_inc(&local->ref, &r);
	trace_rxrpc_local(local->debug_id, why, r + 1, u);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local,
					  enum rxrpc_local_trace why)
{
	int r, u;

	if (local && __refcount_inc_not_zero(&local->ref, &r)) {
		u = atomic_read(&local->active_users);
		trace_rxrpc_local(local->debug_id, why, r + 1, u);
		return local;
	}

	return NULL;
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r, u;

	if (local) {
		debug_id = local->debug_id;

		u = atomic_read(&local->active_users);
		dead = __refcount_dec_and_test(&local->ref, &r);
		trace_rxrpc_local(debug_id, why, r, u);

		if (dead)
			call_rcu(&local->rcu, rxrpc_local_rcu);
	}
}

/*
 * Start using a local endpoint.
 */
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local,
				    enum rxrpc_local_trace why)
{
	local = rxrpc_get_local_maybe(local, rxrpc_local_get_for_use);
	if (!local)
		return NULL;

	if (!__rxrpc_use_local(local, why)) {
		rxrpc_put_local(local, rxrpc_local_put_for_use);
		return NULL;
	}

	return local;
}

/*
 * Cease using a local endpoint.  Once the number of active users reaches 0,
 * we start the closure of the transport in the I/O thread.
 */
void rxrpc_unuse_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
	unsigned int debug_id;
	int r, u;

	if (local) {
		debug_id = local->debug_id;
		r = refcount_read(&local->ref);
		u = atomic_dec_return(&local->active_users);
		trace_rxrpc_local(debug_id, why, r, u);
		if (u == 0)
			kthread_stop(local->io_thread);
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
void rxrpc_destroy_local(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	local->dead = true;

	mutex_lock(&rxnet->local_mutex);
	hlist_del_init_rcu(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	rxrpc_clean_up_local_conns(local);
	rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
	ASSERT(!local->service);

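	/* Shut down and release the transport socket, detaching it from this
	 * endpoint first.
	 */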
	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
	rxrpc_purge_queue(&local->rx_delay_queue);
#endif
	rxrpc_purge_queue(&local->rx_queue);
	rxrpc_purge_client_connections(local);
	page_frag_cache_drain(&local->tx_alloc);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	rxrpc_see_local(local, rxrpc_local_free);
	kfree(local);
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!hlist_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, refcount_read(&local->ref));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}