// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The User Datagram Protocol (UDP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <[email protected]>
 *              Arnt Gulbrandsen, <[email protected]>
 *              Alan Cox, <[email protected]>
 *              Hirokazu Takahashi, <[email protected]>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() calls
 *              Alan Cox        :       stopped close while in use off icmp
 *                                      messages. Not a fix but a botch that
 *                                      for udp at least is 'valid'.
 *              Alan Cox        :       Fixed icmp handling properly
 *              Alan Cox        :       Correct error for oversized datagrams
 *              Alan Cox        :       Tidied select() semantics.
 *              Alan Cox        :       udp_err() fixed properly, also now
 *                                      select and read wake correctly on errors
 *              Alan Cox        :       udp_send verify_area moved to avoid mem leak
 *              Alan Cox        :       UDP can count its memory
 *              Alan Cox        :       send to an unknown connection causes
 *                                      an ECONNREFUSED off the icmp, but
 *                                      does NOT close.
 *              Alan Cox        :       Switched to new sk_buff handlers. No more backlog!
 *              Alan Cox        :       Using generic datagram code. Even smaller
 *                                      and the PEEK bug no longer crashes it.
 *              Fred Van Kempen :       Net2e support for sk->broadcast.
 *              Alan Cox        :       Uses skb_free_datagram
 *              Alan Cox        :       Added get/set sockopt support.
 *              Alan Cox        :       Broadcasting without option set returns EACCES.
 *              Alan Cox        :       No wakeup calls. Instead we now use the callbacks.
 *              Alan Cox        :       Use ip_tos and ip_ttl
 *              Alan Cox        :       SNMP Mibs
 *              Alan Cox        :       MSG_DONTROUTE, and 0.0.0.0 support.
 *              Matt Dillon     :       UDP length checks.
 *              Alan Cox        :       Smarter af_inet used properly.
 *              Alan Cox        :       Use new kernel side addressing.
 *              Alan Cox        :       Incorrect return on truncated datagram receive.
 *              Arnt Gulbrandsen:       New udp_send and stuff
 *              Alan Cox        :       Cache last socket
 *              Alan Cox        :       Route cache
 *              Jon Peatfield   :       Minor efficiency fix to sendto().
 *              Mike Shaver     :       RFC1122 checks.
 *              Alan Cox        :       Nonblocking error fix.
 *              Willy Konynenberg:      Transparent proxying support.
 *              Mike McLagan    :       Routing by source
 *              David S. Miller :       New socket lookup architecture.
 *                                      Last socket cache retained as it
 *                                      does have a high hit rate.
 *              Olaf Kirch      :       Don't linearise iovec on sendmsg.
 *              Andi Kleen      :       Some cleanups, cache destination entry
 *                                      for connect.
 *              Vitaly E. Lavrov:       Transparent proxy revived after year coma.
 *              Melvin Smith    :       Check msg_name not msg_namelen in sendto(),
 *                                      return ENOTCONN for unconnected sockets (POSIX)
 *              Janos Farkas    :       don't deliver multi/broadcasts to a different
 *                                      bound-to-device socket
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 *              Arnaldo C. Melo :       convert /proc/net/udp to seq_file
 *              YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option,
 *              Alexey Kuznetsov:               which allows both IPv4 and IPv6 sockets
 *                                              to bind a single port at the same time.
 *              Derek Atkins <[email protected]>: Add Encapsulation Support
 *              James Chapman   :       Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#include <net/gro.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN_PERNET)

static struct udp_table *udp_get_table_prot(struct sock *sk)
{
        return sk->sk_prot->h.udp_table ? : sock_net(sk)->ipv4.udp_table;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
                               const struct udp_hslot *hslot,
                               unsigned long *bitmap,
                               struct sock *sk, unsigned int log)
{
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);

        sk_for_each(sk2, &hslot->head) {
                if (net_eq(sock_net(sk2), net) &&
                    sk2 != sk &&
                    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    inet_rcv_saddr_equal(sk, sk2, true)) {
                        if (sk2->sk_reuseport && sk->sk_reuseport &&
                            !rcu_access_pointer(sk->sk_reuseport_cb) &&
                            uid_eq(uid, sock_i_uid(sk2))) {
                                if (!bitmap)
                                        return 0;
                        } else {
                                if (!bitmap)
                                        return 1;
                                __set_bit(udp_sk(sk2)->udp_port_hash >> log,
                                          bitmap);
                        }
                }
        }
        return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
                                struct udp_hslot *hslot2,
                                struct sock *sk)
{
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);
        int res = 0;

        spin_lock(&hslot2->lock);
        udp_portaddr_for_each_entry(sk2, &hslot2->head) {
                if (net_eq(sock_net(sk2), net) &&
                    sk2 != sk &&
                    (udp_sk(sk2)->udp_port_hash == num) &&
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    inet_rcv_saddr_equal(sk, sk2, true)) {
                        if (sk2->sk_reuseport && sk->sk_reuseport &&
                            !rcu_access_pointer(sk->sk_reuseport_cb) &&
                            uid_eq(uid, sock_i_uid(sk2))) {
                                res = 0;
                        } else {
                                res = 1;
                        }
                        break;
                }
        }
        spin_unlock(&hslot2->lock);
        return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
        struct net *net = sock_net(sk);
        kuid_t uid = sock_i_uid(sk);
        struct sock *sk2;

        sk_for_each(sk2, &hslot->head) {
                if (net_eq(sock_net(sk2), net) &&
                    sk2 != sk &&
                    sk2->sk_family == sk->sk_family &&
                    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
                    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
                    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
                    inet_rcv_saddr_equal(sk, sk2, false)) {
                        return reuseport_add_sock(sk, sk2,
                                                  inet_rcv_saddr_any(sk));
                }
        }

        return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
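
/*
 * Example: a minimal userspace sketch of how a reuseport group is formed.
 * Each socket sets SO_REUSEPORT before bind(); the first such bind() on a
 * port ends up in reuseport_alloc() above, and later sockets that match on
 * netns, family, port hash and bound device join the group through
 * reuseport_add_sock().
 *
 *      int one = 1;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *      bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */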

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
                     unsigned int hash2_nulladdr)
{
        struct udp_table *udptable = udp_get_table_prot(sk);
        struct udp_hslot *hslot, *hslot2;
        struct net *net = sock_net(sk);
        int error = -EADDRINUSE;

        if (!snum) {
                DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
                unsigned short first, last;
                int low, high, remaining;
                unsigned int rand;

                inet_sk_get_local_port_range(sk, &low, &high);
                remaining = (high - low) + 1;

                rand = get_random_u32();
                first = reciprocal_scale(rand, remaining) + low;
                /*
                 * force rand to be an odd multiple of UDP_HTABLE_SIZE
                 */
                rand = (rand | 1) * (udptable->mask + 1);
                last = first + udptable->mask + 1;
                do {
                        hslot = udp_hashslot(udptable, net, first);
                        bitmap_zero(bitmap, PORTS_PER_CHAIN);
                        spin_lock_bh(&hslot->lock);
                        udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
                                            udptable->log);

                        snum = first;
                        /*
                         * Iterate on all possible values of snum for this hash.
                         * Using steps of an odd multiple of UDP_HTABLE_SIZE
                         * gives us randomization and full range coverage.
                         */
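                        /*
                         * e.g. with 256 slots (mask == 255) the step is an
                         * odd multiple of 256: snum stays congruent to
                         * first (mod 256), i.e. in this same hash slot,
                         * and visits all 256 port values mapping to the
                         * slot before wrapping back to first.
                         */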
                        do {
                                if (low <= snum && snum <= high &&
                                    !test_bit(snum >> udptable->log, bitmap) &&
                                    !inet_is_local_reserved_port(net, snum))
                                        goto found;
                                snum += rand;
                        } while (snum != first);
                        spin_unlock_bh(&hslot->lock);
                        cond_resched();
                } while (++first != last);
                goto fail;
        } else {
                hslot = udp_hashslot(udptable, net, snum);
                spin_lock_bh(&hslot->lock);
                if (hslot->count > 10) {
                        int exist;
                        unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

                        slot2          &= udptable->mask;
                        hash2_nulladdr &= udptable->mask;

                        hslot2 = udp_hashslot2(udptable, slot2);
                        if (hslot->count < hslot2->count)
                                goto scan_primary_hash;

                        exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
                        if (!exist && (hash2_nulladdr != slot2)) {
                                hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
                                exist = udp_lib_lport_inuse2(net, snum, hslot2,
                                                             sk);
                        }
                        if (exist)
                                goto fail_unlock;
                        else
                                goto found;
                }
scan_primary_hash:
                if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
                        goto fail_unlock;
        }
found:
        inet_sk(sk)->inet_num = snum;
        udp_sk(sk)->udp_port_hash = snum;
        udp_sk(sk)->udp_portaddr_hash ^= snum;
        if (sk_unhashed(sk)) {
                if (sk->sk_reuseport &&
                    udp_reuseport_add_sock(sk, hslot)) {
                        inet_sk(sk)->inet_num = 0;
                        udp_sk(sk)->udp_port_hash = 0;
                        udp_sk(sk)->udp_portaddr_hash ^= snum;
                        goto fail_unlock;
                }

                sock_set_flag(sk, SOCK_RCU_FREE);

                sk_add_node_rcu(sk, &hslot->head);
                hslot->count++;
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
                spin_lock(&hslot2->lock);
                if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
                    sk->sk_family == AF_INET6)
                        hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
                                           &hslot2->head);
                else
                        hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
                                           &hslot2->head);
                hslot2->count++;
                spin_unlock(&hslot2->lock);
        }

        error = 0;
fail_unlock:
        spin_unlock_bh(&hslot->lock);
fail:
        return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
        unsigned int hash2_nulladdr =
                ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
        unsigned int hash2_partial =
                ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
        return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static int compute_score(struct sock *sk, const struct net *net,
                         __be32 saddr, __be16 sport,
                         __be32 daddr, unsigned short hnum,
                         int dif, int sdif)
{
        int score;
        struct inet_sock *inet;
        bool dev_match;

        if (!net_eq(sock_net(sk), net) ||
            udp_sk(sk)->udp_port_hash != hnum ||
            ipv6_only_sock(sk))
                return -1;

        if (sk->sk_rcv_saddr != daddr)
                return -1;

        score = (sk->sk_family == PF_INET) ? 2 : 1;

        inet = inet_sk(sk);
        if (inet->inet_daddr) {
                if (inet->inet_daddr != saddr)
                        return -1;
                score += 4;
        }

        if (inet->inet_dport) {
                if (inet->inet_dport != sport)
                        return -1;
                score += 4;
        }

        dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
                                        dif, sdif);
        if (!dev_match)
                return -1;
        if (sk->sk_bound_dev_if)
                score += 4;

        if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
        return score;
}

u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
                const __be32 faddr, const __be16 fport)
{
        net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

        return __inet_ehashfn(laddr, lport, faddr, fport,
                              udp_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL(udp_ehashfn);

/**
 * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port)
 * @net:        Network namespace
 * @saddr:      Source address, network order
 * @sport:      Source port, network order
 * @daddr:      Destination address, network order
 * @hnum:       Destination port, host order
 * @dif:        Destination interface index
 * @sdif:       Destination bridge port index, if relevant
 * @udptable:   Set of UDP hash tables
 *
 * Simplified lookup to be used as fallback if no sockets are found due to a
 * potential race between (receive) address change, and lookup happening before
 * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
 * result sockets, because if we have one, we don't need the fallback at all.
 *
 * Called under rcu_read_lock().
 *
 * Return: socket with highest matching score if any, NULL if none
 */
static struct sock *udp4_lib_lookup1(const struct net *net,
                                     __be32 saddr, __be16 sport,
                                     __be32 daddr, unsigned int hnum,
                                     int dif, int sdif,
                                     const struct udp_table *udptable)
{
        unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot = &udptable->hash[slot];
        struct sock *sk, *result = NULL;
        int score, badness = 0;

        sk_for_each_rcu(sk, &hslot->head) {
                score = compute_score(sk, net,
                                      saddr, sport, daddr, hnum, dif, sdif);
                if (score > badness) {
                        result = sk;
                        badness = score;
                }
        }

        return result;
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(const struct net *net,
                                     __be32 saddr, __be16 sport,
                                     __be32 daddr, unsigned int hnum,
                                     int dif, int sdif,
                                     struct udp_hslot *hslot2,
                                     struct sk_buff *skb)
{
        struct sock *sk, *result;
        int score, badness;
        bool need_rescore;

        result = NULL;
        badness = 0;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
                need_rescore = false;
rescore:
                score = compute_score(need_rescore ? result : sk, net, saddr,
                                      sport, daddr, hnum, dif, sdif);
                if (score > badness) {
                        badness = score;

                        if (need_rescore)
                                continue;

                        if (sk->sk_state == TCP_ESTABLISHED) {
                                result = sk;
                                continue;
                        }

                        result = inet_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
                                                       saddr, sport, daddr, hnum, udp_ehashfn);
                        if (!result) {
                                result = sk;
                                continue;
                        }

                        /* Fall back to scoring if group has connections */
                        if (!reuseport_has_conns(sk))
                                return result;

                        /* Reuseport logic returned an error, keep original score. */
                        if (IS_ERR(result))
                                continue;

                        /* compute_score is too long of a function to be
                         * inlined, and calling it again here yields
                         * measurable overhead for some workloads. Work
                         * around it by jumping backwards to rescore
                         * 'result'.
                         */
                        need_rescore = true;
                        goto rescore;
                }
        }
        return result;
}

#if IS_ENABLED(CONFIG_BASE_SMALL)
static struct sock *udp4_lib_lookup4(const struct net *net,
                                     __be32 saddr, __be16 sport,
                                     __be32 daddr, unsigned int hnum,
                                     int dif, int sdif,
                                     struct udp_table *udptable)
{
        return NULL;
}

static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
                        u16 newhash4)
{
}

static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
static struct sock *udp4_lib_lookup4(const struct net *net,
                                     __be32 saddr, __be16 sport,
                                     __be32 daddr, unsigned int hnum,
                                     int dif, int sdif,
                                     struct udp_table *udptable)
{
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        const struct hlist_nulls_node *node;
        struct udp_hslot *hslot4;
        unsigned int hash4, slot;
        struct udp_sock *up;
        struct sock *sk;

        hash4 = udp_ehashfn(net, daddr, hnum, saddr, sport);
        slot = hash4 & udptable->mask;
        hslot4 = &udptable->hash4[slot];
        INET_ADDR_COOKIE(acookie, saddr, daddr);

begin:
        /* SLAB_TYPESAFE_BY_RCU not used, so we don't need to touch sk_refcnt */
        udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
                sk = (struct sock *)up;
                if (inet_match(net, sk, acookie, ports, dif, sdif))
                        return sk;
        }

        /* if the nulls value we got at the end of this lookup is not the
         * expected one, we must restart lookup. We probably met an item that
         * was moved to another chain due to rehash.
         */
        if (get_nulls_value(node) != slot)
                goto begin;

        return NULL;
}

/* udp_rehash4() only checks hslot4, and hash4_cnt is not processed. */
static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
                        u16 newhash4)
{
        struct udp_hslot *hslot4, *nhslot4;

        hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);
        nhslot4 = udp_hashslot4(udptable, newhash4);
        udp_sk(sk)->udp_lrpa_hash = newhash4;

        if (hslot4 != nhslot4) {
                spin_lock_bh(&hslot4->lock);
                hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
                hslot4->count--;
                spin_unlock_bh(&hslot4->lock);

                spin_lock_bh(&nhslot4->lock);
                hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
                                         &nhslot4->nulls_head);
                nhslot4->count++;
                spin_unlock_bh(&nhslot4->lock);
        }
}

static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
{
        struct udp_hslot *hslot2, *hslot4;

        if (udp_hashed4(sk)) {
                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
                hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);

                spin_lock(&hslot4->lock);
                hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
                hslot4->count--;
                spin_unlock(&hslot4->lock);

                spin_lock(&hslot2->lock);
                udp_hash4_dec(hslot2);
                spin_unlock(&hslot2->lock);
        }
}

void udp_lib_hash4(struct sock *sk, u16 hash)
{
        struct udp_hslot *hslot, *hslot2, *hslot4;
        struct net *net = sock_net(sk);
        struct udp_table *udptable;

        /* A connected UDP socket can re-connect to another remote address,
         * which will be handled by rehash. Thus there is no need to redo
         * hash4 here.
         */
        if (udp_hashed4(sk))
                return;

        udptable = net->ipv4.udp_table;
        hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash);
        hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
        hslot4 = udp_hashslot4(udptable, hash);
        udp_sk(sk)->udp_lrpa_hash = hash;

        spin_lock_bh(&hslot->lock);
        if (rcu_access_pointer(sk->sk_reuseport_cb))
                reuseport_detach_sock(sk);

        spin_lock(&hslot4->lock);
        hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
                                 &hslot4->nulls_head);
        hslot4->count++;
        spin_unlock(&hslot4->lock);

        spin_lock(&hslot2->lock);
        udp_hash4_inc(hslot2);
        spin_unlock(&hslot2->lock);

        spin_unlock_bh(&hslot->lock);
}
EXPORT_SYMBOL(udp_lib_hash4);

/* call with sock lock */
void udp4_hash4(struct sock *sk)
{
        struct net *net = sock_net(sk);
        unsigned int hash;

        if (sk_unhashed(sk) || sk->sk_rcv_saddr == htonl(INADDR_ANY))
                return;

        hash = udp_ehashfn(net, sk->sk_rcv_saddr, sk->sk_num,
                           sk->sk_daddr, sk->sk_dport);

        udp_lib_hash4(sk, hash);
}
EXPORT_SYMBOL(udp4_hash4);
#endif /* CONFIG_BASE_SMALL */

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport, int dif,
                int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
        unsigned short hnum = ntohs(dport);
        struct udp_hslot *hslot2;
        struct sock *result, *sk;
        unsigned int hash2;

        hash2 = ipv4_portaddr_hash(net, daddr, hnum);
        hslot2 = udp_hashslot2(udptable, hash2);

        if (udp_has_hash4(hslot2)) {
                result = udp4_lib_lookup4(net, saddr, sport, daddr, hnum,
                                          dif, sdif, udptable);
                if (result) /* udp4_lib_lookup4 returns sk or NULL */
                        return result;
        }

        /* Lookup connected or non-wildcard socket */
        result = udp4_lib_lookup2(net, saddr, sport,
                                  daddr, hnum, dif, sdif,
                                  hslot2, skb);
        if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
                goto done;

        /* Lookup redirect from BPF */
        if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
            udptable == net->ipv4.udp_table) {
                sk = inet_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
                                               saddr, sport, daddr, hnum, dif,
                                               udp_ehashfn);
                if (sk) {
                        result = sk;
                        goto done;
                }
        }

        /* Got non-wildcard socket or error on first lookup */
        if (result)
                goto done;

        /* Lookup wildcard sockets */
        hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
        hslot2 = udp_hashslot2(udptable, hash2);

        result = udp4_lib_lookup2(net, saddr, sport,
                                  htonl(INADDR_ANY), hnum, dif, sdif,
                                  hslot2, skb);
        if (!IS_ERR_OR_NULL(result))
                goto done;

        /* Primary hash (destination port) lookup as fallback for this race:
         * 1. __ip4_datagram_connect() sets sk_rcv_saddr
         * 2. lookup (this function): new sk_rcv_saddr, hashes not updated yet
         * 3. rehash operation updating _secondary and four-tuple_ hashes
         * The primary hash doesn't need an update after 1., so, thanks to this
         * further step, 1. and 3. don't need to be atomic against the lookup.
         */
        result = udp4_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
                                  udptable);

done:
        if (IS_ERR(result))
                return NULL;
        return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
                                                 __be16 sport, __be16 dport,
                                                 struct udp_table *udptable)
{
        const struct iphdr *iph = ip_hdr(skb);

        return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
                                 iph->daddr, dport, inet_iif(skb),
                                 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
                                 __be16 sport, __be16 dport)
{
        const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
        const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
        struct net *net = dev_net(skb->dev);
        int iif, sdif;

        inet_get_iif_sdif(skb, &iif, &sdif);

        return __udp4_lib_lookup(net, iph->saddr, sport,
                                 iph->daddr, dport, iif,
                                 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount, so the caller must drop the reference
 * with sock_put() when done with the socket.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
                             __be32 daddr, __be16 dport, int dif)
{
        struct sock *sk;

        sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
                               dif, 0, net->ipv4.udp_table, NULL);
        if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
                                       __be16 loc_port, __be32 loc_addr,
                                       __be16 rmt_port, __be32 rmt_addr,
                                       int dif, int sdif, unsigned short hnum)
{
        const struct inet_sock *inet = inet_sk(sk);

        if (!net_eq(sock_net(sk), net) ||
            udp_sk(sk)->udp_port_hash != hnum ||
            (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
            (inet->inet_dport != rmt_port && inet->inet_dport) ||
            (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
            ipv6_only_sock(sk) ||
            !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
                return false;
        if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
                return false;
        return true;
}

DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
EXPORT_SYMBOL(udp_encap_needed_key);

#if IS_ENABLED(CONFIG_IPV6)
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
EXPORT_SYMBOL(udpv6_encap_needed_key);
#endif

void udp_encap_enable(void)
{
        static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

void udp_encap_disable(void)
{
        static_branch_dec(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_disable);
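
/*
 * Example: the static key above is typically flipped when a plain UDP socket
 * is turned into an encapsulation socket, e.g. by an IKE daemon enabling
 * ESP-in-UDP (RFC 3948) from userspace:
 *
 *      int type = UDP_ENCAP_ESPINUDP;
 *
 *      setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type));
 *
 * or by kernel-side tunnel drivers going through setup_udp_tunnel_sock(),
 * which installs encap_type/encap_rcv and enables the encap static key.
 */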

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
        int i;

        for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
                int (*handler)(struct sk_buff *skb, u32 info);
                const struct ip_tunnel_encap_ops *encap;

                encap = rcu_dereference(iptun_encaps[i]);
                if (!encap)
                        continue;
                handler = encap->err_handler;
                if (handler && !handler(skb, info))
                        return 0;
        }

        return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
                                         const struct iphdr *iph,
                                         struct udphdr *uh,
                                         struct udp_table *udptable,
                                         struct sock *sk,
                                         struct sk_buff *skb, u32 info)
{
        int (*lookup)(struct sock *sk, struct sk_buff *skb);
        int network_offset, transport_offset;
        struct udp_sock *up;

        network_offset = skb_network_offset(skb);
        transport_offset = skb_transport_offset(skb);

        /* Network header needs to point to the outer IPv4 header inside ICMP */
        skb_reset_network_header(skb);

        /* Transport header needs to point to the UDP header */
        skb_set_transport_header(skb, iph->ihl << 2);

        if (sk) {
                up = udp_sk(sk);

                lookup = READ_ONCE(up->encap_err_lookup);
                if (lookup && lookup(sk, skb))
                        sk = NULL;

                goto out;
        }

        sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
                               iph->saddr, uh->dest, skb->dev->ifindex, 0,
                               udptable, NULL);
        if (sk) {
                up = udp_sk(sk);

                lookup = READ_ONCE(up->encap_err_lookup);
                if (!lookup || lookup(sk, skb))
                        sk = NULL;
        }

out:
        if (!sk)
                sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

        skb_set_transport_header(skb, transport_offset);
        skb_set_network_header(skb, network_offset);

        return sk;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
        struct inet_sock *inet;
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        bool tunnel = false;
        struct sock *sk;
        int harderr;
        int err;
        struct net *net = dev_net(skb->dev);

        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                               iph->saddr, uh->source, skb->dev->ifindex,
                               inet_sdif(skb), udptable, NULL);

        if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
                /* No socket for error: try tunnels before discarding */
                if (static_branch_unlikely(&udp_encap_needed_key)) {
                        sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
                                                  info);
                        if (!sk)
                                return 0;
                } else
                        sk = ERR_PTR(-ENOENT);

                if (IS_ERR(sk)) {
                        __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                        return PTR_ERR(sk);
                }

                tunnel = true;
        }

        err = 0;
        harderr = 0;
        inet = inet_sk(sk);

        switch (type) {
        default:
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        case ICMP_SOURCE_QUENCH:
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                harderr = 1;
                break;
        case ICMP_DEST_UNREACH:
                if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
                        ipv4_sk_update_pmtu(skb, sk, info);
                        if (READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT) {
                                err = EMSGSIZE;
                                harderr = 1;
                                break;
                        }
                        goto out;
                }
                err = EHOSTUNREACH;
                if (code <= NR_ICMP_UNREACH) {
                        harderr = icmp_err_convert[code].fatal;
                        err = icmp_err_convert[code].errno;
                }
                break;
        case ICMP_REDIRECT:
                ipv4_sk_redirect(skb, sk);
                goto out;
        }

        /*
         * RFC1122: OK.  Passes ICMP errors back to application, as per
         * 4.1.3.3.
         */
        if (tunnel) {
                /* ...not for tunnels though: we don't have a sending socket */
                if (udp_sk(sk)->encap_err_rcv)
                        udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, info,
                                                  (u8 *)(uh + 1));
                goto out;
        }
        if (!inet_test_bit(RECVERR, sk)) {
                if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                        goto out;
        } else
                ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));

        sk->sk_err = err;
        sk_error_report(sk);
out:
        return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
        return __udp4_lib_err(skb, info, dev_net(skb->dev)->ipv4.udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
        struct udp_sock *up = udp_sk(sk);

        if (up->pending) {
                up->len = 0;
                WRITE_ONCE(up->pending, 0);
                ip_flush_pending_frames(sk);
        }
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 *      udp4_hwcsum  -  handle outgoing HW checksumming
 *      @skb:   sk_buff containing the filled-in UDP header
 *              (checksum field must be zeroed out)
 *      @src:   source IP address
 *      @dst:   destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
        struct udphdr *uh = udp_hdr(skb);
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
        int hlen = len;
        __wsum csum = 0;

        if (!skb_has_frag_list(skb)) {
                /*
                 * Only one fragment on the socket.
                 */
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                uh->check = ~csum_tcpudp_magic(src, dst, len,
                                               IPPROTO_UDP, 0);
        } else {
                struct sk_buff *frags;

                /*
                 * HW checksumming won't work with two or more fragments on
                 * the socket, since the checksums of all the sk_buffs must
                 * be combined: fold the frag-list csums together and finish
                 * the checksum in software.
                 */
                skb_walk_frags(skb, frags) {
                        csum = csum_add(csum, frags->csum);
                        hlen -= frags->len;
                }

                csum = skb_checksum(skb, offset, hlen, csum);
                skb->ip_summed = CHECKSUM_NONE;

                uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
                  __be32 saddr, __be32 daddr, int len)
{
        struct udphdr *uh = udp_hdr(skb);

        if (nocheck) {
                uh->check = 0;
        } else if (skb_is_gso(skb)) {
                uh->check = ~udp_v4_check(len, saddr, daddr, 0);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                uh->check = 0;
                uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        } else {
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                uh->check = ~udp_v4_check(len, saddr, daddr, 0);
        }
}
EXPORT_SYMBOL(udp_set_csum);

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
                        struct inet_cork *cork)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct udphdr *uh;
        int err;
        int is_udplite = IS_UDPLITE(sk);
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
        int datalen = len - sizeof(*uh);
        __wsum csum = 0;

        /*
         * Create a UDP header
         */
        uh = udp_hdr(skb);
        uh->source = inet->inet_sport;
        uh->dest = fl4->fl4_dport;
        uh->len = htons(len);
        uh->check = 0;

        if (cork->gso_size) {
                const int hlen = skb_network_header_len(skb) +
                                 sizeof(struct udphdr);

                if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
                        kfree_skb(skb);
                        return -EMSGSIZE;
                }
                if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
                if (sk->sk_no_check_tx) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
                if (is_udplite || dst_xfrm(skb_dst(skb))) {
                        kfree_skb(skb);
                        return -EIO;
                }

                if (datalen > cork->gso_size) {
                        skb_shinfo(skb)->gso_size = cork->gso_size;
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
                        skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
                                                                 cork->gso_size);

                        /* Don't checksum the payload, skb will get segmented */
                        goto csum_partial;
                }
        }

        if (is_udplite)                                  /*     UDP-Lite      */
                csum = udplite_csum(skb);

        else if (sk->sk_no_check_tx) {                   /* UDP csum off */

                skb->ip_summed = CHECKSUM_NONE;
                goto send;

        } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

                udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
                goto send;

        } else
                csum = udp_csum(skb);

        /* add protocol-dependent pseudo-header */
        uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
                                      sk->sk_protocol, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;

send:
        err = ip_send_skb(sock_net(sk), skb);
        if (err) {
                if (err == -ENOBUFS &&
                    !inet_test_bit(RECVERR, sk)) {
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_SNDBUFERRORS, is_udplite);
                        err = 0;
                }
        } else
                UDP_INC_STATS(sock_net(sk),
                              UDP_MIB_OUTDATAGRAMS, is_udplite);
        return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
        struct udp_sock *up = udp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
        struct sk_buff *skb;
        int err = 0;

        skb = ip_finish_skb(sk, fl4);
        if (!skb)
                goto out;

        err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
        up->len = 0;
        WRITE_ONCE(up->pending, 0);
        return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
        switch (cmsg->cmsg_type) {
        case UDP_SEGMENT:
                if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
                        return -EINVAL;
                *gso_size = *(__u16 *)CMSG_DATA(cmsg);
                return 0;
        default:
                return -EINVAL;
        }
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
        struct cmsghdr *cmsg;
        bool need_ip = false;
        int err;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_UDP) {
                        need_ip = true;
                        continue;
                }

                err = __udp_cmsg_send(cmsg, gso_size);
                if (err)
                        return err;
        }

        return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);
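
/*
 * Example: a minimal userspace sketch of the UDP_SEGMENT cmsg parsed above,
 * asking the stack to segment one large write into 1400-byte datagrams
 * (UDP GSO). The payload iovec is omitted for brevity; the same value can
 * also be set socket-wide with setsockopt(fd, SOL_UDP, UDP_SEGMENT, ...).
 *
 *      char control[CMSG_SPACE(sizeof(__u16))] = { 0 };
 *      struct msghdr msg = { 0 };
 *      struct cmsghdr *cm;
 *      __u16 gso_size = 1400;
 *
 *      msg.msg_control = control;
 *      msg.msg_controllen = sizeof(control);
 *      cm = CMSG_FIRSTHDR(&msg);
 *      cm->cmsg_level = SOL_UDP;
 *      cm->cmsg_type = UDP_SEGMENT;
 *      cm->cmsg_len = CMSG_LEN(sizeof(__u16));
 *      memcpy(CMSG_DATA(cm), &gso_size, sizeof(gso_size));
 *      sendmsg(fd, &msg, 0);
 */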

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct udp_sock *up = udp_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
        struct flowi4 fl4_stack;
        struct flowi4 *fl4;
        int ulen = len;
        struct ipcm_cookie ipc;
        struct rtable *rt = NULL;
        int free = 0;
        int connected = 0;
        __be32 daddr, faddr, saddr;
        u8 tos, scope;
        __be16 dport;
        int err, is_udplite = IS_UDPLITE(sk);
        int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
        struct sk_buff *skb;
        struct ip_options_data opt_copy;
        int uc_index;

        if (len > 0xFFFF)
                return -EMSGSIZE;

        /*
         *      Check the flags.
         */

        if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
                return -EOPNOTSUPP;

        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

        fl4 = &inet->cork.fl.u.ip4;
        if (READ_ONCE(up->pending)) {
                /*
                 * There are pending frames.
                 * The socket lock must be held while it's corked.
                 */
                lock_sock(sk);
                if (likely(up->pending)) {
                        if (unlikely(up->pending != AF_INET)) {
                                release_sock(sk);
                                return -EINVAL;
                        }
                        goto do_append_data;
                }
                release_sock(sk);
        }
        ulen += sizeof(struct udphdr);

        /*
         *      Get and verify the address.
         */
        if (usin) {
                if (msg->msg_namelen < sizeof(*usin))
                        return -EINVAL;
                if (usin->sin_family != AF_INET) {
                        if (usin->sin_family != AF_UNSPEC)
                                return -EAFNOSUPPORT;
                }

                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
                if (dport == 0)
                        return -EINVAL;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = inet->inet_daddr;
                dport = inet->inet_dport;
                /* Open fast path for connected socket.
                 * Route will not be used if at least one option is set.
                 */
                connected = 1;
        }

        ipcm_init_sk(&ipc, inet);
        ipc.gso_size = READ_ONCE(up->gso_size);

        if (msg->msg_controllen) {
                err = udp_cmsg_send(sk, msg, &ipc.gso_size);
                if (err > 0) {
                        err = ip_cmsg_send(sk, msg, &ipc,
                                           sk->sk_family == AF_INET6);
                        connected = 0;
                }
                if (unlikely(err < 0)) {
                        kfree(ipc.opt);
                        return err;
                }
                if (ipc.opt)
                        free = 1;
        }
        if (!ipc.opt) {
                struct ip_options_rcu *inet_opt;

                rcu_read_lock();
                inet_opt = rcu_dereference(inet->inet_opt);
                if (inet_opt) {
                        memcpy(&opt_copy, inet_opt,
                               sizeof(*inet_opt) + inet_opt->opt.optlen);
                        ipc.opt = &opt_copy.opt;
                }
                rcu_read_unlock();
        }

        if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
                err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
                                            (struct sockaddr *)usin,
                                            &msg->msg_namelen,
                                            &ipc.addr);
                if (err)
                        goto out_free;
                if (usin) {
                        if (usin->sin_port == 0) {
                                /* BPF program set invalid port. Reject it. */
                                err = -EINVAL;
                                goto out_free;
                        }
                        daddr = usin->sin_addr.s_addr;
                        dport = usin->sin_port;
                }
        }

        saddr = ipc.addr;
        ipc.addr = faddr = daddr;

        if (ipc.opt && ipc.opt->opt.srr) {
                if (!daddr) {
                        err = -EINVAL;
                        goto out_free;
                }
                faddr = ipc.opt->opt.faddr;
                connected = 0;
        }
        tos = get_rttos(&ipc, inet);
        scope = ip_sendmsg_scope(inet, &ipc, msg);
        if (scope == RT_SCOPE_LINK)
                connected = 0;

        uc_index = READ_ONCE(inet->uc_index);
        if (ipv4_is_multicast(daddr)) {
                if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
                        ipc.oif = READ_ONCE(inet->mc_index);
                if (!saddr)
                        saddr = READ_ONCE(inet->mc_addr);
                connected = 0;
        } else if (!ipc.oif) {
                ipc.oif = uc_index;
        } else if (ipv4_is_lbcast(daddr) && uc_index) {
                /* oif is set, packet is to local broadcast and
                 * uc_index is set. oif is most likely set
                 * by sk_bound_dev_if. If uc_index != oif check if the
                 * oif is an L3 master and uc_index is an L3 slave.
                 * If so, we want to allow the send using the uc_index.
                 */
                if (ipc.oif != uc_index &&
                    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
                                                              uc_index)) {
                        ipc.oif = uc_index;
                }
        }

        if (connected)
                rt = dst_rtable(sk_dst_check(sk, 0));

        if (!rt) {
                struct net *net = sock_net(sk);
                __u8 flow_flags = inet_sk_flowi_flags(sk);

                fl4 = &fl4_stack;

                flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope,
                                   sk->sk_protocol, flow_flags, faddr, saddr,
                                   dport, inet->inet_sport, sk->sk_uid);

                security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
                rt = ip_route_output_flow(net, fl4, sk);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
                        rt = NULL;
                        if (err == -ENETUNREACH)
                                IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
                        goto out;
                }

                err = -EACCES;
                if ((rt->rt_flags & RTCF_BROADCAST) &&
                    !sock_flag(sk, SOCK_BROADCAST))
                        goto out;
                if (connected)
                        sk_dst_set(sk, dst_clone(&rt->dst));
        }

        if (msg->msg_flags & MSG_CONFIRM)
                goto do_confirm;
back_from_confirm:

        saddr = fl4->saddr;
        if (!ipc.addr)
                daddr = ipc.addr = fl4->daddr;

        /* Lockless fast path for the non-corking case. */
        if (!corkreq) {
                struct inet_cork cork;

                skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
                                  sizeof(struct udphdr), &ipc, &rt,
                                  &cork, msg->msg_flags);
                err = PTR_ERR(skb);
                if (!IS_ERR_OR_NULL(skb))
                        err = udp_send_skb(skb, fl4, &cork);
                goto out;
        }

        lock_sock(sk);
        if (unlikely(up->pending)) {
                /* The socket is already corked while preparing it. */
                /* ... which is an evident application bug. --ANK */
                release_sock(sk);

                net_dbg_ratelimited("socket already corked\n");
                err = -EINVAL;
                goto out;
        }
        /*
         *      Now cork the socket to pend data.
         */
        fl4 = &inet->cork.fl.u.ip4;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
        fl4->fl4_sport = inet->inet_sport;
        WRITE_ONCE(up->pending, AF_INET);

do_append_data:
        up->len += ulen;
        err = ip_append_data(sk, fl4, getfrag, msg, ulen,
                             sizeof(struct udphdr), &ipc, &rt,
                             corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
        if (err)
                udp_flush_pending_frames(sk);
        else if (!corkreq)
                err = udp_push_pending_frames(sk);
        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
                WRITE_ONCE(up->pending, 0);
        release_sock(sk);

out:
        ip_rt_put(rt);
out_free:
        if (free)
                kfree(ipc.opt);
        if (!err)
                return len;
        /*
         * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
         * ENOBUFS might not be good (it's not tunable per se), but otherwise
         * we don't have a good statistic (IpOutDiscards but it can be too many
         * things).  We could add another new stat but at least for now that
         * seems like overkill.
         */
        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                UDP_INC_STATS(sock_net(sk),
                              UDP_MIB_SNDBUFERRORS, is_udplite);
        }
        return err;

do_confirm:
        if (msg->msg_flags & MSG_PROBE)
                dst_confirm_neigh(&rt->dst, &fl4->daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
        goto out;
}
EXPORT_SYMBOL(udp_sendmsg);

void udp_splice_eof(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct udp_sock *up = udp_sk(sk);

        if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
                return;

        lock_sock(sk);
        if (up->pending && !udp_test_bit(CORK, sk))
                udp_push_pending_frames(sk);
        release_sock(sk);
}
EXPORT_SYMBOL_GPL(udp_splice_eof);

#define UDP_SKB_IS_STATELESS 0x80000000

/* all head states (dst, sk, nf conntrack) except skb extensions are
 * cleared by udp_rcv().
 *
 * We need to preserve secpath, if present, to eventually process
 * IP_CMSG_PASSSEC at recvmsg() time.
 *
 * Other extensions can be cleared.
 */
static bool udp_try_make_stateless(struct sk_buff *skb)
{
        if (!skb_has_extensions(skb))
                return true;

        if (!secpath_exists(skb)) {
                skb_ext_reset(skb);
                return true;
        }

        return false;
}

static void udp_set_dev_scratch(struct sk_buff *skb)
{
        struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

        BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
        scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
        scratch->len = skb->len;
        scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
        scratch->is_linear = !skb_is_nonlinear(skb);
#endif
        if (udp_try_make_stateless(skb))
                scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
{
        /* We come here after udp_lib_checksum_complete() returned 0.
         * This means that __skb_checksum_complete() might have
         * set skb->csum_valid to 1.
         * On 64bit platforms, we can set csum_unnecessary
         * to true, but only if the skb is not shared.
         */
#if BITS_PER_LONG == 64
        if (!skb_shared(skb))
                udp_skb_scratch(skb)->csum_unnecessary = true;
#endif
}

static int udp_skb_truesize(struct sk_buff *skb)
{
        return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
        return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, unsigned int size,
                             int partial, bool rx_queue_lock_held)
{
        struct udp_sock *up = udp_sk(sk);
        struct sk_buff_head *sk_queue;
        unsigned int amt;

        if (likely(partial)) {
                up->forward_deficit += size;
                size = up->forward_deficit;
                if (size < READ_ONCE(up->forward_threshold) &&
                    !skb_queue_empty(&up->reader_queue))
                        return;
        } else {
                size += up->forward_deficit;
        }
        up->forward_deficit = 0;

        /* acquire the sk_receive_queue for fwd allocated memory scheduling,
         * if the caller doesn't hold it already
         */
        sk_queue = &sk->sk_receive_queue;
        if (!rx_queue_lock_held)
                spin_lock(&sk_queue->lock);

        amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
        sk_forward_alloc_add(sk, size - amt);

        if (amt)
                __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);

        atomic_sub(size, &sk->sk_rmem_alloc);

        /* this can save us from acquiring the rx queue lock on next receive */
        skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

        if (!rx_queue_lock_held)
                spin_unlock(&sk_queue->lock);
}

/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch.
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
        prefetch(&skb->data);
        udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
        prefetch(&skb->data);
        udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated on a per-cpu basis, instead of a
 * per-socket one (that would consume a cache line per socket).
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
        spinlock_t *busy;

        busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
        spin_lock(busy);
        return busy;
}

static void busylock_release(spinlock_t *busy)
{
        if (busy)
                spin_unlock(busy);
}

static int udp_rmem_schedule(struct sock *sk, int size)
{
        int delta;

        delta = size - sk->sk_forward_alloc;
        if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
                return -ENOBUFS;

        return 0;
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff_head *list = &sk->sk_receive_queue;
        unsigned int rmem, rcvbuf;
        spinlock_t *busy = NULL;
        int size, err = -ENOMEM;

        rmem = atomic_read(&sk->sk_rmem_alloc);
        rcvbuf = READ_ONCE(sk->sk_rcvbuf);
        size = skb->truesize;

        /* Immediately drop when the receive queue is full.
         * Cast to unsigned int performs the boundary check for INT_MAX.
         */
        if (rmem + size > rcvbuf) {
                if (rcvbuf > INT_MAX >> 1)
                        goto drop;

                /* Always allow at least one packet for small buffer. */
                if (rmem > rcvbuf)
                        goto drop;
        }

        /* Under mem pressure, it might be helpful to help udp_recvmsg()
         * having linear skbs :
         * - Reduce memory overhead and thus increase receive queue capacity
         * - Less cache line misses at copyout() time
         * - Less work at consume_skb() (less alien page frag freeing)
         */
        if (rmem > (rcvbuf >> 1)) {
                skb_condense(skb);
                size = skb->truesize;
                busy = busylock_acquire(sk);
        }

        udp_set_dev_scratch(skb);

        atomic_add(size, &sk->sk_rmem_alloc);

        spin_lock(&list->lock);
        err = udp_rmem_schedule(sk, size);
        if (err) {
                spin_unlock(&list->lock);
                goto uncharge_drop;
        }

        sk_forward_alloc_add(sk, -size);

        /* no need to setup a destructor, we will explicitly release the
         * forward allocated memory on dequeue
         */
        sock_skb_set_dropcount(sk, skb);

        __skb_queue_tail(list, skb);
        spin_unlock(&list->lock);

        if (!sock_flag(sk, SOCK_DEAD))
                INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);

        busylock_release(busy);
        return 0;

uncharge_drop:
        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
        atomic_inc(&sk->sk_drops);
        busylock_release(busy);
        return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
1795
udp_destruct_common(struct sock * sk)1796 void udp_destruct_common(struct sock *sk)
1797 {
1798 /* reclaim completely the forward allocated memory */
1799 struct udp_sock *up = udp_sk(sk);
1800 unsigned int total = 0;
1801 struct sk_buff *skb;
1802
1803 skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
1804 while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
1805 total += skb->truesize;
1806 kfree_skb(skb);
1807 }
1808 udp_rmem_release(sk, total, 0, true);
1809 }
1810 EXPORT_SYMBOL_GPL(udp_destruct_common);
1811
1812 static void udp_destruct_sock(struct sock *sk)
1813 {
1814 udp_destruct_common(sk);
1815 inet_sock_destruct(sk);
1816 }
1817
1818 int udp_init_sock(struct sock *sk)
1819 {
1820 udp_lib_init_sock(sk);
1821 sk->sk_destruct = udp_destruct_sock;
1822 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
1823 return 0;
1824 }
1825
1826 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
1827 {
1828 if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset)))
1829 sk_peek_offset_bwd(sk, len);
1830
1831 if (!skb_unref(skb))
1832 return;
1833
1834 /* In the more common cases we cleared the head states previously,
1835 * see __udp_queue_rcv_skb().
1836 */
1837 if (unlikely(udp_skb_has_head_state(skb)))
1838 skb_release_head_state(skb);
1839 __consume_stateless_skb(skb);
1840 }
1841 EXPORT_SYMBOL_GPL(skb_consume_udp);
1842
1843 static struct sk_buff *__first_packet_length(struct sock *sk,
1844 struct sk_buff_head *rcvq,
1845 unsigned int *total)
1846 {
1847 struct sk_buff *skb;
1848
1849 while ((skb = skb_peek(rcvq)) != NULL) {
1850 if (udp_lib_checksum_complete(skb)) {
1851 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
1852 IS_UDPLITE(sk));
1853 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
1854 IS_UDPLITE(sk));
1855 atomic_inc(&sk->sk_drops);
1856 __skb_unlink(skb, rcvq);
1857 *total += skb->truesize;
1858 kfree_skb(skb);
1859 } else {
1860 udp_skb_csum_unnecessary_set(skb);
1861 break;
1862 }
1863 }
1864 return skb;
1865 }
1866
1867 /**
1868 * first_packet_length - return length of first packet in receive queue
1869 * @sk: socket
1870 *
1871 * Drops all bad-checksum frames until a valid one is found.
1872 * Returns the length of the found skb, or -1 if none is found.
1873 */
1874 static int first_packet_length(struct sock *sk)
1875 {
1876 struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
1877 struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
1878 unsigned int total = 0;
1879 struct sk_buff *skb;
1880 int res;
1881
1882 spin_lock_bh(&rcvq->lock);
1883 skb = __first_packet_length(sk, rcvq, &total);
1884 if (!skb && !skb_queue_empty_lockless(sk_queue)) {
1885 spin_lock(&sk_queue->lock);
1886 skb_queue_splice_tail_init(sk_queue, rcvq);
1887 spin_unlock(&sk_queue->lock);
1888
1889 skb = __first_packet_length(sk, rcvq, &total);
1890 }
1891 res = skb ? skb->len : -1;
1892 if (total)
1893 udp_rmem_release(sk, total, 1, false);
1894 spin_unlock_bh(&rcvq->lock);
1895 return res;
1896 }
1897
1898 /*
1899 * IOCTL requests applicable to the UDP protocol
1900 */
1901
1902 int udp_ioctl(struct sock *sk, int cmd, int *karg)
1903 {
1904 switch (cmd) {
1905 case SIOCOUTQ:
1906 {
1907 *karg = sk_wmem_alloc_get(sk);
1908 return 0;
1909 }
1910
1911 case SIOCINQ:
1912 {
1913 *karg = max_t(int, 0, first_packet_length(sk));
1914 return 0;
1915 }
1916
1917 default:
1918 return -ENOIOCTLCMD;
1919 }
1920
1921 return 0;
1922 }
1923 EXPORT_SYMBOL(udp_ioctl);
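/* Hypothetical userspace sketch of the two requests handled above
 * (not part of this file; error handling elided):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int pending, unsent;
 *
 *	ioctl(fd, SIOCINQ, &pending);	// length of next datagram, 0 if none
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes still queued for transmit
 */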
1924
1925 struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
1926 int *off, int *err)
1927 {
1928 struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
1929 struct sk_buff_head *queue;
1930 struct sk_buff *last;
1931 long timeo;
1932 int error;
1933
1934 queue = &udp_sk(sk)->reader_queue;
1935 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1936 do {
1937 struct sk_buff *skb;
1938
1939 error = sock_error(sk);
1940 if (error)
1941 break;
1942
1943 error = -EAGAIN;
1944 do {
1945 spin_lock_bh(&queue->lock);
1946 skb = __skb_try_recv_from_queue(sk, queue, flags, off,
1947 err, &last);
1948 if (skb) {
1949 if (!(flags & MSG_PEEK))
1950 udp_skb_destructor(sk, skb);
1951 spin_unlock_bh(&queue->lock);
1952 return skb;
1953 }
1954
1955 if (skb_queue_empty_lockless(sk_queue)) {
1956 spin_unlock_bh(&queue->lock);
1957 goto busy_check;
1958 }
1959
1960 /* refill the reader queue and walk it again;
1961 * keep both queues locked to avoid re-acquiring
1962 * the sk_receive_queue lock if fwd memory scheduling
1963 * is needed.
1964 */
1965 spin_lock(&sk_queue->lock);
1966 skb_queue_splice_tail_init(sk_queue, queue);
1967
1968 skb = __skb_try_recv_from_queue(sk, queue, flags, off,
1969 err, &last);
1970 if (skb && !(flags & MSG_PEEK))
1971 udp_skb_dtor_locked(sk, skb);
1972 spin_unlock(&sk_queue->lock);
1973 spin_unlock_bh(&queue->lock);
1974 if (skb)
1975 return skb;
1976
1977 busy_check:
1978 if (!sk_can_busy_loop(sk))
1979 break;
1980
1981 sk_busy_loop(sk, flags & MSG_DONTWAIT);
1982 } while (!skb_queue_empty_lockless(sk_queue));
1983
1984 /* sk_queue is empty, reader_queue may contain peeked packets */
1985 } while (timeo &&
1986 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
1987 &error, &timeo,
1988 (struct sk_buff *)sk_queue));
1989
1990 *err = error;
1991 return NULL;
1992 }
1993 EXPORT_SYMBOL(__skb_recv_udp);
1994
1995 int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
1996 {
1997 struct sk_buff *skb;
1998 int err;
1999
2000 try_again:
2001 skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
2002 if (!skb)
2003 return err;
2004
2005 if (udp_lib_checksum_complete(skb)) {
2006 int is_udplite = IS_UDPLITE(sk);
2007 struct net *net = sock_net(sk);
2008
2009 __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
2010 __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
2011 atomic_inc(&sk->sk_drops);
2012 kfree_skb(skb);
2013 goto try_again;
2014 }
2015
2016 WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
2017 return recv_actor(sk, skb);
2018 }
2019 EXPORT_SYMBOL(udp_read_skb);
2020
2021 /*
2022 * This should be easy: if there is something there, we
2023 * return it; otherwise we block.
2024 */
2025
2026 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
2027 int *addr_len)
2028 {
2029 struct inet_sock *inet = inet_sk(sk);
2030 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
2031 struct sk_buff *skb;
2032 unsigned int ulen, copied;
2033 int off, err, peeking = flags & MSG_PEEK;
2034 int is_udplite = IS_UDPLITE(sk);
2035 bool checksum_valid = false;
2036
2037 if (flags & MSG_ERRQUEUE)
2038 return ip_recv_error(sk, msg, len, addr_len);
2039
2040 try_again:
2041 off = sk_peek_offset(sk, flags);
2042 skb = __skb_recv_udp(sk, flags, &off, &err);
2043 if (!skb)
2044 return err;
2045
2046 ulen = udp_skb_len(skb);
2047 copied = len;
2048 if (copied > ulen - off)
2049 copied = ulen - off;
2050 else if (copied < ulen)
2051 msg->msg_flags |= MSG_TRUNC;
2052
2053 /*
2054 * If checksum is needed at all, try to do it while copying the
2055 * data. If the data is truncated, or if we only want a partial
2056 * coverage checksum (UDP-Lite), do it before the copy.
2057 */
2058
2059 if (copied < ulen || peeking ||
2060 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
2061 checksum_valid = udp_skb_csum_unnecessary(skb) ||
2062 !__udp_lib_checksum_complete(skb);
2063 if (!checksum_valid)
2064 goto csum_copy_err;
2065 }
2066
2067 if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
2068 if (udp_skb_is_linear(skb))
2069 err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
2070 else
2071 err = skb_copy_datagram_msg(skb, off, msg, copied);
2072 } else {
2073 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
2074
2075 if (err == -EINVAL)
2076 goto csum_copy_err;
2077 }
2078
2079 if (unlikely(err)) {
2080 if (!peeking) {
2081 atomic_inc(&sk->sk_drops);
2082 UDP_INC_STATS(sock_net(sk),
2083 UDP_MIB_INERRORS, is_udplite);
2084 }
2085 kfree_skb(skb);
2086 return err;
2087 }
2088
2089 if (!peeking)
2090 UDP_INC_STATS(sock_net(sk),
2091 UDP_MIB_INDATAGRAMS, is_udplite);
2092
2093 sock_recv_cmsgs(msg, sk, skb);
2094
2095 /* Copy the address. */
2096 if (sin) {
2097 sin->sin_family = AF_INET;
2098 sin->sin_port = udp_hdr(skb)->source;
2099 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
2100 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2101 *addr_len = sizeof(*sin);
2102
2103 BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
2104 (struct sockaddr *)sin,
2105 addr_len);
2106 }
2107
2108 if (udp_test_bit(GRO_ENABLED, sk))
2109 udp_cmsg_recv(msg, sk, skb);
2110
2111 if (inet_cmsg_flags(inet))
2112 ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
2113
2114 err = copied;
2115 if (flags & MSG_TRUNC)
2116 err = ulen;
2117
2118 skb_consume_udp(sk, skb, peeking ? -err : err);
2119 return err;
2120
2121 csum_copy_err:
2122 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
2123 udp_skb_destructor)) {
2124 UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
2125 UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
2126 }
2127 kfree_skb(skb);
2128
2129 /* starting over for a new packet, but check if we need to yield */
2130 cond_resched();
2131 msg->msg_flags &= ~MSG_TRUNC;
2132 goto try_again;
2133 }
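/* Hypothetical userspace sketch of the MSG_PEEK/MSG_TRUNC behaviour
 * implemented above (not part of this file):
 *
 *	char buf[2048];
 *	ssize_t n;
 *
 *	// Peek: the datagram stays queued for a later read.
 *	n = recv(fd, buf, sizeof(buf), MSG_PEEK);
 *
 *	// With MSG_TRUNC the return value is the full datagram length,
 *	// even if it did not fit into buf.
 *	n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 */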
2134
2135 int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2136 {
2137 /* This check is replicated from __ip4_datagram_connect() and
2138 * intended to prevent the BPF program called below from accessing
2139 * bytes that are outside the bound specified by the user in addr_len.
2140 */
2141 if (addr_len < sizeof(struct sockaddr_in))
2142 return -EINVAL;
2143
2144 return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len);
2145 }
2146 EXPORT_SYMBOL(udp_pre_connect);
2147
2148 static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2149 {
2150 int res;
2151
2152 lock_sock(sk);
2153 res = __ip4_datagram_connect(sk, uaddr, addr_len);
2154 if (!res)
2155 udp4_hash4(sk);
2156 release_sock(sk);
2157 return res;
2158 }
2159
2160 int __udp_disconnect(struct sock *sk, int flags)
2161 {
2162 struct inet_sock *inet = inet_sk(sk);
2163 /*
2164 * 1003.1g - break association.
2165 */
2166
2167 sk->sk_state = TCP_CLOSE;
2168 inet->inet_daddr = 0;
2169 inet->inet_dport = 0;
2170 sock_rps_reset_rxhash(sk);
2171 sk->sk_bound_dev_if = 0;
2172 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
2173 inet_reset_saddr(sk);
2174 if (sk->sk_prot->rehash &&
2175 (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2176 sk->sk_prot->rehash(sk);
2177 }
2178
2179 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
2180 sk->sk_prot->unhash(sk);
2181 inet->inet_sport = 0;
2182 }
2183 sk_dst_reset(sk);
2184 return 0;
2185 }
2186 EXPORT_SYMBOL(__udp_disconnect);
2187
2188 int udp_disconnect(struct sock *sk, int flags)
2189 {
2190 lock_sock(sk);
2191 __udp_disconnect(sk, flags);
2192 release_sock(sk);
2193 return 0;
2194 }
2195 EXPORT_SYMBOL(udp_disconnect);
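/* Hypothetical userspace sketch: connect() with AF_UNSPEC is the usual
 * way to reach udp_disconnect() and break the association
 * (not part of this file):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// socket reverts to unconnected
 */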
2196
2197 void udp_lib_unhash(struct sock *sk)
2198 {
2199 if (sk_hashed(sk)) {
2200 struct udp_table *udptable = udp_get_table_prot(sk);
2201 struct udp_hslot *hslot, *hslot2;
2202
2203 hslot = udp_hashslot(udptable, sock_net(sk),
2204 udp_sk(sk)->udp_port_hash);
2205 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
2206
2207 spin_lock_bh(&hslot->lock);
2208 if (rcu_access_pointer(sk->sk_reuseport_cb))
2209 reuseport_detach_sock(sk);
2210 if (sk_del_node_init_rcu(sk)) {
2211 hslot->count--;
2212 inet_sk(sk)->inet_num = 0;
2213 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
2214
2215 spin_lock(&hslot2->lock);
2216 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
2217 hslot2->count--;
2218 spin_unlock(&hslot2->lock);
2219
2220 udp_unhash4(udptable, sk);
2221 }
2222 spin_unlock_bh(&hslot->lock);
2223 }
2224 }
2225 EXPORT_SYMBOL(udp_lib_unhash);
2226
2227 /*
2228 * inet_rcv_saddr was changed, so we must rehash the secondary hash
2229 */
2230 void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
2231 {
2232 if (sk_hashed(sk)) {
2233 struct udp_table *udptable = udp_get_table_prot(sk);
2234 struct udp_hslot *hslot, *hslot2, *nhslot2;
2235
2236 hslot = udp_hashslot(udptable, sock_net(sk),
2237 udp_sk(sk)->udp_port_hash);
2238 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
2239 nhslot2 = udp_hashslot2(udptable, newhash);
2240 udp_sk(sk)->udp_portaddr_hash = newhash;
2241
2242 if (hslot2 != nhslot2 ||
2243 rcu_access_pointer(sk->sk_reuseport_cb)) {
2244 /* we must lock primary chain too */
2245 spin_lock_bh(&hslot->lock);
2246 if (rcu_access_pointer(sk->sk_reuseport_cb))
2247 reuseport_detach_sock(sk);
2248
2249 if (hslot2 != nhslot2) {
2250 spin_lock(&hslot2->lock);
2251 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
2252 hslot2->count--;
2253 spin_unlock(&hslot2->lock);
2254
2255 spin_lock(&nhslot2->lock);
2256 hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
2257 &nhslot2->head);
2258 nhslot2->count++;
2259 spin_unlock(&nhslot2->lock);
2260 }
2261
2262 spin_unlock_bh(&hslot->lock);
2263 }
2264
2265 /* Now process hash4 if necessary:
2266 * (1) update hslot4;
2267 * (2) update hslot2->hash4_cnt.
2268 * Note that hslot2/hslot4 should be checked separately, as
2269 * either of them may change with the other unchanged.
2270 */
2271 if (udp_hashed4(sk)) {
2272 spin_lock_bh(&hslot->lock);
2273
2274 udp_rehash4(udptable, sk, newhash4);
2275 if (hslot2 != nhslot2) {
2276 spin_lock(&hslot2->lock);
2277 udp_hash4_dec(hslot2);
2278 spin_unlock(&hslot2->lock);
2279
2280 spin_lock(&nhslot2->lock);
2281 udp_hash4_inc(nhslot2);
2282 spin_unlock(&nhslot2->lock);
2283 }
2284
2285 spin_unlock_bh(&hslot->lock);
2286 }
2287 }
2288 }
2289 EXPORT_SYMBOL(udp_lib_rehash);
2290
2291 void udp_v4_rehash(struct sock *sk)
2292 {
2293 u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
2294 inet_sk(sk)->inet_rcv_saddr,
2295 inet_sk(sk)->inet_num);
2296 u16 new_hash4 = udp_ehashfn(sock_net(sk),
2297 sk->sk_rcv_saddr, sk->sk_num,
2298 sk->sk_daddr, sk->sk_dport);
2299
2300 udp_lib_rehash(sk, new_hash, new_hash4);
2301 }
2302
2303 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2304 {
2305 int rc;
2306
2307 if (inet_sk(sk)->inet_daddr) {
2308 sock_rps_save_rxhash(sk, skb);
2309 sk_mark_napi_id(sk, skb);
2310 sk_incoming_cpu_update(sk);
2311 } else {
2312 sk_mark_napi_id_once(sk, skb);
2313 }
2314
2315 rc = __udp_enqueue_schedule_skb(sk, skb);
2316 if (rc < 0) {
2317 int is_udplite = IS_UDPLITE(sk);
2318 int drop_reason;
2319
2320 /* Note that an ENOMEM error is charged twice */
2321 if (rc == -ENOMEM) {
2322 UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
2323 is_udplite);
2324 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
2325 } else {
2326 UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS,
2327 is_udplite);
2328 drop_reason = SKB_DROP_REASON_PROTO_MEM;
2329 }
2330 UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
2331 trace_udp_fail_queue_rcv_skb(rc, sk, skb);
2332 sk_skb_reason_drop(sk, skb, drop_reason);
2333 return -1;
2334 }
2335
2336 return 0;
2337 }
2338
2339 /* returns:
2340 * -1: error
2341 * 0: success
2342 * >0: "udp encap" protocol resubmission
2343 *
2344 * Note that in the success and error cases, the skb is assumed to
2345 * have either been requeued or freed.
2346 */
2347 static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
2348 {
2349 int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
2350 struct udp_sock *up = udp_sk(sk);
2351 int is_udplite = IS_UDPLITE(sk);
2352
2353 /*
2354 * Charge it to the socket, dropping if the queue is full.
2355 */
2356 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
2357 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2358 goto drop;
2359 }
2360 nf_reset_ct(skb);
2361
2362 if (static_branch_unlikely(&udp_encap_needed_key) &&
2363 READ_ONCE(up->encap_type)) {
2364 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
2365
2366 /*
2367 * This is an encapsulation socket so pass the skb to
2368 * the socket's udp_encap_rcv() hook. Otherwise, just
2369 * fall through and pass this up the UDP socket.
2370 * up->encap_rcv() returns the following values:
2371 * =0 if skb was successfully passed to the encap
2372 * handler or was discarded by it.
2373 * >0 if skb should be passed on to UDP.
2374 * <0 if skb should be resubmitted as proto -N
2375 */
2376
2377 /* if we're overly short, let UDP handle it */
2378 encap_rcv = READ_ONCE(up->encap_rcv);
2379 if (encap_rcv) {
2380 int ret;
2381
2382 /* Verify checksum before giving to encap */
2383 if (udp_lib_checksum_complete(skb))
2384 goto csum_error;
2385
2386 ret = encap_rcv(sk, skb);
2387 if (ret <= 0) {
2388 __UDP_INC_STATS(sock_net(sk),
2389 UDP_MIB_INDATAGRAMS,
2390 is_udplite);
2391 return -ret;
2392 }
2393 }
2394
2395 /* FALLTHROUGH -- it's a UDP packet */
2396 }
2397
2398 /*
2399 * UDP-Lite specific tests, ignored on UDP sockets
2400 */
2401 if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
2402 u16 pcrlen = READ_ONCE(up->pcrlen);
2403
2404 /*
2405 * MIB statistics other than incrementing the error count are
2406 * disabled for the following two types of errors: these depend
2407 * on the application settings, not on the functioning of the
2408 * protocol stack as such.
2409 *
2410 * RFC 3828 here recommends (sec 3.3): "There should also be a
2411 * way ... to ... at least let the receiving application block
2412 * delivery of packets with coverage values less than a value
2413 * provided by the application."
2414 */
2415 if (pcrlen == 0) { /* full coverage was set */
2416 net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
2417 UDP_SKB_CB(skb)->cscov, skb->len);
2418 goto drop;
2419 }
2420 /* The next case involves violating the min. coverage requested
2421 * by the receiver. This is subtle: if the receiver wants x and x is
2422 * greater than the buffersize/MTU, then the receiver will complain
2423 * that it wants x while the sender emits packets of smaller size y.
2424 * Therefore the above ...()->partial_cov statement is essential.
2425 */
2426 if (UDP_SKB_CB(skb)->cscov < pcrlen) {
2427 net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
2428 UDP_SKB_CB(skb)->cscov, pcrlen);
2429 goto drop;
2430 }
2431 }
2432
2433 prefetch(&sk->sk_rmem_alloc);
2434 if (rcu_access_pointer(sk->sk_filter) &&
2435 udp_lib_checksum_complete(skb))
2436 goto csum_error;
2437
2438 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
2439 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2440 goto drop;
2441 }
2442
2443 udp_csum_pull_header(skb);
2444
2445 ipv4_pktinfo_prepare(sk, skb, true);
2446 return __udp_queue_rcv_skb(sk, skb);
2447
2448 csum_error:
2449 drop_reason = SKB_DROP_REASON_UDP_CSUM;
2450 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
2451 drop:
2452 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
2453 atomic_inc(&sk->sk_drops);
2454 sk_skb_reason_drop(sk, skb, drop_reason);
2455 return -1;
2456 }
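/* A minimal sketch of an encap_rcv() handler honouring the contract
 * documented above (illustrative; is_my_tunnel_header() is a
 * hypothetical helper, and real users typically register their hook
 * via setup_udp_tunnel_sock()):
 *
 *	static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (!is_my_tunnel_header(skb))
 *			return 1;	// >0: not ours, pass on to UDP
 *		consume_skb(skb);	// handled here
 *		return 0;		// =0: consumed or discarded
 *	}
 */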
2457
2458 static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2459 {
2460 struct sk_buff *next, *segs;
2461 int ret;
2462
2463 if (likely(!udp_unexpected_gso(sk, skb)))
2464 return udp_queue_rcv_one_skb(sk, skb);
2465
2466 BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
2467 __skb_push(skb, -skb_mac_offset(skb));
2468 segs = udp_rcv_segment(sk, skb, true);
2469 skb_list_walk_safe(segs, skb, next) {
2470 __skb_pull(skb, skb_transport_offset(skb));
2471
2472 udp_post_segment_fix_csum(skb);
2473 ret = udp_queue_rcv_one_skb(sk, skb);
2474 if (ret > 0)
2475 ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
2476 }
2477 return 0;
2478 }
2479
2480 /* For TCP sockets, sk_rx_dst is protected by the socket lock.
2481 * For UDP, we use xchg() to guard against concurrent changes.
2482 */
2483 bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
2484 {
2485 struct dst_entry *old;
2486
2487 if (dst_hold_safe(dst)) {
2488 old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst)));
2489 dst_release(old);
2490 return old != dst;
2491 }
2492 return false;
2493 }
2494 EXPORT_SYMBOL(udp_sk_rx_dst_set);
2495
2496 /*
2497 * Multicasts and broadcasts go to each listener.
2498 *
2499 * Note: called only from the BH handler context.
2500 */
2501 static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
2502 struct udphdr *uh,
2503 __be32 saddr, __be32 daddr,
2504 struct udp_table *udptable,
2505 int proto)
2506 {
2507 struct sock *sk, *first = NULL;
2508 unsigned short hnum = ntohs(uh->dest);
2509 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
2510 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
2511 unsigned int offset = offsetof(typeof(*sk), sk_node);
2512 int dif = skb->dev->ifindex;
2513 int sdif = inet_sdif(skb);
2514 struct hlist_node *node;
2515 struct sk_buff *nskb;
2516
2517 if (use_hash2) {
2518 hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
2519 udptable->mask;
2520 hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
2521 start_lookup:
2522 hslot = &udptable->hash2[hash2].hslot;
2523 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
2524 }
2525
2526 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
2527 if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
2528 uh->source, saddr, dif, sdif, hnum))
2529 continue;
2530
2531 if (!first) {
2532 first = sk;
2533 continue;
2534 }
2535 nskb = skb_clone(skb, GFP_ATOMIC);
2536
2537 if (unlikely(!nskb)) {
2538 atomic_inc(&sk->sk_drops);
2539 __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
2540 IS_UDPLITE(sk));
2541 __UDP_INC_STATS(net, UDP_MIB_INERRORS,
2542 IS_UDPLITE(sk));
2543 continue;
2544 }
2545 if (udp_queue_rcv_skb(sk, nskb) > 0)
2546 consume_skb(nskb);
2547 }
2548
2549 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
2550 if (use_hash2 && hash2 != hash2_any) {
2551 hash2 = hash2_any;
2552 goto start_lookup;
2553 }
2554
2555 if (first) {
2556 if (udp_queue_rcv_skb(first, skb) > 0)
2557 consume_skb(skb);
2558 } else {
2559 kfree_skb(skb);
2560 __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
2561 proto == IPPROTO_UDPLITE);
2562 }
2563 return 0;
2564 }
2565
2566 /* Initialize UDP checksum. If this returns zero (success) and sets
2567 * CHECKSUM_UNNECESSARY, no more checks are required.
2568 * Otherwise, csum completion requires checksumming the packet body,
2569 * including the udp header, and folding it into skb->csum.
2570 */
2571 static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2572 int proto)
2573 {
2574 int err;
2575
2576 UDP_SKB_CB(skb)->partial_cov = 0;
2577 UDP_SKB_CB(skb)->cscov = skb->len;
2578
2579 if (proto == IPPROTO_UDPLITE) {
2580 err = udplite_checksum_init(skb, uh);
2581 if (err)
2582 return err;
2583
2584 if (UDP_SKB_CB(skb)->partial_cov) {
2585 skb->csum = inet_compute_pseudo(skb, proto);
2586 return 0;
2587 }
2588 }
2589
2590 /* Note, we are only interested in != 0 or == 0, thus the
2591 * force to int.
2592 */
2593 err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2594 inet_compute_pseudo);
2595 if (err)
2596 return err;
2597
2598 if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
2599 /* If SW calculated the value, we know it's bad */
2600 if (skb->csum_complete_sw)
2601 return 1;
2602
2603 /* HW says the value is bad. Let's validate that.
2604 * skb->csum is no longer the full packet checksum,
2605 * so don't treat it as such.
2606 */
2607 skb_checksum_complete_unset(skb);
2608 }
2609
2610 return 0;
2611 }
2612
2613 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
2614 * return code conversion for ip layer consumption
2615 */
2616 static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
2617 struct udphdr *uh)
2618 {
2619 int ret;
2620
2621 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2622 skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
2623
2624 ret = udp_queue_rcv_skb(sk, skb);
2625
2626 /* a return value > 0 means we must resubmit the input, but
2627 * the ip layer wants the return value to be -protocol or 0
2628 */
2629 if (ret > 0)
2630 return -ret;
2631 return 0;
2632 }
2633
2634 /*
2635 * All we need to do is get the socket, and then do a checksum.
2636 */
2637
2638 int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2639 int proto)
2640 {
2641 struct sock *sk = NULL;
2642 struct udphdr *uh;
2643 unsigned short ulen;
2644 struct rtable *rt = skb_rtable(skb);
2645 __be32 saddr, daddr;
2646 struct net *net = dev_net(skb->dev);
2647 bool refcounted;
2648 int drop_reason;
2649
2650 drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
2651
2652 /*
2653 * Validate the packet.
2654 */
2655 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2656 goto drop; /* No space for header. */
2657
2658 uh = udp_hdr(skb);
2659 ulen = ntohs(uh->len);
2660 saddr = ip_hdr(skb)->saddr;
2661 daddr = ip_hdr(skb)->daddr;
2662
2663 if (ulen > skb->len)
2664 goto short_packet;
2665
2666 if (proto == IPPROTO_UDP) {
2667 /* UDP validates ulen. */
2668 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
2669 goto short_packet;
2670 uh = udp_hdr(skb);
2671 }
2672
2673 if (udp4_csum_init(skb, uh, proto))
2674 goto csum_error;
2675
2676 sk = inet_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
2677 &refcounted, udp_ehashfn);
2678 if (IS_ERR(sk))
2679 goto no_sk;
2680
2681 if (sk) {
2682 struct dst_entry *dst = skb_dst(skb);
2683 int ret;
2684
2685 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
2686 udp_sk_rx_dst_set(sk, dst);
2687
2688 ret = udp_unicast_rcv_skb(sk, skb, uh);
2689 if (refcounted)
2690 sock_put(sk);
2691 return ret;
2692 }
2693
2694 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
2695 return __udp4_lib_mcast_deliver(net, skb, uh,
2696 saddr, daddr, udptable, proto);
2697
2698 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
2699 if (sk)
2700 return udp_unicast_rcv_skb(sk, skb, uh);
2701 no_sk:
2702 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2703 goto drop;
2704 nf_reset_ct(skb);
2705
2706 /* No socket. Drop the packet silently if the checksum is wrong */
2707 if (udp_lib_checksum_complete(skb))
2708 goto csum_error;
2709
2710 drop_reason = SKB_DROP_REASON_NO_SOCKET;
2711 __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
2712 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
2713
2714 /*
2715 * Hmm. We got a UDP packet to a port on which we
2716 * don't want to listen. Ignore it.
2717 */
2718 sk_skb_reason_drop(sk, skb, drop_reason);
2719 return 0;
2720
2721 short_packet:
2722 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2723 net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
2724 proto == IPPROTO_UDPLITE ? "Lite" : "",
2725 &saddr, ntohs(uh->source),
2726 ulen, skb->len,
2727 &daddr, ntohs(uh->dest));
2728 goto drop;
2729
2730 csum_error:
2731 /*
2732 * RFC1122: OK. Discards the bad packet silently (as far as
2733 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
2734 */
2735 drop_reason = SKB_DROP_REASON_UDP_CSUM;
2736 net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
2737 proto == IPPROTO_UDPLITE ? "Lite" : "",
2738 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
2739 ulen);
2740 __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
2741 drop:
2742 __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
2743 sk_skb_reason_drop(sk, skb, drop_reason);
2744 return 0;
2745 }
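/* Side note: the ICMP port unreachable sent above is what a connected
 * userspace UDP socket later observes as ECONNREFUSED. Hypothetical
 * sketch (not part of this file):
 *
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer)); // closed port
 *	send(fd, "x", 1, 0);		// triggers the ICMP error
 *	if (recv(fd, buf, sizeof(buf), 0) < 0 && errno == ECONNREFUSED)
 *		;			// error was queued back to the socket
 */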
2746
2747 /* We can only early demux multicast if there is a single matching socket.
2748 * If more than one socket is found, return NULL.
2749 */
2750 static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
2751 __be16 loc_port, __be32 loc_addr,
2752 __be16 rmt_port, __be32 rmt_addr,
2753 int dif, int sdif)
2754 {
2755 struct udp_table *udptable = net->ipv4.udp_table;
2756 unsigned short hnum = ntohs(loc_port);
2757 struct sock *sk, *result;
2758 struct udp_hslot *hslot;
2759 unsigned int slot;
2760
2761 slot = udp_hashfn(net, hnum, udptable->mask);
2762 hslot = &udptable->hash[slot];
2763
2764 /* Do not bother scanning a list that is too long */
2765 if (hslot->count > 10)
2766 return NULL;
2767
2768 result = NULL;
2769 sk_for_each_rcu(sk, &hslot->head) {
2770 if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
2771 rmt_port, rmt_addr, dif, sdif, hnum)) {
2772 if (result)
2773 return NULL;
2774 result = sk;
2775 }
2776 }
2777
2778 return result;
2779 }
2780
2781 /* For unicast we should only early demux connected sockets or we can
2782 * break forwarding setups. The chains here can be long, so only check
2783 * if the first socket is an exact match, and if not, move on.
2784 */
2785 static struct sock *__udp4_lib_demux_lookup(struct net *net,
2786 __be16 loc_port, __be32 loc_addr,
2787 __be16 rmt_port, __be32 rmt_addr,
2788 int dif, int sdif)
2789 {
2790 struct udp_table *udptable = net->ipv4.udp_table;
2791 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
2792 unsigned short hnum = ntohs(loc_port);
2793 struct udp_hslot *hslot2;
2794 unsigned int hash2;
2795 __portpair ports;
2796 struct sock *sk;
2797
2798 hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
2799 hslot2 = udp_hashslot2(udptable, hash2);
2800 ports = INET_COMBINED_PORTS(rmt_port, hnum);
2801
2802 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
2803 if (inet_match(net, sk, acookie, ports, dif, sdif))
2804 return sk;
2805 /* Only check first socket in chain */
2806 break;
2807 }
2808 return NULL;
2809 }
2810
2811 int udp_v4_early_demux(struct sk_buff *skb)
2812 {
2813 struct net *net = dev_net(skb->dev);
2814 struct in_device *in_dev = NULL;
2815 const struct iphdr *iph;
2816 const struct udphdr *uh;
2817 struct sock *sk = NULL;
2818 struct dst_entry *dst;
2819 int dif = skb->dev->ifindex;
2820 int sdif = inet_sdif(skb);
2821 int ours;
2822
2823 /* validate the packet */
2824 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
2825 return 0;
2826
2827 iph = ip_hdr(skb);
2828 uh = udp_hdr(skb);
2829
2830 if (skb->pkt_type == PACKET_MULTICAST) {
2831 in_dev = __in_dev_get_rcu(skb->dev);
2832
2833 if (!in_dev)
2834 return 0;
2835
2836 ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
2837 iph->protocol);
2838 if (!ours)
2839 return 0;
2840
2841 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
2842 uh->source, iph->saddr,
2843 dif, sdif);
2844 } else if (skb->pkt_type == PACKET_HOST) {
2845 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
2846 uh->source, iph->saddr, dif, sdif);
2847 }
2848
2849 if (!sk)
2850 return 0;
2851
2852 skb->sk = sk;
2853 DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
2854 skb->destructor = sock_pfree;
2855 dst = rcu_dereference(sk->sk_rx_dst);
2856
2857 if (dst)
2858 dst = dst_check(dst, 0);
2859 if (dst) {
2860 u32 itag = 0;
2861
2862 /* Set noref for now.
2863 * Any place that wants to hold the dst has to call
2864 * dst_hold_safe().
2865 */
2866 skb_dst_set_noref(skb, dst);
2867
2868 /* for unconnected multicast sockets we need to validate
2869 * the source on each packet
2870 */
2871 if (!inet_sk(sk)->inet_daddr && in_dev)
2872 return ip_mc_validate_source(skb, iph->daddr,
2873 iph->saddr,
2874 ip4h_dscp(iph),
2875 skb->dev, in_dev, &itag);
2876 }
2877 return 0;
2878 }
2879
2880 int udp_rcv(struct sk_buff *skb)
2881 {
2882 return __udp4_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
2883 }
2884
2885 void udp_destroy_sock(struct sock *sk)
2886 {
2887 struct udp_sock *up = udp_sk(sk);
2888 bool slow = lock_sock_fast(sk);
2889
2890 /* protects from races with udp_abort() */
2891 sock_set_flag(sk, SOCK_DEAD);
2892 udp_flush_pending_frames(sk);
2893 unlock_sock_fast(sk, slow);
2894 if (static_branch_unlikely(&udp_encap_needed_key)) {
2895 if (up->encap_type) {
2896 void (*encap_destroy)(struct sock *sk);
2897 encap_destroy = READ_ONCE(up->encap_destroy);
2898 if (encap_destroy)
2899 encap_destroy(sk);
2900 }
2901 if (udp_test_bit(ENCAP_ENABLED, sk))
2902 static_branch_dec(&udp_encap_needed_key);
2903 }
2904 }
2905
2906 static void set_xfrm_gro_udp_encap_rcv(__u16 encap_type, unsigned short family,
2907 struct sock *sk)
2908 {
2909 #ifdef CONFIG_XFRM
2910 if (udp_test_bit(GRO_ENABLED, sk) && encap_type == UDP_ENCAP_ESPINUDP) {
2911 if (family == AF_INET)
2912 WRITE_ONCE(udp_sk(sk)->gro_receive, xfrm4_gro_udp_encap_rcv);
2913 else if (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6)
2914 WRITE_ONCE(udp_sk(sk)->gro_receive, ipv6_stub->xfrm6_gro_udp_encap_rcv);
2915 }
2916 #endif
2917 }
2918
2919 /*
2920 * Socket option code for UDP
2921 */
2922 int udp_lib_setsockopt(struct sock *sk, int level, int optname,
2923 sockptr_t optval, unsigned int optlen,
2924 int (*push_pending_frames)(struct sock *))
2925 {
2926 struct udp_sock *up = udp_sk(sk);
2927 int val, valbool;
2928 int err = 0;
2929 int is_udplite = IS_UDPLITE(sk);
2930
2931 if (level == SOL_SOCKET) {
2932 err = sk_setsockopt(sk, level, optname, optval, optlen);
2933
2934 if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) {
2935 sockopt_lock_sock(sk);
2936 /* paired with READ_ONCE in udp_rmem_release() */
2937 WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2);
2938 sockopt_release_sock(sk);
2939 }
2940 return err;
2941 }
2942
2943 if (optlen < sizeof(int))
2944 return -EINVAL;
2945
2946 if (copy_from_sockptr(&val, optval, sizeof(val)))
2947 return -EFAULT;
2948
2949 valbool = val ? 1 : 0;
2950
2951 switch (optname) {
2952 case UDP_CORK:
2953 if (val != 0) {
2954 udp_set_bit(CORK, sk);
2955 } else {
2956 udp_clear_bit(CORK, sk);
2957 lock_sock(sk);
2958 push_pending_frames(sk);
2959 release_sock(sk);
2960 }
2961 break;
2962
2963 case UDP_ENCAP:
2964 switch (val) {
2965 case 0:
2966 #ifdef CONFIG_XFRM
2967 case UDP_ENCAP_ESPINUDP:
2968 set_xfrm_gro_udp_encap_rcv(val, sk->sk_family, sk);
2969 #if IS_ENABLED(CONFIG_IPV6)
2970 if (sk->sk_family == AF_INET6)
2971 WRITE_ONCE(up->encap_rcv,
2972 ipv6_stub->xfrm6_udp_encap_rcv);
2973 else
2974 #endif
2975 WRITE_ONCE(up->encap_rcv,
2976 xfrm4_udp_encap_rcv);
2977 #endif
2978 fallthrough;
2979 case UDP_ENCAP_L2TPINUDP:
2980 WRITE_ONCE(up->encap_type, val);
2981 udp_tunnel_encap_enable(sk);
2982 break;
2983 default:
2984 err = -ENOPROTOOPT;
2985 break;
2986 }
2987 break;
2988
2989 case UDP_NO_CHECK6_TX:
2990 udp_set_no_check6_tx(sk, valbool);
2991 break;
2992
2993 case UDP_NO_CHECK6_RX:
2994 udp_set_no_check6_rx(sk, valbool);
2995 break;
2996
2997 case UDP_SEGMENT:
2998 if (val < 0 || val > USHRT_MAX)
2999 return -EINVAL;
3000 WRITE_ONCE(up->gso_size, val);
3001 break;
3002
3003 case UDP_GRO:
3004
3005 /* when enabling GRO, accept the related GSO packet type */
3006 if (valbool)
3007 udp_tunnel_encap_enable(sk);
3008 udp_assign_bit(GRO_ENABLED, sk, valbool);
3009 udp_assign_bit(ACCEPT_L4, sk, valbool);
3010 set_xfrm_gro_udp_encap_rcv(up->encap_type, sk->sk_family, sk);
3011 break;
3012
3013 /*
3014 * UDP-Lite's partial checksum coverage (RFC 3828).
3015 */
3016 /* The sender sets the actual checksum coverage length via this option.
3017 * The case coverage > packet length is handled by the send module. */
3018 case UDPLITE_SEND_CSCOV:
3019 if (!is_udplite) /* Disable the option on UDP sockets */
3020 return -ENOPROTOOPT;
3021 if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
3022 val = 8;
3023 else if (val > USHRT_MAX)
3024 val = USHRT_MAX;
3025 WRITE_ONCE(up->pcslen, val);
3026 udp_set_bit(UDPLITE_SEND_CC, sk);
3027 break;
3028
3029 /* The receiver specifies a minimum checksum coverage value. To make
3030 * sense, this should be set to at least 8 (as done below). If zero is
3031 * used, this again means full checksum coverage. */
3032 case UDPLITE_RECV_CSCOV:
3033 if (!is_udplite) /* Disable the option on UDP sockets */
3034 return -ENOPROTOOPT;
3035 if (val != 0 && val < 8) /* Avoid silly minimal values. */
3036 val = 8;
3037 else if (val > USHRT_MAX)
3038 val = USHRT_MAX;
3039 WRITE_ONCE(up->pcrlen, val);
3040 udp_set_bit(UDPLITE_RECV_CC, sk);
3041 break;
3042
3043 default:
3044 err = -ENOPROTOOPT;
3045 break;
3046 }
3047
3048 return err;
3049 }
3050 EXPORT_SYMBOL(udp_lib_setsockopt);
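/* Hypothetical userspace sketches for the options handled above
 * (not part of this file; the values are illustrative):
 *
 *	int gso = 1200;		// UDP_SEGMENT: split sends into 1200B pieces
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *
 *	int on = 1;		// UDP_GRO: allow aggregated receives
 *	setsockopt(fd, SOL_UDP, UDP_GRO, &on, sizeof(on));
 *
 *	int cscov = 20;		// UDP-Lite only: checksum the first 20 bytes
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cscov, sizeof(cscov));
 */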
3051
3052 int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
3053 unsigned int optlen)
3054 {
3055 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
3056 return udp_lib_setsockopt(sk, level, optname,
3057 optval, optlen,
3058 udp_push_pending_frames);
3059 return ip_setsockopt(sk, level, optname, optval, optlen);
3060 }
3061
3062 int udp_lib_getsockopt(struct sock *sk, int level, int optname,
3063 char __user *optval, int __user *optlen)
3064 {
3065 struct udp_sock *up = udp_sk(sk);
3066 int val, len;
3067
3068 if (get_user(len, optlen))
3069 return -EFAULT;
3070
3071 if (len < 0)
3072 return -EINVAL;
3073
3074 len = min_t(unsigned int, len, sizeof(int));
3075
3076 switch (optname) {
3077 case UDP_CORK:
3078 val = udp_test_bit(CORK, sk);
3079 break;
3080
3081 case UDP_ENCAP:
3082 val = READ_ONCE(up->encap_type);
3083 break;
3084
3085 case UDP_NO_CHECK6_TX:
3086 val = udp_get_no_check6_tx(sk);
3087 break;
3088
3089 case UDP_NO_CHECK6_RX:
3090 val = udp_get_no_check6_rx(sk);
3091 break;
3092
3093 case UDP_SEGMENT:
3094 val = READ_ONCE(up->gso_size);
3095 break;
3096
3097 case UDP_GRO:
3098 val = udp_test_bit(GRO_ENABLED, sk);
3099 break;
3100
3101 /* The following two cannot be changed on UDP sockets; the return is
3102 * always 0 (which corresponds to the full checksum coverage of UDP). */
3103 case UDPLITE_SEND_CSCOV:
3104 val = READ_ONCE(up->pcslen);
3105 break;
3106
3107 case UDPLITE_RECV_CSCOV:
3108 val = READ_ONCE(up->pcrlen);
3109 break;
3110
3111 default:
3112 return -ENOPROTOOPT;
3113 }
3114
3115 if (put_user(len, optlen))
3116 return -EFAULT;
3117 if (copy_to_user(optval, &val, len))
3118 return -EFAULT;
3119 return 0;
3120 }
3121 EXPORT_SYMBOL(udp_lib_getsockopt);
3122
3123 int udp_getsockopt(struct sock *sk, int level, int optname,
3124 char __user *optval, int __user *optlen)
3125 {
3126 if (level == SOL_UDP || level == SOL_UDPLITE)
3127 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
3128 return ip_getsockopt(sk, level, optname, optval, optlen);
3129 }
3130
3131 /**
3132 * udp_poll - wait for a UDP event.
3133 * @file: - file struct
3134 * @sock: - socket
3135 * @wait: - poll table
3136 *
3137 * This is the same as datagram poll, except for the special case of
3138 * blocking sockets. If the application is using a blocking fd
3139 * and a packet with a checksum error is in the queue,
3140 * then it could get a return from select indicating data available
3141 * but then block when reading it. Add special case code
3142 * to work around these arguably broken applications.
3143 */
3144 __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
3145 {
3146 __poll_t mask = datagram_poll(file, sock, wait);
3147 struct sock *sk = sock->sk;
3148
3149 if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
3150 mask |= EPOLLIN | EPOLLRDNORM;
3151
3152 /* Check for false positives due to checksum errors */
3153 if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
3154 !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
3155 mask &= ~(EPOLLIN | EPOLLRDNORM);
3156
3157 /* psock ingress_msg queue should not contain any bad checksum frames */
3158 if (sk_is_readable(sk))
3159 mask |= EPOLLIN | EPOLLRDNORM;
3160 return mask;
3162 }
3163 EXPORT_SYMBOL(udp_poll);
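/* The workaround above matters for loops like this hypothetical one
 * (not part of this file): without it, poll() could report readability
 * for a datagram that is later dropped on a checksum error, and a
 * blocking recv() would then hang.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		recv(fd, buf, sizeof(buf), 0);	// no false POLLIN expected
 */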
3164
3165 int udp_abort(struct sock *sk, int err)
3166 {
3167 if (!has_current_bpf_ctx())
3168 lock_sock(sk);
3169
3170 /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
3171 * with close()
3172 */
3173 if (sock_flag(sk, SOCK_DEAD))
3174 goto out;
3175
3176 sk->sk_err = err;
3177 sk_error_report(sk);
3178 __udp_disconnect(sk, 0);
3179
3180 out:
3181 if (!has_current_bpf_ctx())
3182 release_sock(sk);
3183
3184 return 0;
3185 }
3186 EXPORT_SYMBOL_GPL(udp_abort);
3187
3188 struct proto udp_prot = {
3189 .name = "UDP",
3190 .owner = THIS_MODULE,
3191 .close = udp_lib_close,
3192 .pre_connect = udp_pre_connect,
3193 .connect = udp_connect,
3194 .disconnect = udp_disconnect,
3195 .ioctl = udp_ioctl,
3196 .init = udp_init_sock,
3197 .destroy = udp_destroy_sock,
3198 .setsockopt = udp_setsockopt,
3199 .getsockopt = udp_getsockopt,
3200 .sendmsg = udp_sendmsg,
3201 .recvmsg = udp_recvmsg,
3202 .splice_eof = udp_splice_eof,
3203 .release_cb = ip4_datagram_release_cb,
3204 .hash = udp_lib_hash,
3205 .unhash = udp_lib_unhash,
3206 .rehash = udp_v4_rehash,
3207 .get_port = udp_v4_get_port,
3208 .put_port = udp_lib_unhash,
3209 #ifdef CONFIG_BPF_SYSCALL
3210 .psock_update_sk_prot = udp_bpf_update_proto,
3211 #endif
3212 .memory_allocated = &udp_memory_allocated,
3213 .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
3214
3215 .sysctl_mem = sysctl_udp_mem,
3216 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
3217 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
3218 .obj_size = sizeof(struct udp_sock),
3219 .h.udp_table = NULL,
3220 .diag_destroy = udp_abort,
3221 };
3222 EXPORT_SYMBOL(udp_prot);
3223
3224 /* ------------------------------------------------------------------------ */
3225 #ifdef CONFIG_PROC_FS
3226
3227 static unsigned short seq_file_family(const struct seq_file *seq);
3228 static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
3229 {
3230 unsigned short family = seq_file_family(seq);
3231
3232 /* AF_UNSPEC is used as a match-all */
3233 return ((family == AF_UNSPEC || family == sk->sk_family) &&
3234 net_eq(sock_net(sk), seq_file_net(seq)));
3235 }
3236
3237 #ifdef CONFIG_BPF_SYSCALL
3238 static const struct seq_operations bpf_iter_udp_seq_ops;
3239 #endif
3240 static struct udp_table *udp_get_table_seq(struct seq_file *seq,
3241 struct net *net)
3242 {
3243 const struct udp_seq_afinfo *afinfo;
3244
3245 #ifdef CONFIG_BPF_SYSCALL
3246 if (seq->op == &bpf_iter_udp_seq_ops)
3247 return net->ipv4.udp_table;
3248 #endif
3249
3250 afinfo = pde_data(file_inode(seq->file));
3251 return afinfo->udp_table ? : net->ipv4.udp_table;
3252 }
3253
3254 static struct sock *udp_get_first(struct seq_file *seq, int start)
3255 {
3256 struct udp_iter_state *state = seq->private;
3257 struct net *net = seq_file_net(seq);
3258 struct udp_table *udptable;
3259 struct sock *sk;
3260
3261 udptable = udp_get_table_seq(seq, net);
3262
3263 for (state->bucket = start; state->bucket <= udptable->mask;
3264 ++state->bucket) {
3265 struct udp_hslot *hslot = &udptable->hash[state->bucket];
3266
3267 if (hlist_empty(&hslot->head))
3268 continue;
3269
3270 spin_lock_bh(&hslot->lock);
3271 sk_for_each(sk, &hslot->head) {
3272 if (seq_sk_match(seq, sk))
3273 goto found;
3274 }
3275 spin_unlock_bh(&hslot->lock);
3276 }
3277 sk = NULL;
3278 found:
3279 return sk;
3280 }
3281
3282 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
3283 {
3284 struct udp_iter_state *state = seq->private;
3285 struct net *net = seq_file_net(seq);
3286 struct udp_table *udptable;
3287
3288 do {
3289 sk = sk_next(sk);
3290 } while (sk && !seq_sk_match(seq, sk));
3291
3292 if (!sk) {
3293 udptable = udp_get_table_seq(seq, net);
3294
3295 if (state->bucket <= udptable->mask)
3296 spin_unlock_bh(&udptable->hash[state->bucket].lock);
3297
3298 return udp_get_first(seq, state->bucket + 1);
3299 }
3300 return sk;
3301 }
3302
3303 static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
3304 {
3305 struct sock *sk = udp_get_first(seq, 0);
3306
3307 if (sk)
3308 while (pos && (sk = udp_get_next(seq, sk)) != NULL)
3309 --pos;
3310 return pos ? NULL : sk;
3311 }
3312
3313 void *udp_seq_start(struct seq_file *seq, loff_t *pos)
3314 {
3315 struct udp_iter_state *state = seq->private;
3316 state->bucket = MAX_UDP_PORTS;
3317
3318 return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
3319 }
3320 EXPORT_SYMBOL(udp_seq_start);
3321
3322 void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3323 {
3324 struct sock *sk;
3325
3326 if (v == SEQ_START_TOKEN)
3327 sk = udp_get_idx(seq, 0);
3328 else
3329 sk = udp_get_next(seq, v);
3330
3331 ++*pos;
3332 return sk;
3333 }
3334 EXPORT_SYMBOL(udp_seq_next);
3335
3336 void udp_seq_stop(struct seq_file *seq, void *v)
3337 {
3338 struct udp_iter_state *state = seq->private;
3339 struct udp_table *udptable;
3340
3341 udptable = udp_get_table_seq(seq, seq_file_net(seq));
3342
3343 if (state->bucket <= udptable->mask)
3344 spin_unlock_bh(&udptable->hash[state->bucket].lock);
3345 }
3346 EXPORT_SYMBOL(udp_seq_stop);
3347
3348 /* ------------------------------------------------------------------------ */
3349 static void udp4_format_sock(struct sock *sp, struct seq_file *f,
3350 int bucket)
3351 {
3352 struct inet_sock *inet = inet_sk(sp);
3353 __be32 dest = inet->inet_daddr;
3354 __be32 src = inet->inet_rcv_saddr;
3355 __u16 destp = ntohs(inet->inet_dport);
3356 __u16 srcp = ntohs(inet->inet_sport);
3357
3358 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
3359 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
3360 bucket, src, srcp, dest, destp, sp->sk_state,
3361 sk_wmem_alloc_get(sp),
3362 udp_rqueue_get(sp),
3363 0, 0L, 0,
3364 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
3365 0, sock_i_ino(sp),
3366 refcount_read(&sp->sk_refcnt), sp,
3367 atomic_read(&sp->sk_drops));
3368 }
3369
3370 int udp4_seq_show(struct seq_file *seq, void *v)
3371 {
3372 seq_setwidth(seq, 127);
3373 if (v == SEQ_START_TOKEN)
3374 seq_puts(seq, " sl local_address rem_address st tx_queue "
3375 "rx_queue tr tm->when retrnsmt uid timeout "
3376 "inode ref pointer drops");
3377 else {
3378 struct udp_iter_state *state = seq->private;
3379
3380 udp4_format_sock(v, seq, state->bucket);
3381 }
3382 seq_pad(seq, '\n');
3383 return 0;
3384 }
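/* Illustrative /proc/net/udp line produced by the format above; every
 * field value here is made up. Addresses and ports are hex, and the
 * state is TCP_CLOSE (07), as UDP sockets report it:
 *
 *  1405: 00000000:0035 00000000:0000 07 00000000:00000000 00:00000000 00000000   101        0 12345 2 0000000000000000 0
 */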
3385
3386 #ifdef CONFIG_BPF_SYSCALL
3387 struct bpf_iter__udp {
3388 __bpf_md_ptr(struct bpf_iter_meta *, meta);
3389 __bpf_md_ptr(struct udp_sock *, udp_sk);
3390 uid_t uid __aligned(8);
3391 int bucket __aligned(8);
3392 };
3393
3394 struct bpf_udp_iter_state {
3395 struct udp_iter_state state;
3396 unsigned int cur_sk;
3397 unsigned int end_sk;
3398 unsigned int max_sk;
3399 int offset;
3400 struct sock **batch;
3401 bool st_bucket_done;
3402 };
3403
3404 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
3405 unsigned int new_batch_sz);
3406 static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
3407 {
3408 struct bpf_udp_iter_state *iter = seq->private;
3409 struct udp_iter_state *state = &iter->state;
3410 struct net *net = seq_file_net(seq);
3411 int resume_bucket, resume_offset;
3412 struct udp_table *udptable;
3413 unsigned int batch_sks = 0;
3414 bool resized = false;
3415 struct sock *sk;
3416
3417 resume_bucket = state->bucket;
3418 resume_offset = iter->offset;
3419
3420 /* The current batch is done, so advance the bucket. */
3421 if (iter->st_bucket_done)
3422 state->bucket++;
3423
3424 udptable = udp_get_table_seq(seq, net);
3425
3426 again:
3427 /* New batch for the next bucket.
3428 * Iterate over the hash table to find a bucket with sockets matching
3429 * the iterator attributes, and return the first matching socket from
3430 * the bucket. The remaining matched sockets from the bucket are batched
3431 * before releasing the bucket lock. This allows BPF programs that are
3432 * called in seq_show to acquire the bucket lock if needed.
3433 */
3434 iter->cur_sk = 0;
3435 iter->end_sk = 0;
3436 iter->st_bucket_done = false;
3437 batch_sks = 0;
3438
3439 for (; state->bucket <= udptable->mask; state->bucket++) {
3440 struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot;
3441
3442 if (hlist_empty(&hslot2->head))
3443 continue;
3444
3445 iter->offset = 0;
3446 spin_lock_bh(&hslot2->lock);
3447 udp_portaddr_for_each_entry(sk, &hslot2->head) {
3448 if (seq_sk_match(seq, sk)) {
3449 /* Resume from the last iterated socket at the
3450 * offset in the bucket before the iterator was stopped.
3451 */
3452 if (state->bucket == resume_bucket &&
3453 iter->offset < resume_offset) {
3454 ++iter->offset;
3455 continue;
3456 }
3457 if (iter->end_sk < iter->max_sk) {
3458 sock_hold(sk);
3459 iter->batch[iter->end_sk++] = sk;
3460 }
3461 batch_sks++;
3462 }
3463 }
3464 spin_unlock_bh(&hslot2->lock);
3465
3466 if (iter->end_sk)
3467 break;
3468 }
3469
3470 /* All done: no batch made. */
3471 if (!iter->end_sk)
3472 return NULL;
3473
3474 if (iter->end_sk == batch_sks) {
3475 /* Batching is done for the current bucket; return the first
3476 * socket to be iterated from the batch.
3477 */
3478 iter->st_bucket_done = true;
3479 goto done;
3480 }
3481 if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2)) {
3482 resized = true;
3483 /* After allocating a larger batch, retry one more time to grab
3484 * the whole bucket.
3485 */
3486 goto again;
3487 }
3488 done:
3489 return iter->batch[0];
3490 }
3491
3492 static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3493 {
3494 struct bpf_udp_iter_state *iter = seq->private;
3495 struct sock *sk;
3496
3497 /* Whenever seq_next() is called, the socket at iter->cur_sk is
3498 * done with seq_show(), so unref it.
3499 */
3500 if (iter->cur_sk < iter->end_sk) {
3501 sock_put(iter->batch[iter->cur_sk++]);
3502 ++iter->offset;
3503 }
3504
3505 /* After updating iter->cur_sk, check if there are more sockets
3506 * available in the current bucket batch.
3507 */
3508 if (iter->cur_sk < iter->end_sk)
3509 sk = iter->batch[iter->cur_sk];
3510 else
3511 /* Prepare a new batch. */
3512 sk = bpf_iter_udp_batch(seq);
3513
3514 ++*pos;
3515 return sk;
3516 }
3517
3518 static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos)
3519 {
3520 /* bpf iter does not support lseek, so it always
3521 * continues from where it was stop()-ped.
3522 */
3523 if (*pos)
3524 return bpf_iter_udp_batch(seq);
3525
3526 return SEQ_START_TOKEN;
3527 }
3528
3529 static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3530 struct udp_sock *udp_sk, uid_t uid, int bucket)
3531 {
3532 struct bpf_iter__udp ctx;
3533
3534 meta->seq_num--; /* skip SEQ_START_TOKEN */
3535 ctx.meta = meta;
3536 ctx.udp_sk = udp_sk;
3537 ctx.uid = uid;
3538 ctx.bucket = bucket;
3539 return bpf_iter_run_prog(prog, &ctx);
3540 }
3541
3542 static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
3543 {
3544 struct udp_iter_state *state = seq->private;
3545 struct bpf_iter_meta meta;
3546 struct bpf_prog *prog;
3547 struct sock *sk = v;
3548 uid_t uid;
3549 int ret;
3550
3551 if (v == SEQ_START_TOKEN)
3552 return 0;
3553
3554 lock_sock(sk);
3555
3556 if (unlikely(sk_unhashed(sk))) {
3557 ret = SEQ_SKIP;
3558 goto unlock;
3559 }
3560
3561 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3562 meta.seq = seq;
3563 prog = bpf_iter_get_info(&meta, false);
3564 ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
3565
3566 unlock:
3567 release_sock(sk);
3568 return ret;
3569 }
3570
3571 static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
3572 {
3573 while (iter->cur_sk < iter->end_sk)
3574 sock_put(iter->batch[iter->cur_sk++]);
3575 }
3576
3577 static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
3578 {
3579 struct bpf_udp_iter_state *iter = seq->private;
3580 struct bpf_iter_meta meta;
3581 struct bpf_prog *prog;
3582
3583 if (!v) {
3584 meta.seq = seq;
3585 prog = bpf_iter_get_info(&meta, true);
3586 if (prog)
3587 (void)udp_prog_seq_show(prog, &meta, v, 0, 0);
3588 }
3589
3590 if (iter->cur_sk < iter->end_sk) {
3591 bpf_iter_udp_put_batch(iter);
3592 iter->st_bucket_done = false;
3593 }
3594 }
3595
3596 static const struct seq_operations bpf_iter_udp_seq_ops = {
3597 .start = bpf_iter_udp_seq_start,
3598 .next = bpf_iter_udp_seq_next,
3599 .stop = bpf_iter_udp_seq_stop,
3600 .show = bpf_iter_udp_seq_show,
3601 };
3602 #endif
3603
3604 static unsigned short seq_file_family(const struct seq_file *seq)
3605 {
3606 const struct udp_seq_afinfo *afinfo;
3607
3608 #ifdef CONFIG_BPF_SYSCALL
3609 /* BPF iterator: bpf programs to filter sockets. */
3610 if (seq->op == &bpf_iter_udp_seq_ops)
3611 return AF_UNSPEC;
3612 #endif
3613
3614 /* Proc fs iterator */
3615 afinfo = pde_data(file_inode(seq->file));
3616 return afinfo->family;
3617 }
3618
3619 const struct seq_operations udp_seq_ops = {
3620 .start = udp_seq_start,
3621 .next = udp_seq_next,
3622 .stop = udp_seq_stop,
3623 .show = udp4_seq_show,
3624 };
3625 EXPORT_SYMBOL(udp_seq_ops);
3626
3627 static struct udp_seq_afinfo udp4_seq_afinfo = {
3628 .family = AF_INET,
3629 .udp_table = NULL,
3630 };
3631
3632 static int __net_init udp4_proc_init_net(struct net *net)
3633 {
3634 if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
3635 sizeof(struct udp_iter_state), &udp4_seq_afinfo))
3636 return -ENOMEM;
3637 return 0;
3638 }
3639
3640 static void __net_exit udp4_proc_exit_net(struct net *net)
3641 {
3642 remove_proc_entry("udp", net->proc_net);
3643 }
3644
3645 static struct pernet_operations udp4_net_ops = {
3646 .init = udp4_proc_init_net,
3647 .exit = udp4_proc_exit_net,
3648 };
3649
3650 int __init udp4_proc_init(void)
3651 {
3652 return register_pernet_subsys(&udp4_net_ops);
3653 }
3654
3655 void udp4_proc_exit(void)
3656 {
3657 unregister_pernet_subsys(&udp4_net_ops);
3658 }
3659 #endif /* CONFIG_PROC_FS */
3660
3661 static __initdata unsigned long uhash_entries;
3662 static int __init set_uhash_entries(char *str)
3663 {
3664 ssize_t ret;
3665
3666 if (!str)
3667 return 0;
3668
3669 ret = kstrtoul(str, 0, &uhash_entries);
3670 if (ret)
3671 return 0;
3672
3673 if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
3674 uhash_entries = UDP_HTABLE_SIZE_MIN;
3675 return 1;
3676 }
3677 __setup("uhash_entries=", set_uhash_entries);
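/* Example (hypothetical): booting with "uhash_entries=65536" on the
 * kernel command line pre-sizes the UDP hash table to 65536 slots;
 * non-zero values below UDP_HTABLE_SIZE_MIN are rounded up above.
 */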
3678
3679 void __init udp_table_init(struct udp_table *table, const char *name)
3680 {
3681 unsigned int i, slot_size;
3682
3683 slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
3684 udp_hash4_slot_size();
3685 table->hash = alloc_large_system_hash(name,
3686 slot_size,
3687 uhash_entries,
3688 21, /* one slot per 2 MB */
3689 0,
3690 &table->log,
3691 &table->mask,
3692 UDP_HTABLE_SIZE_MIN,
3693 UDP_HTABLE_SIZE_MAX);
3694
3695 table->hash2 = (void *)(table->hash + (table->mask + 1));
3696 for (i = 0; i <= table->mask; i++) {
3697 INIT_HLIST_HEAD(&table->hash[i].head);
3698 table->hash[i].count = 0;
3699 spin_lock_init(&table->hash[i].lock);
3700 }
3701 for (i = 0; i <= table->mask; i++) {
3702 INIT_HLIST_HEAD(&table->hash2[i].hslot.head);
3703 table->hash2[i].hslot.count = 0;
3704 spin_lock_init(&table->hash2[i].hslot.lock);
3705 }
3706 udp_table_hash4_init(table);
3707 }
3708
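/* Random seed for UDP flow hashing, generated once on first use and
 * then kept stable for the lifetime of the system.
 */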
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

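/* Per-netns defaults for the UDP sysctls. */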
static void __net_init udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE;
	net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

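/* Allocate a hash table for a child netns, mirroring the layout used
 * by udp_table_init(): the primary and secondary hashes share one
 * memcg-accounted allocation, with hash2 following hash.
 */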
static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries)
{
	struct udp_table *udptable;
	unsigned int slot_size;
	int i;

	udptable = kmalloc(sizeof(*udptable), GFP_KERNEL);
	if (!udptable)
		goto out;

	slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
		    udp_hash4_slot_size();
	udptable->hash = vmalloc_huge(hash_entries * slot_size,
				      GFP_KERNEL_ACCOUNT);
	if (!udptable->hash)
		goto free_table;

	udptable->hash2 = (void *)(udptable->hash + hash_entries);
	udptable->mask = hash_entries - 1;
	udptable->log = ilog2(hash_entries);

	for (i = 0; i < hash_entries; i++) {
		INIT_HLIST_HEAD(&udptable->hash[i].head);
		udptable->hash[i].count = 0;
		spin_lock_init(&udptable->hash[i].lock);

		INIT_HLIST_HEAD(&udptable->hash2[i].hslot.head);
		udptable->hash2[i].hslot.count = 0;
		spin_lock_init(&udptable->hash2[i].hslot.lock);
	}
	udp_table_hash4_init(udptable);

	return udptable;

free_table:
	kfree(udptable);
out:
	return NULL;
}

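/* Free a per-netns hash table, unless the netns is still using the
 * global udp_table.
 */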
static void __net_exit udp_pernet_table_free(struct net *net)
{
	struct udp_table *udptable = net->ipv4.udp_table;

	if (udptable == &udp_table)
		return;

	kvfree(udptable->hash);
	kfree(udptable);
}

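/* Pick the hash table for a new netns: init_net always uses the
 * global table; other namespaces get a dedicated table sized by the
 * creating namespace's sysctl_udp_child_hash_entries (rounded up to a
 * power of two), falling back to the global table when that sysctl is
 * zero or the allocation fails.
 */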
static void __net_init udp_set_table(struct net *net)
{
	struct udp_table *udptable;
	unsigned int hash_entries;
	struct net *old_net;

	if (net_eq(net, &init_net))
		goto fallback;

	old_net = current->nsproxy->net_ns;
	hash_entries = READ_ONCE(old_net->ipv4.sysctl_udp_child_hash_entries);
	if (!hash_entries)
		goto fallback;

	/* Set min to keep the bitmap on stack in udp_lib_get_port() */
	if (hash_entries < UDP_HTABLE_SIZE_MIN_PERNET)
		hash_entries = UDP_HTABLE_SIZE_MIN_PERNET;
	else
		hash_entries = roundup_pow_of_two(hash_entries);

	udptable = udp_pernet_table_alloc(hash_entries);
	if (udptable) {
		net->ipv4.udp_table = udptable;
	} else {
		pr_warn("Failed to allocate UDP hash table (entries: %u) "
			"for a netns, fallback to the global one\n",
			hash_entries);
fallback:
		net->ipv4.udp_table = &udp_table;
	}
}

static int __net_init udp_pernet_init(struct net *net)
{
	udp_sysctl_init(net);
	udp_set_table(net);

	return 0;
}

static void __net_exit udp_pernet_exit(struct net *net)
{
	udp_pernet_table_free(net);
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init = udp_pernet_init,
	.exit = udp_pernet_exit,
};

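/* BPF iterator plumbing.  DEFINE_BPF_ITER_FUNC() below emits the
 * bpf_iter_udp() attach-point stub whose signature gives the verifier
 * the BTF layout of the iterator context (meta, udp_sk, uid, bucket).
 */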
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
		     struct udp_sock *udp_sk, uid_t uid, int bucket)

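/* Resize the iterator's socket batch to new_batch_sz entries.  The
 * references held on sockets in the old batch are dropped before it
 * is freed, so the caller is expected to re-batch the bucket it was
 * processing.
 */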
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
				   GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_udp_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}

#define INIT_BATCH_SZ 16

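/* Set up per-iterator state: the netns-aware seq_file private data
 * plus an initial batch array of INIT_BATCH_SZ socket pointers.
 */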
static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_udp_iter_state *iter = priv_data;
	int ret;

	ret = bpf_iter_init_seq_net(priv_data, aux);
	if (ret)
		return ret;

	ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ);
	if (ret)
		bpf_iter_fini_seq_net(priv_data);

	return ret;
}

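/* Undo bpf_iter_init_udp(). */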
static void bpf_iter_fini_udp(void *priv_data)
{
	struct bpf_udp_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info udp_seq_info = {
	.seq_ops = &bpf_iter_udp_seq_ops,
	.init_seq_private = bpf_iter_init_udp,
	.fini_seq_private = bpf_iter_fini_udp,
	.seq_priv_size = sizeof(struct bpf_udp_iter_state),
};

static struct bpf_iter_reg udp_reg_info = {
	.target = "udp",
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__udp, udp_sk),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info = &udp_seq_info,
};

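/* Resolve the BTF id of struct udp_sock at boot time and register the
 * "udp" iterator target.
 */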
static void __init bpf_iter_register(void)
{
	udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
	if (bpf_iter_reg_target(&udp_reg_info))
		pr_warn("Warning: could not register bpf iterator udp\n");
}
#endif

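/* Boot-time UDP initialisation: set up the global hash table, derive
 * the udp_mem pressure thresholds from available memory, allocate the
 * busylock spinlock array, and register the pernet operations and the
 * BPF iterator.
 */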
void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
}
