// SPDX-License-Identifier: GPL-2.0-or-later
/* L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "l2tp_core.h"

/* per-net private data for this module */
static unsigned int l2tp_ip_net_id;
struct l2tp_ip_net {
	rwlock_t l2tp_ip_lock;
	struct hlist_head l2tp_ip_table;
	struct hlist_head l2tp_ip_bind_table;
};
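
/* l2tp_ip_table holds sockets created via l2tp_ip_open() which have not
 * yet been bound; l2tp_ip_bind_table holds sockets which have been bound
 * (and possibly connected) and is what __l2tp_ip_bind_lookup() searches
 * when a control frame arrives. Both lists are protected by l2tp_ip_lock.
 */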

struct l2tp_ip_sock {
	/* inet_sock has to be the first member of l2tp_ip_sock */
	struct inet_sock inet;

	u32 conn_id;
	u32 peer_conn_id;
};

static struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
	return (struct l2tp_ip_sock *)sk;
}

static struct l2tp_ip_net *l2tp_ip_pernet(const struct net *net)
{
	return net_generic(net, l2tp_ip_net_id);
}

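/* Find a bound socket matching the given local and remote addresses,
 * interface and L2TP connection ID. A zero laddr, raddr or dif acts as
 * a wildcard; the connection ID must match exactly. The caller must
 * hold l2tp_ip_lock.
 */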
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
					  __be32 raddr, int dif, u32 tunnel_id)
{
	struct l2tp_ip_net *pn = l2tp_ip_pernet(net);
	struct sock *sk;

	sk_for_each_bound(sk, &pn->l2tp_ip_bind_table) {
		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
		const struct inet_sock *inet = inet_sk(sk);
		int bound_dev_if;

		if (!net_eq(sock_net(sk), net))
			continue;

		bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
		if (bound_dev_if && dif && bound_dev_if != dif)
			continue;

		if (inet->inet_rcv_saddr && laddr &&
		    inet->inet_rcv_saddr != laddr)
			continue;

		if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
			continue;

		if (l2tp->conn_id != tunnel_id)
			continue;

		goto found;
	}

	sk = NULL;
found:
	return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32 bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
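/* Data frames are handed to l2tp_recv_common() for the session identified
 * by the Session ID; control frames are queued to the L2TP/IP socket bound
 * to the Control Connection ID and read by userspace with recvmsg().
 */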
static int l2tp_ip_recv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct l2tp_ip_net *pn;
	struct sock *sk;
	u32 session_id;
	u32 tunnel_id;
	unsigned char *ptr, *optr;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel = NULL;
	struct iphdr *iph;

	pn = l2tp_ip_pernet(net);

	if (!pskb_may_pull(skb, 4))
		goto discard;

	/* Point to L2TP header */
	optr = skb->data;
	ptr = skb->data;
	session_id = ntohl(*((__be32 *)ptr));
	ptr += 4;

	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
	 * the session_id. If it is 0, the packet is an L2TP control
	 * frame and the session_id value can be discarded.
	 */
	if (session_id == 0) {
		__skb_pull(skb, 4);
		goto pass_up;
	}

	/* Ok, this is a data packet. Lookup the session. */
	session = l2tp_v3_session_get(net, NULL, session_id);
	if (!session)
		goto discard;

	tunnel = session->tunnel;
	if (!tunnel)
		goto discard_sess;

	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
		goto discard_sess;

	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
	l2tp_session_put(session);

	return 0;

pass_up:
	/* Get the tunnel_id from the L2TP header */
	if (!pskb_may_pull(skb, 12))
		goto discard;

	if ((skb->data[0] & 0xc0) != 0xc0)
		goto discard;

	tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
	iph = (struct iphdr *)skb_network_header(skb);

	read_lock_bh(&pn->l2tp_ip_lock);
	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
				   tunnel_id);
	if (!sk) {
		read_unlock_bh(&pn->l2tp_ip_lock);
		goto discard;
	}
	sock_hold(sk);
	read_unlock_bh(&pn->l2tp_ip_lock);

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_put;

	nf_reset_ct(skb);

	return sk_receive_skb(sk, skb, 1);

discard_sess:
	l2tp_session_put(session);
	goto discard;

discard_put:
	sock_put(sk);

discard:
	kfree_skb(skb);
	return 0;
}

static int l2tp_ip_hash(struct sock *sk)
{
	struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));

	if (sk_unhashed(sk)) {
		write_lock_bh(&pn->l2tp_ip_lock);
		sk_add_node(sk, &pn->l2tp_ip_table);
		write_unlock_bh(&pn->l2tp_ip_lock);
	}
	return 0;
}

static void l2tp_ip_unhash(struct sock *sk)
{
	struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));

	if (sk_unhashed(sk))
		return;
	write_lock_bh(&pn->l2tp_ip_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&pn->l2tp_ip_lock);
}

static int l2tp_ip_open(struct sock *sk)
{
	/* Prevent autobind. We don't have ports. */
	inet_sk(sk)->inet_num = IPPROTO_L2TP;

	l2tp_ip_hash(sk);
	return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
	struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));

	write_lock_bh(&pn->l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_del_node_init(sk);
	write_unlock_bh(&pn->l2tp_ip_lock);
	sk_common_release(sk);
}

static void l2tp_ip_destroy_sock(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	__skb_queue_purge(&sk->sk_write_queue);

	tunnel = l2tp_sk_to_tunnel(sk);
	if (tunnel) {
		l2tp_tunnel_delete(tunnel);
		l2tp_tunnel_put(tunnel);
	}
}

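/* Bind the socket to a local address and L2TP connection ID, and move it
 * from l2tp_ip_table to l2tp_ip_bind_table. Sockets start life with
 * SOCK_ZAPPED set; bind() clears it, and connect() refuses to run until it
 * is cleared because autobinding is not supported.
 */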
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
	struct net *net = sock_net(sk);
	struct l2tp_ip_net *pn;
	int ret;
	int chk_addr_ret;

	if (addr_len < sizeof(struct sockaddr_l2tpip))
		return -EINVAL;
	if (addr->l2tp_family != AF_INET)
		return -EINVAL;

	lock_sock(sk);

	ret = -EINVAL;
	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if (sk->sk_state != TCP_CLOSE)
		goto out;

	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
	ret = -EADDRNOTAVAIL;
	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
		goto out;

	if (addr->l2tp_addr.s_addr) {
		inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
		inet->inet_saddr = addr->l2tp_addr.s_addr;
	}
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;	/* Use device */

	pn = l2tp_ip_pernet(net);
	write_lock_bh(&pn->l2tp_ip_lock);
	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
		write_unlock_bh(&pn->l2tp_ip_lock);
		ret = -EADDRINUSE;
		goto out;
	}

	sk_dst_reset(sk);
	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

	sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
	sk_del_node_init(sk);
	write_unlock_bh(&pn->l2tp_ip_lock);

	ret = 0;
	sock_reset_flag(sk, SOCK_ZAPPED);

out:
	release_sock(sk);

	return ret;
}

static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
	struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
	int rc;

	if (addr_len < sizeof(*lsa))
		return -EINVAL;

	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
		return -EINVAL;

	lock_sock(sk);

	/* Must bind first - autobinding does not work */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rc = -EINVAL;
		goto out_sk;
	}

	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
	if (rc < 0)
		goto out_sk;

	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

	write_lock_bh(&pn->l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
	write_unlock_bh(&pn->l2tp_ip_lock);

out_sk:
	release_sock(sk);

	return rc;
}

static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
	if (sock_flag(sk, SOCK_ZAPPED))
		return 0;

	return __udp_disconnect(sk, flags);
}

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
			   int peer)
{
	struct sock *sk = sock->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

	memset(lsa, 0, sizeof(*lsa));
	lsa->l2tp_family = AF_INET;
	if (peer) {
		if (!inet->inet_dport)
			return -ENOTCONN;
		lsa->l2tp_conn_id = lsk->peer_conn_id;
		lsa->l2tp_addr.s_addr = inet->inet_daddr;
	} else {
		__be32 addr = inet->inet_rcv_saddr;

		if (!addr)
			addr = inet->inet_saddr;
		lsa->l2tp_conn_id = lsk->conn_id;
		lsa->l2tp_addr.s_addr = addr;
	}
	return sizeof(*lsa);
}

static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	/* Charge it to the socket, dropping if the queue is full. */
	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0)
		goto drop;

	return 0;

drop:
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
	kfree_skb(skb);
	return 0;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
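/* Only the control message itself (starting at the T/L/Ver word) is
 * supplied by userspace; the zero session ID that precedes it on the wire
 * is prepended here before the packet is handed to IP.
 */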
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int rc;
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = NULL;
	int connected = 0;
	__be32 daddr;

	lock_sock(sk);

	rc = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	/* Get and verify the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);

		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(*lip))
			goto out;

		if (lip->l2tp_family != AF_INET) {
			rc = -EAFNOSUPPORT;
			if (lip->l2tp_family != AF_UNSPEC)
				goto out;
		}

		daddr = lip->l2tp_addr.s_addr;
	} else {
		rc = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		connected = 1;
	}

	/* Allocate a socket buffer */
	rc = -ENOMEM;
	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
			   4 + len, 0, GFP_KERNEL);
	if (!skb)
		goto error;

	/* Reserve space for headers, putting IP header on 4-byte boundary. */
	skb_reserve(skb, 2 + NET_SKB_PAD);
	skb_reset_network_header(skb);
	skb_reserve(skb, sizeof(struct iphdr));
	skb_reset_transport_header(skb);

	/* Insert 0 session_id */
	*((__be32 *)skb_put(skb, 4)) = 0;

	/* Copy user data into skb */
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc < 0) {
		kfree_skb(skb);
		goto error;
	}

	if (connected)
		rt = dst_rtable(__sk_dst_check(sk, 0));

	rcu_read_lock();
	if (!rt) {
		struct flowi4 *fl4 = &inet->cork.fl.u.ip4;

		inet_sk_init_flowi4(inet, fl4);

		/* Overwrite ->daddr if msg->msg_name was provided */
		if (!connected)
			fl4->daddr = daddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until a route appears or the
		 * connection times out.
		 */
		rt = ip_route_output_flow(sock_net(sk), fl4, sk);
		if (IS_ERR(rt))
			goto no_route;
		if (connected) {
			sk_setup_caps(sk, &rt->dst);
		} else {
			skb_dst_set(skb, &rt->dst);
			goto xmit;
		}
	}

	/* We don't need to clone dst here, it is guaranteed not to disappear.
	 * __dev_xmit_skb() might force a refcount if needed.
	 */
	skb_dst_set_noref(skb, &rt->dst);

xmit:
	/* Queue the packet to IP for output */
	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
	rcu_read_unlock();

error:
	if (rc >= 0)
		rc = len;

out:
	release_sock(sk);
	return rc;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	rc = -EHOSTUNREACH;
	goto out;
}

static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	size_t copied = 0;
	int err = -EOPNOTSUPP;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;

	if (flags & MSG_OOB)
		goto out;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet_cmsg_flags(inet))
		ip_cmsg_recv(msg, skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
done:
	skb_free_datagram(sk, skb);
out:
	return err ? err : copied;
}

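/* SIOCOUTQ/SIOCINQ handling. Exported because the L2TPv3 IPv6 socket code
 * shares the same implementation.
 */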
int l2tp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct sk_buff *skb;

	switch (cmd) {
	case SIOCOUTQ:
		*karg = sk_wmem_alloc_get(sk);
		break;
	case SIOCINQ:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		*karg = skb ? skb->len : 0;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		break;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
	.name		= "L2TP/IP",
	.owner		= THIS_MODULE,
	.init		= l2tp_ip_open,
	.close		= l2tp_ip_close,
	.bind		= l2tp_ip_bind,
	.connect	= l2tp_ip_connect,
	.disconnect	= l2tp_ip_disconnect,
	.ioctl		= l2tp_ioctl,
	.destroy	= l2tp_ip_destroy_sock,
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.sendmsg	= l2tp_ip_sendmsg,
	.recvmsg	= l2tp_ip_recvmsg,
	.backlog_rcv	= l2tp_ip_backlog_recv,
	.hash		= l2tp_ip_hash,
	.unhash		= l2tp_ip_unhash,
	.obj_size	= sizeof(struct l2tp_ip_sock),
};

static const struct proto_ops l2tp_ip_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.release	= inet_release,
	.bind		= inet_bind,
	.connect	= inet_dgram_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= l2tp_ip_getname,
	.poll		= datagram_poll,
	.ioctl		= inet_ioctl,
	.gettstamp	= sock_gettstamp,
	.listen		= sock_no_listen,
	.shutdown	= inet_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= inet_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
};

static struct inet_protosw l2tp_ip_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_L2TP,
	.prot		= &l2tp_ip_prot,
	.ops		= &l2tp_ip_ops,
};
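
/* Userspace reaches this protosw by opening a SOCK_DGRAM socket with
 * protocol IPPROTO_L2TP and binding it to a connection ID, roughly
 * (illustrative sketch only):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip sa = {
 *		.l2tp_family	= AF_INET,
 *		.l2tp_addr	= { .s_addr = htonl(INADDR_ANY) },
 *		.l2tp_conn_id	= local_conn_id,
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * local_conn_id stands for whatever control connection ID the L2TP
 * daemon negotiated; the name is illustrative.
 */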

static struct net_protocol l2tp_ip_protocol __read_mostly = {
	.handler	= l2tp_ip_recv,
};

static __net_init int l2tp_ip_init_net(struct net *net)
{
	struct l2tp_ip_net *pn = net_generic(net, l2tp_ip_net_id);

	rwlock_init(&pn->l2tp_ip_lock);
	INIT_HLIST_HEAD(&pn->l2tp_ip_table);
	INIT_HLIST_HEAD(&pn->l2tp_ip_bind_table);
	return 0;
}

static __net_exit void l2tp_ip_exit_net(struct net *net)
{
	struct l2tp_ip_net *pn = l2tp_ip_pernet(net);

	write_lock_bh(&pn->l2tp_ip_lock);
	WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_table) != 0);
	WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_bind_table) != 0);
	write_unlock_bh(&pn->l2tp_ip_lock);
}

static struct pernet_operations l2tp_ip_net_ops = {
	.init = l2tp_ip_init_net,
	.exit = l2tp_ip_exit_net,
	.id   = &l2tp_ip_net_id,
	.size = sizeof(struct l2tp_ip_net),
};

static int __init l2tp_ip_init(void)
{
	int err;

	pr_info("L2TP IP encapsulation support (L2TPv3)\n");

	err = register_pernet_device(&l2tp_ip_net_ops);
	if (err)
		goto out;

	err = proto_register(&l2tp_ip_prot, 1);
	if (err != 0)
		goto out1;

	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	if (err)
		goto out2;

	inet_register_protosw(&l2tp_ip_protosw);
	return 0;

out2:
	proto_unregister(&l2tp_ip_prot);
out1:
	unregister_pernet_device(&l2tp_ip_net_ops);
out:
	return err;
}

static void __exit l2tp_ip_exit(void)
{
	inet_unregister_protosw(&l2tp_ip_protosw);
	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	proto_unregister(&l2tp_ip_prot);
	unregister_pernet_device(&l2tp_ip_net_ops);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <[email protected]>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
 * because __stringify doesn't like enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
735