Lines Matching +full:ports +full:- +full:block +full:- +full:pack +full:- +full:mode
1 /* SPDX-License-Identifier: GPL-2.0 */
48 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
50 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
52 MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
55 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
56 #define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
64 ------------------------------------
67 - policy rule, struct xfrm_policy (=SPD entry)
68 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
69 - instance of a transformer, struct xfrm_state (=SA)
70 - template to clone xfrm_state, struct xfrm_tmpl
73 length setting, net->xfrm.policy_hthresh). Other policies are stored in
81 If "action" is "block", then we prohibit the flow, otherwise:
85 to a complete xfrm_state (see below) and we pack a bundle of transformations
88 dst -. xfrm .-> xfrm_state #1
89  |---. child .-> dst -. xfrm .-> xfrm_state #2
90                   |---. child .-> dst -. xfrm .-> xfrm_state #3
91                                    |---. child .-> NULL
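The diagram above describes a chain, not a fan-out: each dst's child is the next bundle element. As a rough illustration (not kernel code; it assumes only the xfrm_dst_child() helper declared further down in this header, and the visit() callback is a placeholder), a caller could walk a resolved bundle like this:

/* Sketch: visit every xfrm_state in a resolved bundle. */
static void walk_bundle(struct dst_entry *dst,
                        void (*visit)(struct xfrm_state *x))
{
        while (dst) {
                if (dst->xfrm)                  /* transformer instance (SA) */
                        visit(dst->xfrm);
                dst = xfrm_dst_child(dst);      /* next element, NULL at the end */
        }
}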
95 -----------------------
97 1. ->mode Mode: transport or tunnel
98 2. ->id.proto Protocol: AH/ESP/IPCOMP
99 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
101 4. ->id.spi If not zero, static SPI.
102 5. ->saddr Local tunnel endpoint, ignored for transport mode.
103 6. ->algos List of allowed algos. Plain bitmask now.
105 7. ->share Sharing mode.
106 Q: how to implement private sharing mode? To add struct sock* to
110 with appropriate mode/proto/algo, permitted by selector.
121 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
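The numbered template fields above correspond to members of struct xfrm_tmpl (partially visible around line 553 below). A minimal sketch of filling a tunnel-mode ESP template, assuming current member names; in particular, the "plain bitmask" of item 6 is split into aalgos/ealgos/calgos in today's header:

/* Sketch only: an ESP tunnel template as described by the list above. */
static void fill_esp_tunnel_tmpl(struct xfrm_tmpl *t,
                                 const xfrm_address_t *local,
                                 const xfrm_address_t *remote)
{
        memset(t, 0, sizeof(*t));
        t->mode     = XFRM_MODE_TUNNEL; /* 1. transport or tunnel */
        t->id.proto = IPPROTO_ESP;      /* 2. AH/ESP/IPCOMP */
        t->id.daddr = *remote;          /* 3. remote tunnel endpoint */
        t->id.spi   = 0;                /* 4. zero: acquire SPI dynamically */
        t->saddr    = *local;           /* 5. local tunnel endpoint */
        t->ealgos   = ~(u32)0;          /* 6. any ESP algorithm allowed */
        t->share    = XFRM_SHARE_ANY;   /* 7. sharing mode */
}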
210 u8 mode; member
245 /* Data for care-of address */
262 /* replay detection mode */
284 /* used to fix curlft->add_time when changing date */
315 return read_pnet(&x->xs_net); in xs_net()
318 /* xflags - make enum if more show up */
470 if (!x->type_offload) in xfrm_unset_type_offload()
473 module_put(x->type_offload->owner); in xfrm_unset_type_offload()
474 x->type_offload = NULL; in xfrm_unset_type_offload()
478 * struct xfrm_mode_cbs - XFRM mode callbacks
480 * @init_state: Add/init mode specific state in `xfrm_state *x`
481 * @clone_state: Copy mode specific values from `orig` to new state `x`
482 * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
483 * @user_init: Process mode specific netlink attributes from user
485 * @sa_len: Return space required to store mode specific netlink attributes
487 * @input: Process received packet from SA using mode
488 * @output: Output given packet using mode
489 * @prepare_output: Add mode specific encapsulation to packet in skb. On return
513 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
514 void xfrm_unregister_mode_cbs(u8 mode);
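A hedged sketch of how a mode module might use the registration API declared just above. my_mode_cbs and MY_XFRM_MODE are illustrative names, the callback bodies are omitted, and the .owner member is assumed from current kernels:

/* Sketch: register/unregister mode callbacks from a module.
 * Needs <linux/module.h> and <net/xfrm.h>; callback implementations
 * (.init_state, .input, .output, ...) are omitted here.
 */
static const struct xfrm_mode_cbs my_mode_cbs = {
        .owner = THIS_MODULE,
        /* .init_state = ..., .destroy_state = ..., .input = ..., .output = ..., */
};

static int __init my_mode_init(void)
{
        return xfrm_register_mode_cbs(MY_XFRM_MODE, &my_mode_cbs);
}
module_init(my_mode_init);

static void __exit my_mode_exit(void)
{
        xfrm_unregister_mode_cbs(MY_XFRM_MODE);
}
module_exit(my_mode_exit);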
530 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) || in xfrm_ip2inner_mode()
531 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6)) in xfrm_ip2inner_mode()
532 return &x->inner_mode; in xfrm_ip2inner_mode()
534 return &x->inner_mode_iaf; in xfrm_ip2inner_mode()
539 * daddr - destination of tunnel, may be zero for transport mode.
540 * spi - zero to acquire spi. Not zero if spi is static, then
542 * proto - AH/ESP/IPCOMP
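These three values form the lookup key for inbound SAs. As an assumption-laden sketch (xfrm_state_lookup() is declared elsewhere in this header and its exact signature may differ by kernel version), resolving an IPv4 ESP SA by SPI could look like:

/* Sketch: look up an IPv4 ESP SA by the (daddr, spi, proto) key above. */
static struct xfrm_state *find_esp_sa(struct net *net, u32 mark,
                                      const xfrm_address_t *daddr, __be32 spi)
{
        return xfrm_state_lookup(net, mark, daddr, spi, IPPROTO_ESP, AF_INET);
}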
553 /* Mode: transport, tunnel etc. */
554 u8 mode; member
556 /* Sharing mode: unique, this session only, this user only etc. */
592 * struct xfrm_policy - xfrm policy
599 * @pos: kernel internal tie-breaker to determine age of policy
660 return read_pnet(&xp->xp_net); in xp_net()
676 u8 mode; member
727 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
750 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
754 * to transmit header information to the mode input/output functions.
782 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
796 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
891 refcount_inc(&policy->refcnt); in xfrm_pol_hold()
898 if (refcount_dec_and_test(&policy->refcnt)) in xfrm_pol_put()
905 for (i = npols - 1; i >= 0; --i) in xfrm_pols_put()
913 refcount_dec(&x->refcnt); in __xfrm_state_put()
918 if (refcount_dec_and_test(&x->refcnt)) in xfrm_state_put()
924 if (refcount_dec_and_test(&x->refcnt)) in xfrm_state_put_sync()
930 refcount_inc(&x->refcnt); in xfrm_state_hold()
951 mask = htonl((0xffffffff) << (32 - pbi)); in addr_match()
965 return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen))); in addr4_match()
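Both helpers mask off the host bits of the prefix before comparing, so, for example, 192.168.1.5 matches 192.168.1.0/24. A small self-contained illustration using addr4_match() from just above:

/* Sketch: prefix matching with addr4_match(). */
static bool example_prefix_match(void)
{
        __be32 subnet = htonl(0xc0a80100);      /* 192.168.1.0 */
        __be32 host   = htonl(0xc0a80105);      /* 192.168.1.5 */

        /* mask is htonl(~0UL << 8): top 24 bits kept, so this returns true */
        return addr4_match(host, subnet, 24);
}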
972 switch (fl->flowi_proto) { in xfrm_flowi_sport()
977 port = uli->ports.sport; in xfrm_flowi_sport()
981 port = htons(uli->icmpt.type); in xfrm_flowi_sport()
984 port = htons(uli->mht.type); in xfrm_flowi_sport()
987 port = htons(ntohl(uli->gre_key) >> 16); in xfrm_flowi_sport()
999 switch (fl->flowi_proto) { in xfrm_flowi_dport()
1004 port = uli->ports.dport; in xfrm_flowi_dport()
1008 port = htons(uli->icmpt.code); in xfrm_flowi_dport()
1011 port = htons(ntohl(uli->gre_key) & 0xffff); in xfrm_flowi_dport()
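For protocols without real ports, these helpers fold other identifiers into the selector's port fields: ICMP type/code, the Mobility Header type, and the two halves of the GRE key. A small sketch of the GRE case, mirroring the lines above:

/* Sketch: a 32-bit GRE key becomes "ports" for selector matching. */
static void gre_key_to_ports(__be32 gre_key, __be16 *sport, __be16 *dport)
{
        *sport = htons(ntohl(gre_key) >> 16);           /* upper 16 bits */
        *dport = htons(ntohl(gre_key) & 0xffff);        /* lower 16 bits */
}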
1023 /* If neither has a context --> match
1030 (s1->ctx_sid == s2->ctx_sid) && in xfrm_sec_ctx_match()
1031 (s1->ctx_doi == s2->ctx_doi) && in xfrm_sec_ctx_match()
1032 (s1->ctx_alg == s2->ctx_alg))); in xfrm_sec_ctx_match()
1043 xdst->child points to the next element of the bundle.
1044 dst->xfrm points to an instance of a transformer.
1074 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { in xfrm_dst_path()
1077 return xdst->path; in xfrm_dst_path()
1086 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { in xfrm_dst_child()
1088 return xdst->child; in xfrm_dst_child()
1097 xdst->child = child; in xfrm_dst_set_child()
1102 xfrm_pols_put(xdst->pols, xdst->num_pols); in xfrm_dst_destroy()
1103 dst_release(xdst->route); in xfrm_dst_destroy()
1104 if (likely(xdst->u.dst.xfrm)) in xfrm_dst_destroy()
1105 xfrm_state_put(xdst->u.dst.xfrm); in xfrm_dst_destroy()
1154 /* Used to keep whole l2 header for transport mode GRO */
1185 return addr->a4 == 0; in xfrm_addr_any()
1187 return ipv6_addr_any(&addr->in6); in xfrm_addr_any()
1195 return (tmpl->saddr.a4 && in __xfrm4_state_addr_cmp()
1196 tmpl->saddr.a4 != x->props.saddr.a4); in __xfrm4_state_addr_cmp()
1202 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) && in __xfrm6_state_addr_cmp()
1203 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr)); in __xfrm6_state_addr_cmp()
1223 return sp->xvec[sp->len - 1]; in xfrm_input_state()
1232 if (!sp || !sp->olen || sp->len != sp->olen) in xfrm_offload()
1235 return &sp->ovec[sp->olen - 1]; in xfrm_offload()
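xfrm_input_state() returns the most recently added state in the secpath, and xfrm_offload() the matching offload descriptor (or NULL). A minimal sketch of an input-path check built on these helpers and the CRYPTO_DONE/CRYPTO_SUCCESS flags seen further down:

/* Sketch: did hardware already verify/decrypt this packet? */
static bool rx_crypto_done(struct sk_buff *skb)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        return xo && (xo->flags & CRYPTO_DONE) && (xo->status & CRYPTO_SUCCESS);
}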
1248 if (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) in __xfrm_check_nopolicy()
1249 return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT; in __xfrm_check_nopolicy()
1261 return IPCB(skb)->flags & IPSKB_NOPOLICY; in __xfrm_check_dev_nopolicy()
1263 return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY); in __xfrm_check_dev_nopolicy()
1270 struct net *net = dev_net(skb->dev); in __xfrm_policy_check2()
1275 if (sk && sk->sk_policy[XFRM_POLICY_IN]) in __xfrm_policy_check2()
1280 if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) { in __xfrm_policy_check2()
1281 bool check = (xo->flags & CRYPTO_DONE) && in __xfrm_policy_check2()
1282 (xo->status & CRYPTO_SUCCESS); in __xfrm_policy_check2()
1347 struct net *net = dev_net(skb->dev); in xfrm_route_forward()
1349 if (!net->xfrm.policy_count[XFRM_POLICY_OUT] && in xfrm_route_forward()
1350 net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT) in xfrm_route_forward()
1353 return (skb_dst(skb)->flags & DST_NOXFRM) || in xfrm_route_forward()
1373 sk->sk_policy[0] = NULL; in xfrm_sk_clone_policy()
1374 sk->sk_policy[1] = NULL; in xfrm_sk_clone_policy()
1375 if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) in xfrm_sk_clone_policy()
1386 pol = rcu_dereference_protected(sk->sk_policy[0], 1); in xfrm_sk_free_policy()
1389 sk->sk_policy[0] = NULL; in xfrm_sk_free_policy()
1391 pol = rcu_dereference_protected(sk->sk_policy[1], 1); in xfrm_sk_free_policy()
1394 sk->sk_policy[1] = NULL; in xfrm_sk_free_policy()
1420 return -ENOSYS; in xfrm_decode_session_reverse()
1439 return (xfrm_address_t *)&fl->u.ip4.daddr; in xfrm_flowi_daddr()
1441 return (xfrm_address_t *)&fl->u.ip6.daddr; in xfrm_flowi_daddr()
1451 return (xfrm_address_t *)&fl->u.ip4.saddr; in xfrm_flowi_saddr()
1453 return (xfrm_address_t *)&fl->u.ip6.saddr; in xfrm_flowi_saddr()
1465 memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4)); in xfrm_flowi_addr_get()
1466 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); in xfrm_flowi_addr_get()
1469 saddr->in6 = fl->u.ip6.saddr; in xfrm_flowi_addr_get()
1470 daddr->in6 = fl->u.ip6.daddr; in xfrm_flowi_addr_get()
1479 if (daddr->a4 == x->id.daddr.a4 && in __xfrm4_state_addr_check()
1480 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4)) in __xfrm4_state_addr_check()
1489 if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) && in __xfrm6_state_addr_check()
1490 (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) || in __xfrm6_state_addr_check()
1492 ipv6_addr_any((struct in6_addr *)&x->props.saddr))) in __xfrm6_state_addr_check()
1518 (const xfrm_address_t *)&fl->u.ip4.daddr, in xfrm_state_addr_flow_check()
1519 (const xfrm_address_t *)&fl->u.ip4.saddr); in xfrm_state_addr_flow_check()
1522 (const xfrm_address_t *)&fl->u.ip6.daddr, in xfrm_state_addr_flow_check()
1523 (const xfrm_address_t *)&fl->u.ip6.saddr); in xfrm_state_addr_flow_check()
1530 return atomic_read(&x->tunnel_users); in xfrm_state_kern()
1693 u8 mode, u8 proto, u32 reqid);
1701 struct xfrm_dev_offload *xdo = &x->xso; in xfrm_dev_state_update_stats()
1702 struct net_device *dev = READ_ONCE(xdo->dev); in xfrm_dev_state_update_stats()
1704 if (dev && dev->xfrmdev_ops && in xfrm_dev_state_update_stats()
1705 dev->xfrmdev_ops->xdo_dev_state_update_stats) in xfrm_dev_state_update_stats()
1706 dev->xfrmdev_ops->xdo_dev_state_update_stats(x); in xfrm_dev_state_update_stats()
1798 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; in xfrm4_rcv_spi()
1799 XFRM_SPI_SKB_CB(skb)->family = AF_INET; in xfrm4_rcv_spi()
1800 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); in xfrm4_rcv_spi()
1842 return -ENOPROTOOPT; in xfrm_user_policy()
1873 u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1932 return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0; in xfrm_addr_equal()
1956 nlsk = rcu_dereference(net->xfrm.nlsk); in xfrm_aevent_is_on()
1969 nlsk = rcu_dereference(net->xfrm.nlsk); in xfrm_acquire_is_on()
1980 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); in aead_len()
1985 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); in xfrm_alg_len()
1990 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); in xfrm_alg_auth_len()
1995 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32); in xfrm_replay_state_esn_len()
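alg_key_len is expressed in bits, so the helpers above round up to whole bytes, and the ESN length scales with the bitmap word count: a 256-bit key contributes 32 bytes on top of the struct, and bmp_len == 4 adds 16 bytes. A trivial sketch of the same rounding:

/* Sketch: bits-to-bytes rounding used by the *_len() helpers above. */
static unsigned int key_bytes(unsigned int alg_key_len_bits)
{
        return (alg_key_len_bits + 7) / 8;      /* 256 -> 32, 257 -> 33 */
}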
2003 x->replay_esn = kmemdup(orig->replay_esn, in xfrm_replay_clone()
2004 xfrm_replay_state_esn_len(orig->replay_esn), in xfrm_replay_clone()
2006 if (!x->replay_esn) in xfrm_replay_clone()
2007 return -ENOMEM; in xfrm_replay_clone()
2008 x->preplay_esn = kmemdup(orig->preplay_esn, in xfrm_replay_clone()
2009 xfrm_replay_state_esn_len(orig->preplay_esn), in xfrm_replay_clone()
2011 if (!x->preplay_esn) in xfrm_replay_clone()
2012 return -ENOMEM; in xfrm_replay_clone()
2066 struct xfrm_dev_offload *xso = &x->xso; in xfrm_dev_state_advance_esn()
2067 struct net_device *dev = READ_ONCE(xso->dev); in xfrm_dev_state_advance_esn()
2069 if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn) in xfrm_dev_state_advance_esn()
2070 dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); in xfrm_dev_state_advance_esn()
2075 struct xfrm_state *x = dst->xfrm; in xfrm_dst_offload_ok()
2078 if (!x || !x->type_offload) in xfrm_dst_offload_ok()
2082 if (!x->xso.offload_handle && !xdst->child->xfrm) in xfrm_dst_offload_ok()
2084 if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) && in xfrm_dst_offload_ok()
2085 !xdst->child->xfrm) in xfrm_dst_offload_ok()
2093 struct xfrm_dev_offload *xdo = &x->xdo; in xfrm_dev_policy_delete()
2094 struct net_device *dev = xdo->dev; in xfrm_dev_policy_delete()
2096 if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete) in xfrm_dev_policy_delete()
2097 dev->xfrmdev_ops->xdo_dev_policy_delete(x); in xfrm_dev_policy_delete()
2102 struct xfrm_dev_offload *xdo = &x->xdo; in xfrm_dev_policy_free()
2103 struct net_device *dev = xdo->dev; in xfrm_dev_policy_free()
2105 if (dev && dev->xfrmdev_ops) { in xfrm_dev_policy_free()
2106 if (dev->xfrmdev_ops->xdo_dev_policy_free) in xfrm_dev_policy_free()
2107 dev->xfrmdev_ops->xdo_dev_policy_free(x); in xfrm_dev_policy_free()
2108 xdo->dev = NULL; in xfrm_dev_policy_free()
2109 netdev_put(dev, &xdo->dev_tracker); in xfrm_dev_policy_free()
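These wrappers all dispatch through dev->xfrmdev_ops. A hedged driver-side sketch showing only the callbacks referenced in the fragments above; the member names are taken from those calls, while the signatures are assumed and may vary by kernel version:

/* Sketch: a driver's ops table for xfrm state/policy offload hooks. */
static void my_xdo_state_update_stats(struct xfrm_state *x)
{
        /* read hardware packet/byte counters into the state */
}

static void my_xdo_state_advance_esn(struct xfrm_state *x)
{
        /* push the updated ESN window to the hardware */
}

static const struct xfrmdev_ops my_xfrmdev_ops = {
        .xdo_dev_state_update_stats = my_xdo_state_update_stats,
        .xdo_dev_state_advance_esn  = my_xdo_state_advance_esn,
        /* .xdo_dev_policy_delete, .xdo_dev_policy_free, ... */
};
/* a driver would point dev->xfrmdev_ops at this table during setup */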
2174 m->v = m->m = 0; in xfrm_mark_get()
2176 return m->v & m->m; in xfrm_mark_get()
2183 if (m->m | m->v) in xfrm_mark_put()
2190 struct xfrm_mark *m = &x->props.smark; in xfrm_smark_get()
2192 return (m->v & m->m) | (mark & ~m->m); in xfrm_smark_get()
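xfrm_smark_get() overlays the state's secondary mark onto the masked bits of the given mark: (m->v & m->m) | (mark & ~m->m). As a worked example, with v = 0x0000ab00, m = 0x0000ff00 and mark = 0x12345678 the result is 0x1234ab78:

/* Sketch: the smark overlay computation with concrete numbers. */
static u32 example_smark(void)
{
        struct xfrm_mark sm = { .v = 0x0000ab00, .m = 0x0000ff00 };
        u32 mark = 0x12345678;

        return (sm.v & sm.m) | (mark & ~sm.m);  /* == 0x1234ab78 */
}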
2211 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) in xfrm_tunnel_check()
2215 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6) in xfrm_tunnel_check()
2219 if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL)) in xfrm_tunnel_check()
2220 return -EINVAL; in xfrm_tunnel_check()
2232 /* Allocate nlmsg with 64-bit translation of received 32-bit message */
2237 /* Translate 32-bit user_policy from sockptr */
2263 if (!sk || sk->sk_family != AF_INET6) in xfrm6_local_dontfrag()
2266 proto = sk->sk_protocol; in xfrm6_local_dontfrag()