Lines matching "+full:xo +full:-1" in net/xfrm/xfrm_device.c (excerpt)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 */
/* __xfrm_transport_prep() */
        struct xfrm_offload *xo = xfrm_offload(skb);
        ...
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;

        pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
/* __xfrm_mode_tunnel_prep() */
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;
        ...
                  skb->mac_len + x->props.header_len - x->props.enc_hdr_len);
/* __xfrm_mode_beet_prep() */
        struct xfrm_offload *xo = xfrm_offload(skb);
        ...
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;
        ...
        if (x->sel.family != AF_INET6) {
                ...
                if (x->outer_mode.family == AF_INET6)
                        phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        }

        pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
/* xfrm_outer_mode_prep() - one case per encapsulation mode; each branch
 * checks the outer address family, the calls themselves are elided.
 */
        switch (x->outer_mode.encap) {
        ...
                if (x->outer_mode.family == AF_INET)
                        ...
                if (x->outer_mode.family == AF_INET6)
                        ...
        ...
                if (x->outer_mode.family == AF_INET)
                        ...
                if (x->outer_mode.family == AF_INET6)
                        ...
        ...
                if (x->outer_mode.family == AF_INET)
                        ...
                if (x->outer_mode.family == AF_INET6)
                        ...
/* xmit_xfrm_check_overflow() */
        struct xfrm_offload *xo = xfrm_offload(skb);
        __u32 seq = xo->seq.low;

        seq += skb_shinfo(skb)->gso_segs;
        if (unlikely(seq < xo->seq.low))
                ...
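/*
 * Illustrative sketch, not part of xfrm_device.c: the check above relies on
 * unsigned 32-bit wrap-around. Adding the number of GSO segments to the low
 * word of the sequence number and comparing against the starting value
 * detects whether the counter would overflow while transmitting this one
 * segmented skb. The helper name below is hypothetical.
 */
static inline bool xfrm_seq_low_would_wrap(__u32 seq_low, unsigned int nsegs)
{
        /* if the sum wrapped past 0xffffffff it compares smaller than seq_low */
        return (__u32)(seq_low + nsegs) < seq_low;
}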
/* validate_xmit_xfrm() */
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct net_device *dev = skb->dev;
        ...
        if (!xo || (xo->flags & XFRM_XMIT))
                ...
        x = sp->xvec[sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
                ...
        if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
                ...
        }

        if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
                ...
        err = !skb_queue_empty(&sd->xfrm_backlog);
        ...
        if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
                                ...
        if (!skb->next) {
                esp_features |= skb->dev->gso_partial_features;
                ...
                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                ...
                if (err == -EINPROGRESS)
                        ...
                skb_push(skb, skb->data - skb_mac_header(skb));
                ...
        }

        /* per-segment path (GSO segments linked via skb->next): */
                esp_features |= skb->dev->gso_partial_features;
                ...
                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;
                ...
                err = x->type_offload->xmit(x, skb2, esp_features);
                ...
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        ...
                        skb2->next = nskb;
                        ...
                        pskb->next = nskb;
                ...
                skb_push(skb2, skb2->data - skb_mac_header(skb2));
/* xfrm_dev_state_add() */
        struct xfrm_dev_offload *xso = &x->xso;
        ...
        if (xuo->flags &
            ...
                return -EINVAL;
        }

        if ((xuo->flags & XFRM_OFFLOAD_INBOUND && x->dir == XFRM_SA_DIR_OUT) ||
            (!(xuo->flags & XFRM_OFFLOAD_INBOUND) && x->dir == XFRM_SA_DIR_IN)) {
                ...
                return -EINVAL;
        }

        is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
        ...
        if (x->tfcpad) {
                ...
                return -EINVAL;
        }

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                ...
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }
                ...
                dst = __xfrm_dst_lookup(x->props.family, &params);
                ...
                        return (is_packet_offload) ? -EINVAL : 0;

                dev = dst->dev;
                ...
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                ...
                return (is_packet_offload) ? -EINVAL : 0;
        }

        if (!is_packet_offload && x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                ...
                xso->dev = NULL;
                ...
                return -EINVAL;
        }
        ...
        if (!x->type_offload) {
                ...
                return -EINVAL;
        }

        xso->dev = dev;
        netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
        xso->real_dev = dev;

        if (xuo->flags & XFRM_OFFLOAD_INBOUND)
                xso->dir = XFRM_DEV_OFFLOAD_IN;
        else
                xso->dir = XFRM_DEV_OFFLOAD_OUT;

        if (is_packet_offload)
                xso->type = XFRM_DEV_OFFLOAD_PACKET;
        else
                xso->type = XFRM_DEV_OFFLOAD_CRYPTO;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack);
        if (err) {
                xso->dev = NULL;
                xso->dir = 0;
                xso->real_dev = NULL;
                netdev_put(dev, &xso->dev_tracker);
                xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
                ...
                if ((err != -EOPNOTSUPP && !is_packet_offload) || is_packet_offload) {
                        ...
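/*
 * Hedged sketch of the driver side, not part of xfrm_device.c: a hypothetical
 * driver publishes callbacks through dev->xfrmdev_ops, and
 * xfrm_dev_state_add() above then calls xdo_dev_state_add(x, extack), undoing
 * the offload setup if it fails. The argument order follows the call site
 * visible above; the authoritative prototypes live in
 * include/linux/netdevice.h and may differ between kernel versions. A
 * complete driver must also provide xdo_dev_state_delete (enforced by
 * xfrm_api_check() further down), whose prototype is not visible in this
 * excerpt and is therefore omitted. All "sample_" names are made up.
 */
static int sample_xdo_dev_state_add(struct xfrm_state *x,
                                    struct netlink_ext_ack *extack)
{
        /* program the SA (keys, SPI, direction) into hardware here;
         * a negative errno makes xfrm_dev_state_add() reset xso->dev,
         * xso->dir and xso->type and drop the netdev reference.
         */
        return 0;
}

static const struct xfrmdev_ops sample_xfrmdev_ops = {
        .xdo_dev_state_add = sample_xdo_dev_state_add,
        /* .xdo_dev_state_delete, .xdo_dev_offload_ok, ... as supported */
};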
/* xfrm_dev_policy_add() */
        struct xfrm_dev_offload *xdo = &xp->xdo;
        ...
        if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
                ...
                return -EINVAL;
        }

        dev = dev_get_by_index(net, xuo->ifindex);
        ...
                return -EINVAL;

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
                xdo->dev = NULL;
                ...
                return -EINVAL;
        }

        xdo->dev = dev;
        netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
        xdo->real_dev = dev;
        xdo->type = XFRM_DEV_OFFLOAD_PACKET;
        /* switch on the policy direction (case labels elided): */
                xdo->dir = XFRM_DEV_OFFLOAD_IN;
                ...
                xdo->dir = XFRM_DEV_OFFLOAD_OUT;
                ...
                xdo->dir = XFRM_DEV_OFFLOAD_FWD;
                ...
                xdo->dev = NULL;
                netdev_put(dev, &xdo->dev_tracker);
                ...
                return -EINVAL;
        }

        err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
        if (err) {
                xdo->dev = NULL;
                xdo->real_dev = NULL;
                xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
                xdo->dir = 0;
                netdev_put(dev, &xdo->dev_tracker);
                ...
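/*
 * Hedged sketch, not part of xfrm_device.c: packet offload additionally
 * requires a policy hook. xfrm_dev_policy_add() above calls
 * xdo_dev_policy_add(xp, extack) after filling xp->xdo with the device,
 * type and direction. A hypothetical driver callback could look like this;
 * the "sample_" name is made up and the exact prototype is defined in
 * include/linux/netdevice.h.
 */
static int sample_xdo_dev_policy_add(struct xfrm_policy *xp,
                                     struct netlink_ext_ack *extack)
{
        /* install a hardware rule matching the policy; on error the core
         * resets xp->xdo (dev, dir, type) and drops the netdev reference,
         * as seen in the error path above.
         */
        return 0;
}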
/* xfrm_dev_offload_ok() */
        struct net_device *dev = x->xso.dev;
        ...
        if (!x->type_offload ||
            (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
                ...

        if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
            ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
             !xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        ...
        }
        ...
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
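/*
 * Hedged sketch, not part of xfrm_device.c: xdo_dev_offload_ok() is the
 * driver's per-packet veto. xfrm_dev_offload_ok() above only calls it after
 * its own MTU and routing checks, and the packet falls back to the software
 * path when it returns false. The hypothetical example below mirrors the
 * (skb, x) argument order of the call site above.
 */
static bool sample_xdo_dev_offload_ok(struct sk_buff *skb,
                                      struct xfrm_state *x)
{
        /* e.g. hardware that cannot parse IPv4 options rejects such packets */
        if (x->props.family == AF_INET && ip_hdr(skb)->ihl != 5)
                return false;

        return true;
}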
/* xfrm_dev_resume() */
        struct net_device *dev = skb->dev;
        ...
        skb_queue_tail(&sd->xfrm_backlog, skb);
/* xfrm_dev_backlog() */
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        ...
        spin_lock(&xfrm_backlog->lock);
        ...
        spin_unlock(&xfrm_backlog->lock);
/* xfrm_api_check() */
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                ...

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                ...

        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                ...
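/*
 * Hedged sketch, not part of xfrm_device.c: xfrm_api_check() above rejects a
 * device that advertises NETIF_F_HW_ESP_TX_CSUM without NETIF_F_HW_ESP, or
 * NETIF_F_HW_ESP without state add/delete callbacks. A hypothetical driver
 * probe path consistent with those checks could therefore look like this
 * (the "sample_" names are made up; sample_xfrmdev_ops is the ops table from
 * the sketch earlier in this listing):
 */
static void sample_enable_esp_offload(struct net_device *dev)
{
        dev->xfrmdev_ops = &sample_xfrmdev_ops; /* must include add + delete */
        dev->features |= NETIF_F_HW_ESP;
        dev->hw_features |= NETIF_F_HW_ESP;

        /* only if the hardware also checksums the inner packet */
        dev->features |= NETIF_F_HW_ESP_TX_CSUM;
        dev->hw_features |= NETIF_F_HW_ESP_TX_CSUM;
}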
/* xfrm_dev_down() */
        if (dev->features & NETIF_F_HW_ESP) {
                ...