
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Florian La Roche <rzsfl@rz.uni-sb.de>
19 * Ray VanTassle : Fixed --skb->lock in free
153 * drop_reasons_register_subsys - register another drop reason subsystem
172 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
191 * skb_panic - private function for out-of-line support
197 * Out-of-line support for skb_put() and skb_push().
205 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n", in skb_panic()
206 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
207 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
208 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
246 data = __page_frag_alloc_align(&nc->page, fragsz, in __napi_alloc_frag_align()
280 if (unlikely(!nc->skb_count)) { in napi_skb_cache_get()
281 nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, in napi_skb_cache_get()
284 nc->skb_cache); in napi_skb_cache_get()
285 if (unlikely(!nc->skb_count)) { in napi_skb_cache_get()
291 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
303 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in __finalize_skb_around()
306 skb->truesize = SKB_TRUESIZE(size); in __finalize_skb_around()
307 refcount_set(&skb->users, 1); in __finalize_skb_around()
308 skb->head = data; in __finalize_skb_around()
309 skb->data = data; in __finalize_skb_around()
312 skb->mac_header = (typeof(skb->mac_header))~0U; in __finalize_skb_around()
313 skb->transport_header = (typeof(skb->transport_header))~0U; in __finalize_skb_around()
314 skb->alloc_cpu = raw_smp_processor_id(); in __finalize_skb_around()
318 atomic_set(&shinfo->dataref, 1); in __finalize_skb_around()
380 * __build_skb - build a network buffer
415 * takes care of skb->head and skb->pfmemalloc
422 skb->head_frag = 1; in build_skb()
430 * build_skb_around - build a network buffer around provided skb
444 skb->head_frag = 1; in build_skb_around()
452 * __napi_build_skb - build a network buffer
476 * napi_build_skb - build a network buffer
480 * Version of __napi_build_skb() that takes care of skb->head_frag
481 * and skb->pfmemalloc when the data is a page or page fragment.
490 skb->head_frag = 1; in napi_build_skb()
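
For illustration, a minimal sketch (not taken from this file) of how a driver would size a fragment so build_skb() can wrap it without copying; napi_alloc_frag() as the buffer source and the single `len` parameter are assumptions.

#include <linux/skbuff.h>

/* Hedged sketch: wrap a driver-owned page fragment in an skb. The buffer
 * must leave room for headroom plus the trailing struct skb_shared_info,
 * which is exactly the layout build_skb() expects.
 */
static struct sk_buff *example_build_skb(unsigned int len)
{
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *buf;

	buf = napi_alloc_frag(truesize);
	if (!buf)
		return NULL;

	skb = build_skb(buf, truesize);		/* frag_size != 0 => skb->head_frag is set */
	if (!skb) {
		skb_free_frag(buf);
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);		/* headroom */
	skb_put(skb, len);			/* payload assumed already written by DMA */
	return skb;
}
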
528 /* The following cast might truncate high-order bits of obj_size, this in kmalloc_reserve()
561 * __alloc_skb - allocate a network buffer
604 * Both skb->head and skb_shared_info are cache line aligned. in __alloc_skb()
622 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
629 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
630 refcount_set(&fclones->fclone_ref, 1); in __alloc_skb()
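
For illustration, a minimal sketch (not from this file) of the plain allocate/reserve/fill/free cycle that __alloc_skb() backs; LL_MAX_HEADER and the 128-byte payload are arbitrary example choices.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hedged example of the canonical alloc_skb() life cycle. */
static int example_alloc_skb(gfp_t gfp)
{
	struct sk_buff *skb;
	void *payload;

	skb = alloc_skb(LL_MAX_HEADER + 128, gfp);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_MAX_HEADER);	/* keep headroom for headers pushed later */
	payload = skb_put(skb, 128);		/* advance tail; returns the old tail */
	memset(payload, 0, 128);

	/* A real caller would now hand the skb on; error paths use kfree_skb(),
	 * successful consumption uses consume_skb(). */
	kfree_skb(skb);
	return 0;
}
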
642 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
665 * we use kmalloc() for skb->head allocation. in __netdev_alloc_skb()
707 skb->pfmemalloc = 1; in __netdev_alloc_skb()
708 skb->head_frag = 1; in __netdev_alloc_skb()
712 skb->dev = dev; in __netdev_alloc_skb()
720 * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
727 * CPU cycles by avoiding having to disable and re-enable IRQs.
743 * we use kmalloc() for skb->head allocation. in napi_alloc_skb()
763 data = page_frag_alloc(&nc->page, len, gfp_mask); in napi_alloc_skb()
764 pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page); in napi_alloc_skb()
777 skb->pfmemalloc = 1; in napi_alloc_skb()
778 skb->head_frag = 1; in napi_alloc_skb()
782 skb->dev = napi->dev; in napi_alloc_skb()
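
For illustration, a sketch of the simplest (copy-based) receive path built on napi_alloc_skb(); `napi`, `netdev`, `data` and `len` are assumed driver state, not anything defined in skbuff.c.

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Hedged sketch of a copy-mode RX completion handler. */
static void example_rx(struct napi_struct *napi, struct net_device *netdev,
		       const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, len);	/* headroom already reserved by the helper */
	if (unlikely(!skb))
		return;				/* drop; a real driver would count this */

	skb_put_data(skb, data, len);		/* copy the frame into the linear area */
	skb->protocol = eth_type_trans(skb, netdev);
	napi_gro_receive(napi, skb);
}
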
795 skb->len += size; in skb_add_rx_frag_netmem()
796 skb->data_len += size; in skb_add_rx_frag_netmem()
797 skb->truesize += truesize; in skb_add_rx_frag_netmem()
804 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
809 skb->len += size; in skb_coalesce_rx_frag()
810 skb->data_len += size; in skb_coalesce_rx_frag()
811 skb->truesize += truesize; in skb_coalesce_rx_frag()
823 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
852 return -EOPNOTSUPP; in skb_pp_cow_data()
854 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom); in skb_pp_cow_data()
855 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) in skb_pp_cow_data()
856 return -ENOMEM; in skb_pp_cow_data()
858 size = min_t(u32, skb->len, max_head_size); in skb_pp_cow_data()
862 return -ENOMEM; in skb_pp_cow_data()
867 return -ENOMEM; in skb_pp_cow_data()
874 err = skb_copy_bits(skb, 0, nskb->data, size); in skb_pp_cow_data()
881 head_off = skb_headroom(nskb) - skb_headroom(skb); in skb_pp_cow_data()
885 len = skb->len - off; in skb_pp_cow_data()
886 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { in skb_pp_cow_data()
896 return -ENOMEM; in skb_pp_cow_data()
907 len -= size; in skb_pp_cow_data()
916 return -EOPNOTSUPP; in skb_pp_cow_data()
924 if (!prog->aux->xdp_has_frags) in skb_cow_data_for_xdp()
925 return -EINVAL; in skb_cow_data_for_xdp()
936 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation in napi_pp_put_page()
955 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) in skb_pp_recycle()
961 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
966 * i.e. when skb->pp_recycle is true, and not for fragments in a
967 * non-pp-recycling skb. It has a fallback to increase references on normal
976 if (!skb->pp_recycle) in skb_pp_frag_ref()
977 return -EINVAL; in skb_pp_frag_ref()
981 for (i = 0; i < shinfo->nr_frags; i++) { in skb_pp_frag_ref()
982 head_netmem = netmem_compound_head(shinfo->frags[i].netmem); in skb_pp_frag_ref()
1001 unsigned char *head = skb->head; in skb_free_head()
1003 if (skb->head_frag) { in skb_free_head()
1021 bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS; in skb_release_data()
1028 for (i = 0; i < shinfo->nr_frags; i++) in skb_release_data()
1029 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
1032 if (shinfo->frag_list) in skb_release_data()
1033 kfree_skb_list_reason(shinfo->frag_list, reason); in skb_release_data()
1046 skb->pp_recycle = 0; in skb_release_data()
1056 switch (skb->fclone) { in kfree_skbmem()
1068 if (refcount_read(&fclones->fclone_ref) == 1) in kfree_skbmem()
1076 if (!refcount_dec_and_test(&fclones->fclone_ref)) in kfree_skbmem()
1085 if (skb->destructor) { in skb_release_head_state()
1087 skb->destructor(skb); in skb_release_head_state()
1099 if (likely(skb->head)) in skb_release_all()
1104 * __kfree_skb - private function
1139 * sk_skb_reason_drop - free an sk_buff with special reason
1168 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { in kfree_skb_add_bulk()
1174 sa->skb_array[sa->skb_count++] = skb; in kfree_skb_add_bulk()
1176 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { in kfree_skb_add_bulk()
1178 sa->skb_array); in kfree_skb_add_bulk()
1179 sa->skb_count = 0; in kfree_skb_add_bulk()
1191 struct sk_buff *next = segs->next; in kfree_skb_list_reason()
1208 * Must only be called from net_ratelimit()-ed paths.
1215 struct net_device *dev = skb->dev; in skb_dump()
1216 struct sock *sk = skb->sk; in skb_dump()
1223 len = skb->len; in skb_dump()
1225 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
1240 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
1241 has_mac ? skb->mac_header : -1, in skb_dump()
1242 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
1243 skb->mac_len, in skb_dump()
1244 skb->network_header, in skb_dump()
1245 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
1246 has_trans ? skb->transport_header : -1, in skb_dump()
1247 sh->tx_flags, sh->nr_frags, in skb_dump()
1248 sh->gso_size, sh->gso_type, sh->gso_segs, in skb_dump()
1249 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, in skb_dump()
1250 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, in skb_dump()
1251 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1252 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, in skb_dump()
1253 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, in skb_dump()
1254 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, in skb_dump()
1255 skb->inner_network_header, skb->inner_transport_header); in skb_dump()
1259 level, dev->name, &dev->features); in skb_dump()
1262 level, sk->sk_family, sk->sk_type, sk->sk_protocol); in skb_dump()
1266 16, 1, skb->head, headroom, false); in skb_dump()
1271 16, 1, skb->data, seg_len, false); in skb_dump()
1272 len -= seg_len; in skb_dump()
1278 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1279 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1286 len -= skb_frag_size(frag); in skb_dump()
1301 len -= seg_len; in skb_dump()
1316 * skb_tx_error - report an sk_buff xmit error
1333 * consume_skb - free an skbuff
1352 * __consume_stateless_skb - free an skbuff, assuming it is stateless
1374 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1376 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { in napi_skb_cache_put()
1378 kasan_mempool_unpoison_object(nc->skb_cache[i], in napi_skb_cache_put()
1382 nc->skb_cache + NAPI_SKB_CACHE_HALF); in napi_skb_cache_put()
1383 nc->skb_count = NAPI_SKB_CACHE_HALF; in napi_skb_cache_put()
1396 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1401 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1408 /* Zero budget indicate non-NAPI context called us, like netpoll */ in napi_consume_skb()
1423 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1440 new->tstamp = old->tstamp; in __copy_skb_header()
1441 /* We do not copy old->sk */ in __copy_skb_header()
1442 new->dev = old->dev; in __copy_skb_header()
1443 memcpy(new->cb, old->cb, sizeof(old->cb)); in __copy_skb_header()
1451 new->queue_mapping = old->queue_mapping; in __copy_skb_header()
1453 memcpy(&new->headers, &old->headers, sizeof(new->headers)); in __copy_skb_header()
1491 #define C(x) n->x = skb->x in __skb_clone()
1493 n->next = n->prev = NULL; in __skb_clone()
1494 n->sk = NULL; in __skb_clone()
1500 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1501 n->cloned = 1; in __skb_clone()
1502 n->nohdr = 0; in __skb_clone()
1503 n->peeked = 0; in __skb_clone()
1506 n->destructor = NULL; in __skb_clone()
1513 refcount_set(&n->users, 1); in __skb_clone()
1515 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1516 skb->cloned = 1; in __skb_clone()
1523 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1534 n->len = first->len; in alloc_skb_for_msg()
1535 n->data_len = first->len; in alloc_skb_for_msg()
1536 n->truesize = first->truesize; in alloc_skb_for_msg()
1538 skb_shinfo(n)->frag_list = first; in alloc_skb_for_msg()
1541 n->destructor = NULL; in alloc_skb_for_msg()
1548 * skb_morph - morph one skb into another
1550 * @src: the skb to supply the contents
1578 user = mmp->user ? : current_user(); in mm_account_pinned_pages()
1580 old_pg = atomic_long_read(&user->locked_vm); in mm_account_pinned_pages()
1584 return -ENOBUFS; in mm_account_pinned_pages()
1585 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); in mm_account_pinned_pages()
1587 if (!mmp->user) { in mm_account_pinned_pages()
1588 mmp->user = get_uid(user); in mm_account_pinned_pages()
1589 mmp->num_pg = num_pg; in mm_account_pinned_pages()
1591 mmp->num_pg += num_pg; in mm_account_pinned_pages()
1600 if (mmp->user) { in mm_unaccount_pinned_pages()
1601 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); in mm_unaccount_pinned_pages()
1602 free_uid(mmp->user); in mm_unaccount_pinned_pages()
1618 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1619 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1620 uarg->mmp.user = NULL; in msg_zerocopy_alloc()
1622 if (mm_account_pinned_pages(&uarg->mmp, size)) { in msg_zerocopy_alloc()
1627 uarg->ubuf.ops = &msg_zerocopy_ubuf_ops; in msg_zerocopy_alloc()
1628 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; in msg_zerocopy_alloc()
1629 uarg->len = 1; in msg_zerocopy_alloc()
1630 uarg->bytelen = size; in msg_zerocopy_alloc()
1631 uarg->zerocopy = 1; in msg_zerocopy_alloc()
1632 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; in msg_zerocopy_alloc()
1633 refcount_set(&uarg->ubuf.refcnt, 1); in msg_zerocopy_alloc()
1636 return &uarg->ubuf; in msg_zerocopy_alloc()
1653 if (uarg->ops != &msg_zerocopy_ubuf_ops) in msg_zerocopy_realloc()
1657 * so uarg->len and sk_zckey access is serialized in msg_zerocopy_realloc()
1665 bytelen = uarg_zc->bytelen + size; in msg_zerocopy_realloc()
1666 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { in msg_zerocopy_realloc()
1668 if (sk->sk_type == SOCK_STREAM) in msg_zerocopy_realloc()
1673 next = (u32)atomic_read(&sk->sk_zckey); in msg_zerocopy_realloc()
1674 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { in msg_zerocopy_realloc()
1675 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) in msg_zerocopy_realloc()
1677 uarg_zc->len++; in msg_zerocopy_realloc()
1678 uarg_zc->bytelen = bytelen; in msg_zerocopy_realloc()
1679 atomic_set(&sk->sk_zckey, ++next); in msg_zerocopy_realloc()
1682 if (sk->sk_type == SOCK_STREAM) in msg_zerocopy_realloc()
1700 old_lo = serr->ee.ee_info; in skb_zerocopy_notify_extend()
1701 old_hi = serr->ee.ee_data; in skb_zerocopy_notify_extend()
1702 sum_len = old_hi - old_lo + 1ULL + len; in skb_zerocopy_notify_extend()
1710 serr->ee.ee_data += len; in skb_zerocopy_notify_extend()
1718 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1725 mm_unaccount_pinned_pages(&uarg->mmp); in __msg_zerocopy_callback()
1730 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) in __msg_zerocopy_callback()
1733 len = uarg->len; in __msg_zerocopy_callback()
1734 lo = uarg->id; in __msg_zerocopy_callback()
1735 hi = uarg->id + len - 1; in __msg_zerocopy_callback()
1736 is_zerocopy = uarg->zerocopy; in __msg_zerocopy_callback()
1740 serr->ee.ee_errno = 0; in __msg_zerocopy_callback()
1741 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; in __msg_zerocopy_callback()
1742 serr->ee.ee_data = hi; in __msg_zerocopy_callback()
1743 serr->ee.ee_info = lo; in __msg_zerocopy_callback()
1745 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; in __msg_zerocopy_callback()
1747 q = &sk->sk_error_queue; in __msg_zerocopy_callback()
1748 spin_lock_irqsave(&q->lock, flags); in __msg_zerocopy_callback()
1750 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || in __msg_zerocopy_callback()
1755 spin_unlock_irqrestore(&q->lock, flags); in __msg_zerocopy_callback()
1769 uarg_zc->zerocopy = uarg_zc->zerocopy & success; in msg_zerocopy_complete()
1771 if (refcount_dec_and_test(&uarg->refcnt)) in msg_zerocopy_complete()
1777 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; in msg_zerocopy_put_abort()
1779 atomic_dec(&sk->sk_zckey); in msg_zerocopy_put_abort()
1780 uarg_to_msgzc(uarg)->len--; in msg_zerocopy_put_abort()
1796 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1798 if (uarg->ops->link_skb) { in skb_zerocopy_iter_stream()
1799 err = uarg->ops->link_skb(skb, uarg); in skb_zerocopy_iter_stream()
1810 return -EEXIST; in skb_zerocopy_iter_stream()
1813 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1814 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1815 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1818 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1819 skb->sk = sk; in skb_zerocopy_iter_stream()
1821 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1826 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1834 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1835 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1848 return -ENOMEM; in skb_zerocopy_clone()
1853 return -EIO; in skb_zerocopy_clone()
1861 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1877 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1883 return -EINVAL; in skb_copy_ubufs()
1886 return -EFAULT; in skb_copy_ubufs()
1899 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
1908 return -ENOMEM; in skb_copy_ubufs()
1917 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1932 copy = min_t(u32, psize - d_off, p_len - done); in skb_copy_ubufs()
1947 for (i = 0; i < new_frags - 1; i++) { in skb_copy_ubufs()
1951 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, in skb_copy_ubufs()
1953 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1962 * skb_clone - duplicate an sk_buff
1985 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1986 refcount_read(&fclones->fclone_ref) == 1) { in skb_clone()
1987 n = &fclones->skb2; in skb_clone()
1988 refcount_set(&fclones->fclone_ref, 2); in skb_clone()
1989 n->fclone = SKB_FCLONE_CLONE; in skb_clone()
1998 n->fclone = SKB_FCLONE_UNAVAILABLE; in skb_clone()
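
For illustration, a minimal sketch of the usual "keep one, hand one off" use of skb_clone(); deliver() is a hypothetical callback, and both buffers share the payload, so neither path may write to it without copying first.

#include <linux/skbuff.h>

/* Hedged example: duplicate the skb metadata (not the data) for a second consumer. */
static void example_clone(struct sk_buff *skb,
			  void (*deliver)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		deliver(clone);		/* shares skb->data with the original */

	deliver(skb);
}
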
2008 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
2009 skb->csum_start += off; in skb_headers_offset_update()
2010 /* {transport,network,mac}_header and tail are relative to skb->head */ in skb_headers_offset_update()
2011 skb->transport_header += off; in skb_headers_offset_update()
2012 skb->network_header += off; in skb_headers_offset_update()
2014 skb->mac_header += off; in skb_headers_offset_update()
2015 skb->inner_transport_header += off; in skb_headers_offset_update()
2016 skb->inner_network_header += off; in skb_headers_offset_update()
2017 skb->inner_mac_header += off; in skb_headers_offset_update()
2025 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; in skb_copy_header()
2026 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; in skb_copy_header()
2027 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; in skb_copy_header()
2039 * skb_copy - create private copy of an sk_buff
2048 * As by-product this function converts non-linear &sk_buff to linear
2064 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy()
2068 size = skb_end_offset(skb) + skb->data_len; in skb_copy()
2077 skb_put(n, skb->len); in skb_copy()
2079 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
2087 * __pskb_copy_fclone - create copy of an sk_buff with private head.
2118 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
2120 n->truesize += skb->data_len; in __pskb_copy_fclone()
2121 n->data_len = skb->data_len; in __pskb_copy_fclone()
2122 n->len = skb->len; in __pskb_copy_fclone()
2124 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
2133 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
2134 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
2137 skb_shinfo(n)->nr_frags = i; in __pskb_copy_fclone()
2141 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
2152 * pskb_expand_head - reallocate header of &sk_buff
2193 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
2197 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
2208 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
2209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
2219 off = (data + nhead) - skb->head; in pskb_expand_head()
2221 skb->head = data; in pskb_expand_head()
2222 skb->head_frag = 0; in pskb_expand_head()
2223 skb->data += off; in pskb_expand_head()
2229 skb->tail += off; in pskb_expand_head()
2231 skb->cloned = 0; in pskb_expand_head()
2232 skb->hdr_len = 0; in pskb_expand_head()
2233 skb->nohdr = 0; in pskb_expand_head()
2234 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
2238 /* It is not generally safe to change skb->truesize. in pskb_expand_head()
2242 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
2243 skb->truesize += size - osize; in pskb_expand_head()
2250 return -ENOMEM; in pskb_expand_head()
2259 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
2275 /* Note: We plan to rework this in linux-6.4 */
2283 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
2289 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
2294 /* We can not change skb->end if the original or new value in __skb_unclone_keeptruesize()
2302 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", in __skb_unclone_keeptruesize()
2310 /* We are about to change back skb->end, in __skb_unclone_keeptruesize()
2313 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
2315 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); in __skb_unclone_keeptruesize()
2323 * skb_expand_head - reallocate header of &sk_buff
2328 * if possible; copies skb->sk to new skb as needed
2336 int delta = headroom - skb_headroom(skb); in skb_expand_head()
2338 struct sock *sk = skb->sk; in skb_expand_head()
2361 delta = skb_end_offset(skb) - osize; in skb_expand_head()
2362 refcount_add(delta, &sk->sk_wmem_alloc); in skb_expand_head()
2363 skb->truesize += delta; in skb_expand_head()
2374 * skb_copy_expand - copy and expand sk_buff
2405 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy_expand()
2409 n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2418 skb_put(n, skb->len); in skb_copy_expand()
2425 head_copy_off = newheadroom - head_copy_len; in skb_copy_expand()
2428 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2429 skb->len + head_copy_len)); in skb_copy_expand()
2433 skb_headers_offset_update(n, newheadroom - oldheadroom); in skb_copy_expand()
2440 * __skb_pad - zero pad the tail of an skb
2460 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2464 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2471 /* FIXME: The use of this function with non-linear skb's really needs in __skb_pad()
2478 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2489 * pskb_put - add data to the tail of a potentially fragmented buffer
2495 * fragmented buffer. @tail must be the last fragment of @skb -- or
2504 skb->data_len += len; in pskb_put()
2505 skb->len += len; in pskb_put()
2512 * skb_put - add data to a buffer
2524 skb->tail += len; in skb_put()
2525 skb->len += len; in skb_put()
2526 if (unlikely(skb->tail > skb->end)) in skb_put()
2533 * skb_push - add data to the start of a buffer
2543 skb->data -= len; in skb_push()
2544 skb->len += len; in skb_push()
2545 if (unlikely(skb->data < skb->head)) in skb_push()
2547 return skb->data; in skb_push()
2552 * skb_pull - remove data from the start of a buffer
2568 * skb_pull_data - remove data from the start of a buffer returning its
2580 void *data = skb->data; in skb_pull_data()
2582 if (skb->len < len) in skb_pull_data()
2592 * skb_trim - remove end from a buffer
2602 if (skb->len > len) in skb_trim()
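
For illustration, a compact sketch of how the helpers documented above move skb->data and skb->len; the 8-byte header and the 64-byte trim limit are invented, and at least 8 bytes of headroom are assumed to have been reserved.

#include <linux/skbuff.h>

/* Hedged example of the data-pointer helpers on an already-filled skb. */
static void example_header_ops(struct sk_buff *skb)
{
	u8 *hdr;

	hdr = skb_push(skb, 8);		/* prepend: data -= 8, len += 8 */
	memset(hdr, 0, 8);		/* fill the freshly exposed header bytes */

	skb_pull(skb, 8);		/* undo it: data += 8, len -= 8 */

	if (skb->len > 64)
		skb_trim(skb, 64);	/* shorten the buffer from the tail */
}
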
2615 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2628 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2635 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2638 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2648 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2649 fragp = &frag->next) { in ___pskb_trim()
2650 int end = offset + frag->len; in ___pskb_trim()
2657 return -ENOMEM; in ___pskb_trim()
2659 nfrag->next = frag->next; in ___pskb_trim()
2671 unlikely((err = pskb_trim(frag, len - offset)))) in ___pskb_trim()
2674 if (frag->next) in ___pskb_trim()
2675 skb_drop_list(&frag->next); in ___pskb_trim()
2681 skb->data_len -= skb->len - len; in ___pskb_trim()
2682 skb->len = len; in ___pskb_trim()
2684 skb->len = len; in ___pskb_trim()
2685 skb->data_len = 0; in ___pskb_trim()
2689 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2699 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2700 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2702 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2705 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2707 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2710 return -EINVAL; in pskb_trim_rcsum_slow()
2717 * __pskb_pull_tail - advance tail of skb header
2747 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2769 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2770 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2774 eat -= size; in __pskb_pull_tail()
2785 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2790 if (list->len <= eat) { in __pskb_pull_tail()
2792 eat -= list->len; in __pskb_pull_tail()
2793 list = list->next; in __pskb_pull_tail()
2797 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2799 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2802 /* Sucks! We need to fork list. :-( */ in __pskb_pull_tail()
2806 insp = list->next; in __pskb_pull_tail()
2822 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2823 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2828 clone->next = list; in __pskb_pull_tail()
2829 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2837 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2838 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2842 eat -= size; in __pskb_pull_tail()
2844 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2846 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2857 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2860 skb->tail += delta; in __pskb_pull_tail()
2861 skb->data_len -= delta; in __pskb_pull_tail()
2863 if (!skb->data_len) in __pskb_pull_tail()
2871 * skb_copy_bits - copy bits from skb to kernel buffer
2891 if (offset > (int)skb->len - len) in skb_copy_bits()
2895 if ((copy = start - offset) > 0) { in skb_copy_bits()
2899 if ((len -= copy) == 0) in skb_copy_bits()
2908 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2910 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2915 if ((copy = end - offset) > 0) { in skb_copy_bits()
2924 skb_frag_off(f) + offset - start, in skb_copy_bits()
2931 if ((len -= copy) == 0) in skb_copy_bits()
2944 end = start + frag_iter->len; in skb_copy_bits()
2945 if ((copy = end - offset) > 0) { in skb_copy_bits()
2948 if (skb_copy_bits(frag_iter, offset - start, to, copy)) in skb_copy_bits()
2950 if ((len -= copy) == 0) in skb_copy_bits()
2962 return -EFAULT; in skb_copy_bits()
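
For illustration, a sketch of a common use of skb_copy_bits(): pulling a header out of a possibly non-linear skb into a stack copy. The transport-header offset `thoff` is an assumed parameter.

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Hedged example: read a UDP header regardless of how the skb is fragmented. */
static int example_read_udp(const struct sk_buff *skb, int thoff,
			    struct udphdr *uh)
{
	/* Walks the linear area, the page frags and the frag_list as needed. */
	return skb_copy_bits(skb, thoff, uh, sizeof(*uh));
}
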
2972 put_page(spd->pages[i]); in sock_spd_release()
2984 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); in linear_to_page()
2986 memcpy(page_address(pfrag->page) + pfrag->offset, in linear_to_page()
2988 *offset = pfrag->offset; in linear_to_page()
2989 pfrag->offset += *len; in linear_to_page()
2991 return pfrag->page; in linear_to_page()
2998 return spd->nr_pages && in spd_can_coalesce()
2999 spd->pages[spd->nr_pages - 1] == page && in spd_can_coalesce()
3000 (spd->partial[spd->nr_pages - 1].offset + in spd_can_coalesce()
3001 spd->partial[spd->nr_pages - 1].len == offset); in spd_can_coalesce()
3013 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) in spd_fill_page()
3022 spd->partial[spd->nr_pages - 1].len += *len; in spd_fill_page()
3026 spd->pages[spd->nr_pages] = page; in spd_fill_page()
3027 spd->partial[spd->nr_pages].len = *len; in spd_fill_page()
3028 spd->partial[spd->nr_pages].offset = offset; in spd_fill_page()
3029 spd->nr_pages++; in spd_fill_page()
3046 *off -= plen; in __splice_segment()
3052 plen -= *off; in __splice_segment()
3062 plen -= flen; in __splice_segment()
3063 *len -= flen; in __splice_segment()
3081 * If skb->head_frag is set, this 'linear' part is backed by a in __skb_splice_bits()
3085 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
3086 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
3099 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
3100 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
3112 if (*offset >= iter->len) { in __skb_splice_bits()
3113 *offset -= iter->len; in __skb_splice_bits()
3157 struct socket *sock = sk->sk_socket; in sendmsg_locked()
3161 return -EINVAL; in sendmsg_locked()
3163 if (!sock->ops->sendmsg_locked) in sendmsg_locked()
3166 return sock->ops->sendmsg_locked(sk, msg, size); in sendmsg_locked()
3171 struct socket *sock = sk->sk_socket; in sendmsg_unlocked()
3174 return -EINVAL; in sendmsg_unlocked()
3194 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3195 kv.iov_base = skb->data + offset; in __skb_send_sock()
3207 len -= ret; in __skb_send_sock()
3215 offset -= skb_headlen(skb); in __skb_send_sock()
3218 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3219 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3224 offset -= skb_frag_size(frag); in __skb_send_sock()
3227 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3228 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3230 slen = min_t(size_t, len, skb_frag_size(frag) - offset); in __skb_send_sock()
3248 len -= ret; in __skb_send_sock()
3250 slen -= ret; in __skb_send_sock()
3261 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
3264 } else if (skb->next) { in __skb_send_sock()
3265 skb = skb->next; in __skb_send_sock()
3271 return orig_len - len; in __skb_send_sock()
3274 return orig_len == len ? ret : orig_len - len; in __skb_send_sock()
3292 * skb_store_bits - store bits from kernel buffer to skb
3309 if (offset > (int)skb->len - len) in skb_store_bits()
3312 if ((copy = start - offset) > 0) { in skb_store_bits()
3316 if ((len -= copy) == 0) in skb_store_bits()
3325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
3326 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
3332 if ((copy = end - offset) > 0) { in skb_store_bits()
3341 skb_frag_off(frag) + offset - start, in skb_store_bits()
3348 if ((len -= copy) == 0) in skb_store_bits()
3361 end = start + frag_iter->len; in skb_store_bits()
3362 if ((copy = end - offset) > 0) { in skb_store_bits()
3365 if (skb_store_bits(frag_iter, offset - start, in skb_store_bits()
3368 if ((len -= copy) == 0) in skb_store_bits()
3379 return -EFAULT; in skb_store_bits()
3388 int i, copy = start - offset; in __skb_checksum()
3396 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, in __skb_checksum()
3397 skb->data + offset, copy, csum); in __skb_checksum()
3398 if ((len -= copy) == 0) in __skb_checksum()
3407 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
3409 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
3414 if ((copy = end - offset) > 0) { in __skb_checksum()
3424 skb_frag_off(frag) + offset - start, in __skb_checksum()
3427 csum2 = INDIRECT_CALL_1(ops->update, in __skb_checksum()
3431 csum = INDIRECT_CALL_1(ops->combine, in __skb_checksum()
3437 if (!(len -= copy)) in __skb_checksum()
3449 end = start + frag_iter->len; in __skb_checksum()
3450 if ((copy = end - offset) > 0) { in __skb_checksum()
3454 csum2 = __skb_checksum(frag_iter, offset - start, in __skb_checksum()
3456 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, in __skb_checksum()
3458 if ((len -= copy) == 0) in __skb_checksum()
3489 int i, copy = start - offset; in skb_copy_and_csum_bits()
3498 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3500 if ((len -= copy) == 0) in skb_copy_and_csum_bits()
3510 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3515 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3516 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3517 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3527 skb_frag_off(frag) + offset - start, in skb_copy_and_csum_bits()
3538 if (!(len -= copy)) in skb_copy_and_csum_bits()
3552 end = start + frag_iter->len; in skb_copy_and_csum_bits()
3553 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3557 offset - start, in skb_copy_and_csum_bits()
3560 if ((len -= copy) == 0) in skb_copy_and_csum_bits()
3577 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3580 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3581 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3582 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3585 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3590 /* This function assumes skb->csum already holds pseudo header's checksum,
3592 * __skb_checksum_validate_complete(). And, the original skb->csum must
3595 * It returns non-zero if the recomputed checksum is still invalid, otherwise
3596 * zero. The new checksum is stored back into skb->csum unless the skb is
3604 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3606 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3609 * re-computed checksum is valid instead, then we have a mismatch in __skb_checksum_complete()
3610 * between the original skb->csum and skb_checksum(). This means either in __skb_checksum_complete()
3611 * the original hardware checksum is incorrect or we screw up skb->csum in __skb_checksum_complete()
3612 * when moving skb->data around. in __skb_checksum_complete()
3615 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3616 !skb->csum_complete_sw) in __skb_checksum_complete()
3617 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3622 skb->csum = csum; in __skb_checksum_complete()
3623 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3624 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3625 skb->csum_valid = !sum; in __skb_checksum_complete()
3659 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3670 if (!from->head_frag || in skb_zerocopy_headlen()
3672 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { in skb_zerocopy_headlen()
3675 hlen = from->len; in skb_zerocopy_headlen()
3679 hlen = from->len; in skb_zerocopy_headlen()
3686 * skb_zerocopy - Zero copy skb to skb
3700 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
3701 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3707 int plen = 0; /* length of skb->head fragment */ in skb_zerocopy()
3712 BUG_ON(!from->head_frag && !hlen); in skb_zerocopy()
3722 len -= hlen; in skb_zerocopy()
3726 page = virt_to_head_page(from->head); in skb_zerocopy()
3727 offset = from->data - (unsigned char *)page_address(page); in skb_zerocopy()
3732 len -= plen; in skb_zerocopy()
3740 return -ENOMEM; in skb_zerocopy()
3744 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { in skb_zerocopy()
3749 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; in skb_zerocopy()
3750 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), in skb_zerocopy()
3752 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); in skb_zerocopy()
3753 len -= size; in skb_zerocopy()
3757 skb_shinfo(to)->nr_frags = j; in skb_zerocopy()
3768 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3778 if (csstart != skb->len) in skb_copy_and_csum_dev()
3780 skb->len - csstart); in skb_copy_and_csum_dev()
3782 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3783 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3791 * skb_dequeue - remove from the head of the queue
3804 spin_lock_irqsave(&list->lock, flags); in skb_dequeue()
3806 spin_unlock_irqrestore(&list->lock, flags); in skb_dequeue()
3812 * skb_dequeue_tail - remove from the tail of the queue
3824 spin_lock_irqsave(&list->lock, flags); in skb_dequeue_tail()
3826 spin_unlock_irqrestore(&list->lock, flags); in skb_dequeue_tail()
3832 * skb_queue_purge_reason - empty a list
3851 spin_lock_irqsave(&list->lock, flags); in skb_queue_purge_reason()
3853 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_purge_reason()
3860 * skb_rbtree_purge - empty a skb rbtree
3867 * out-of-order queue is protected by the socket lock).
3878 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3879 sum += skb->truesize; in skb_rbtree_purge()
3893 spin_lock_irqsave(&list->lock, flags); in skb_errqueue_purge()
3895 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || in skb_errqueue_purge()
3896 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) in skb_errqueue_purge()
3901 spin_unlock_irqrestore(&list->lock, flags); in skb_errqueue_purge()
3907 * skb_queue_head - queue a buffer at the list head
3921 spin_lock_irqsave(&list->lock, flags); in skb_queue_head()
3923 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_head()
3928 * skb_queue_tail - queue a buffer at the list tail
3942 spin_lock_irqsave(&list->lock, flags); in skb_queue_tail()
3944 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_tail()
3949 * skb_unlink - remove a buffer from a list
3962 spin_lock_irqsave(&list->lock, flags); in skb_unlink()
3964 spin_unlock_irqrestore(&list->lock, flags); in skb_unlink()
3969 * skb_append - append a buffer
3982 spin_lock_irqsave(&list->lock, flags); in skb_append()
3984 spin_unlock_irqrestore(&list->lock, flags); in skb_append()
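
For illustration, a sketch tying the locked queue helpers above together: a producer appends with skb_queue_tail(), a consumer drains with skb_dequeue(). The queue would normally be embedded in some driver or socket structure, and the init call happens once at setup time.

#include <linux/skbuff.h>

/* Hedged example of sk_buff_head producer/consumer usage. */
static void example_queue(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_head_init(q);			/* setup-time initialisation */

	skb_queue_tail(q, skb);			/* takes q->lock internally */

	while ((skb = skb_dequeue(q)) != NULL)
		kfree_skb(skb);			/* or process and consume_skb() */
}
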
3994 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3995 pos - len); in skb_split_inside_header()
3997 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3998 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
4000 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
4001 skb1->unreadable = skb->unreadable; in skb_split_inside_header()
4002 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
4003 skb1->data_len = skb->data_len; in skb_split_inside_header()
4004 skb1->len += skb1->data_len; in skb_split_inside_header()
4005 skb->data_len = 0; in skb_split_inside_header()
4006 skb->len = len; in skb_split_inside_header()
4015 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
4017 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
4018 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
4019 skb->len = len; in skb_split_no_header()
4020 skb->data_len = len - pos; in skb_split_no_header()
4023 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
4026 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
4038 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); in skb_split_no_header()
4039 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); in skb_split_no_header()
4040 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
4041 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4045 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4048 skb_shinfo(skb1)->nr_frags = k; in skb_split_no_header()
4050 skb1->unreadable = skb->unreadable; in skb_split_no_header()
4054 * skb_split - Split fragmented skb to two parts at length len.
4066 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
4075 /* Shifting from/to a cloned skb is a no-go.
4085 * skb_shift - Shifts paged data partially from skb to another
4097 * to have non-paged data as well.
4100 * specialized skb free'er to handle frags without up-to-date nr_frags.
4107 BUG_ON(shiftlen > skb->len); in skb_shift()
4114 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); in skb_shift()
4119 to = skb_shinfo(tgt)->nr_frags; in skb_shift()
4120 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4127 merge = -1; in skb_shift()
4129 merge = to - 1; in skb_shift()
4131 todo -= skb_frag_size(fragfrom); in skb_shift()
4138 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4139 fragto = &skb_shinfo(tgt)->frags[merge]; in skb_shift()
4151 /* Skip full, not-fitting skb to avoid expensive operations */ in skb_shift()
4152 if ((shiftlen == skb->len) && in skb_shift()
4153 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4159 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4163 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4164 fragto = &skb_shinfo(tgt)->frags[to]; in skb_shift()
4168 todo -= skb_frag_size(fragfrom); in skb_shift()
4188 skb_shinfo(tgt)->nr_frags = to; in skb_shift()
4191 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
4192 fragto = &skb_shinfo(tgt)->frags[merge]; in skb_shift()
4195 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
4200 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4201 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
4202 skb_shinfo(skb)->nr_frags = to; in skb_shift()
4204 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
4210 tgt->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4211 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4213 skb_len_add(skb, -shiftlen); in skb_shift()
4220 * skb_prepare_seq_read - Prepare a sequential read of skb data
4232 st->lower_offset = from; in skb_prepare_seq_read()
4233 st->upper_offset = to; in skb_prepare_seq_read()
4234 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
4235 st->frag_idx = st->stepped_offset = 0; in skb_prepare_seq_read()
4236 st->frag_data = NULL; in skb_prepare_seq_read()
4237 st->frag_off = 0; in skb_prepare_seq_read()
4242 * skb_seq_read - Sequentially read skb data
4263 * at the moment, state->root_skb could be replaced with
4269 unsigned int block_limit, abs_offset = consumed + st->lower_offset; in skb_seq_read()
4272 if (unlikely(abs_offset >= st->upper_offset)) { in skb_seq_read()
4273 if (st->frag_data) { in skb_seq_read()
4274 kunmap_atomic(st->frag_data); in skb_seq_read()
4275 st->frag_data = NULL; in skb_seq_read()
4281 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; in skb_seq_read()
4283 if (abs_offset < block_limit && !st->frag_data) { in skb_seq_read()
4284 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); in skb_seq_read()
4285 return block_limit - abs_offset; in skb_seq_read()
4288 if (!skb_frags_readable(st->cur_skb)) in skb_seq_read()
4291 if (st->frag_idx == 0 && !st->frag_data) in skb_seq_read()
4292 st->stepped_offset += skb_headlen(st->cur_skb); in skb_seq_read()
4294 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { in skb_seq_read()
4297 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; in skb_seq_read()
4304 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; in skb_seq_read()
4305 pg_off = offset_in_page(pg_off + st->frag_off); in skb_seq_read()
4306 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, in skb_seq_read()
4307 PAGE_SIZE - pg_off); in skb_seq_read()
4310 block_limit = pg_sz + st->stepped_offset; in skb_seq_read()
4312 if (!st->frag_data) in skb_seq_read()
4313 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); in skb_seq_read()
4315 *data = (u8 *)st->frag_data + pg_off + in skb_seq_read()
4316 (abs_offset - st->stepped_offset); in skb_seq_read()
4318 return block_limit - abs_offset; in skb_seq_read()
4321 if (st->frag_data) { in skb_seq_read()
4322 kunmap_atomic(st->frag_data); in skb_seq_read()
4323 st->frag_data = NULL; in skb_seq_read()
4326 st->stepped_offset += pg_sz; in skb_seq_read()
4327 st->frag_off += pg_sz; in skb_seq_read()
4328 if (st->frag_off == skb_frag_size(frag)) { in skb_seq_read()
4329 st->frag_off = 0; in skb_seq_read()
4330 st->frag_idx++; in skb_seq_read()
4334 if (st->frag_data) { in skb_seq_read()
4335 kunmap_atomic(st->frag_data); in skb_seq_read()
4336 st->frag_data = NULL; in skb_seq_read()
4339 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { in skb_seq_read()
4340 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; in skb_seq_read()
4341 st->frag_idx = 0; in skb_seq_read()
4343 } else if (st->cur_skb->next) { in skb_seq_read()
4344 st->cur_skb = st->cur_skb->next; in skb_seq_read()
4345 st->frag_idx = 0; in skb_seq_read()
4354 * skb_abort_seq_read - Abort a sequential read of skb data
4362 if (st->frag_data) in skb_abort_seq_read()
4363 kunmap_atomic(st->frag_data); in skb_abort_seq_read()
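
For illustration, a sketch of the sequential-read API above: iterate the bytes in [from, to) of an skb without linearizing it. consume() is a hypothetical per-chunk callback.

#include <linux/skbuff.h>

/* Hedged example of the skb_prepare_seq_read()/skb_seq_read() loop. */
static void example_seq_read(struct sk_buff *skb,
			     unsigned int from, unsigned int to,
			     void (*consume)(const u8 *p, unsigned int n))
{
	struct skb_seq_state st;
	unsigned int consumed = 0, avail;
	const u8 *data;

	skb_prepare_seq_read(skb, from, to, &st);

	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
		consume(data, avail);
		consumed += avail;
	}

	/* skb_seq_read() returning 0 already dropped its mapping; breaking
	 * out early instead would require skb_abort_seq_read(&st). */
}
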
4368 * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
4379 * Return: 0 on success or -EINVAL if the copy ended early
4389 return -EINVAL; in skb_copy_seq_read()
4397 len -= sqlen; in skb_copy_seq_read()
4402 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
4417 * skb_find_text - Find a text pattern in skb data
4431 unsigned int patlen = config->ops->get_pattern_len(config); in skb_find_text()
4437 config->get_next_block = skb_ts_get_next_block; in skb_find_text()
4438 config->finish = skb_ts_finish; in skb_find_text()
4443 return (ret + patlen <= to - from ? ret : UINT_MAX); in skb_find_text()
4450 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
4453 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
4459 return -EMSGSIZE; in skb_append_pagefrags()
4467 * skb_pull_rcsum - pull skb and update receive checksum
4479 unsigned char *data = skb->data; in skb_pull_rcsum()
4481 BUG_ON(len > skb->len); in skb_pull_rcsum()
4484 return skb->data; in skb_pull_rcsum()
4493 page = virt_to_head_page(frag_skb->head); in skb_head_frag_to_page_desc()
4494 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - in skb_head_frag_to_page_desc()
4504 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4512 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4519 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4523 list_skb = list_skb->next; in skb_segment_list()
4526 delta_truesize += nskb->truesize; in skb_segment_list()
4534 err = -ENOMEM; in skb_segment_list()
4539 skb->next = nskb; in skb_segment_list()
4541 tail->next = nskb; in skb_segment_list()
4544 nskb->next = list_skb; in skb_segment_list()
4550 delta_len += nskb->len; in skb_segment_list()
4552 skb_push(nskb, -skb_network_offset(nskb) + offset); in skb_segment_list()
4555 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4558 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4559 nskb->transport_header += len_diff; in skb_segment_list()
4560 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4561 nskb->data - tnl_hlen, in skb_segment_list()
4569 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
4570 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4571 skb->len = skb->len - delta_len; in skb_segment_list()
4575 skb->prev = tail; in skb_segment_list()
4586 kfree_skb_list(skb->next); in skb_segment_list()
4587 skb->next = NULL; in skb_segment_list()
4588 return ERR_PTR(-ENOMEM); in skb_segment_list()
4593 * skb_segment - Perform protocol segmentation on skb.
4595 * @features: features for the output path (see dev->features)
4606 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; in skb_segment()
4607 unsigned int mss = skb_shinfo(head_skb)->gso_size; in skb_segment()
4608 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); in skb_segment()
4613 unsigned int len = head_skb->len; in skb_segment()
4618 int err = -ENOMEM; in skb_segment()
4622 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && in skb_segment()
4626 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { in skb_segment()
4627 if (skb_headlen(check_skb) && !check_skb->head_frag) { in skb_segment()
4646 return ERR_PTR(-EINVAL); in skb_segment()
4657 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) in skb_segment()
4668 frag_len = list_skb->len; in skb_segment()
4670 if (frag_len != iter->len && iter->next) in skb_segment()
4672 if (skb_headlen(iter) && !iter->head_frag) in skb_segment()
4675 len -= iter->len; in skb_segment()
4687 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; in skb_segment()
4699 return ERR_PTR(-ENOMEM); in skb_segment()
4701 nfrags = skb_shinfo(head_skb)->nr_frags; in skb_segment()
4702 frag = skb_shinfo(head_skb)->frags; in skb_segment()
4712 len = list_skb->len; in skb_segment()
4714 len = head_skb->len - offset; in skb_segment()
4719 hsize = skb_headlen(head_skb) - offset; in skb_segment()
4730 nfrags = skb_shinfo(list_skb)->nr_frags; in skb_segment()
4731 frag = skb_shinfo(list_skb)->frags; in skb_segment()
4747 list_skb = list_skb->next; in skb_segment()
4760 nskb->truesize += skb_end_offset(nskb) - hsize; in skb_segment()
4781 tail->next = nskb; in skb_segment()
4788 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); in skb_segment()
4791 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, in skb_segment()
4792 nskb->data - tnl_hlen, in skb_segment()
4795 if (nskb->len == len + doffset) in skb_segment()
4800 if (!nskb->remcsum_offload) in skb_segment()
4801 nskb->ip_summed = CHECKSUM_NONE; in skb_segment()
4802 SKB_GSO_CB(nskb)->csum = in skb_segment()
4807 SKB_GSO_CB(nskb)->csum_start = in skb_segment()
4816 nskb_frag = skb_shinfo(nskb)->frags; in skb_segment()
4821 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & in skb_segment()
4835 nfrags = skb_shinfo(list_skb)->nr_frags; in skb_segment()
4836 frag = skb_shinfo(list_skb)->frags; in skb_segment()
4841 BUG_ON(!list_skb->head_frag); in skb_segment()
4844 i--; in skb_segment()
4845 frag--; in skb_segment()
4848 list_skb = list_skb->next; in skb_segment()
4851 if (unlikely(skb_shinfo(nskb)->nr_frags >= in skb_segment()
4856 err = -EINVAL; in skb_segment()
4865 skb_frag_off_add(nskb_frag, offset - pos); in skb_segment()
4866 skb_frag_size_sub(nskb_frag, offset - pos); in skb_segment()
4869 skb_shinfo(nskb)->nr_frags++; in skb_segment()
4876 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); in skb_segment()
4884 nskb->data_len = len - hsize; in skb_segment()
4885 nskb->len += nskb->data_len; in skb_segment()
4886 nskb->truesize += nskb->data_len; in skb_segment()
4894 if (!nskb->remcsum_offload) in skb_segment()
4895 nskb->ip_summed = CHECKSUM_NONE; in skb_segment()
4896 SKB_GSO_CB(nskb)->csum = in skb_segment()
4898 nskb->len - doffset, 0); in skb_segment()
4899 SKB_GSO_CB(nskb)->csum_start = in skb_segment()
4902 } while ((offset += len) < head_skb->len); in skb_segment()
4905 * Put it in segs->prev to avoid walking the list. in skb_segment()
4908 segs->prev = tail; in skb_segment()
4912 int type = skb_shinfo(head_skb)->gso_type; in skb_segment()
4913 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; in skb_segment()
4922 for (iter = segs; iter; iter = iter->next) { in skb_segment()
4923 skb_shinfo(iter)->gso_size = gso_size; in skb_segment()
4924 skb_shinfo(iter)->gso_segs = partial_segs; in skb_segment()
4925 skb_shinfo(iter)->gso_type = type; in skb_segment()
4926 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; in skb_segment()
4929 if (tail->len - doffset <= gso_size) in skb_segment()
4930 skb_shinfo(tail)->gso_size = 0; in skb_segment()
4932 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); in skb_segment()
4939 if (head_skb->destructor == sock_wfree) { in skb_segment()
4940 swap(tail->truesize, head_skb->truesize); in skb_segment()
4941 swap(tail->destructor, head_skb->destructor); in skb_segment()
4942 swap(tail->sk, head_skb->sk); in skb_segment()
5008 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
5028 * struct skb_shared_info is located at the end of skb->head, in skb_init()
5046 int i, copy = start - offset; in __skb_to_sgvec()
5051 return -EMSGSIZE; in __skb_to_sgvec()
5056 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
5058 if ((len -= copy) == 0) in __skb_to_sgvec()
5063 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
5068 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
5069 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
5070 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
5071 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) in __skb_to_sgvec()
5072 return -EMSGSIZE; in __skb_to_sgvec()
5077 skb_frag_off(frag) + offset - start); in __skb_to_sgvec()
5079 if (!(len -= copy)) in __skb_to_sgvec()
5091 end = start + frag_iter->len; in __skb_to_sgvec()
5092 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
5093 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) in __skb_to_sgvec()
5094 return -EMSGSIZE; in __skb_to_sgvec()
5098 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, in __skb_to_sgvec()
5103 if ((len -= copy) == 0) in __skb_to_sgvec()
5114 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
5116 * @sg: The scatter-gather list to map into
5120 * Fill the specified scatter-gather list with mappings/pointers into a
5122 * the number of scatterlist items used, or -EMSGSIZE if the contents
5132 sg_mark_end(&sg[nsg - 1]); in skb_to_sgvec()
5167 * skb_cow_data - Check that a socket buffer's data buffers are writable
5193 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
5195 return -ENOMEM; in skb_cow_data()
5205 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
5206 return -ENOMEM; in skb_cow_data()
5216 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
5231 if (skb1->next == NULL && tailbits) { in skb_cow_data()
5232 if (skb_shinfo(skb1)->nr_frags || in skb_cow_data()
5241 skb_shinfo(skb1)->nr_frags || in skb_cow_data()
5254 return -ENOMEM; in skb_cow_data()
5256 if (skb1->sk) in skb_cow_data()
5257 skb_set_owner_w(skb2, skb1->sk); in skb_cow_data()
5262 skb2->next = skb1->next; in skb_cow_data()
5269 skb_p = &skb1->next; in skb_cow_data()
5278 struct sock *sk = skb->sk; in sock_rmem_free()
5280 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
5288 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
5297 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
5298 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) in sock_queue_err_skb()
5299 return -ENOMEM; in sock_queue_err_skb()
5302 skb->sk = sk; in sock_queue_err_skb()
5303 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
5304 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
5310 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
5319 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
5320 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
5325 struct sk_buff_head *q = &sk->sk_error_queue; in sock_dequeue_err_skb()
5333 spin_lock_irqsave(&q->lock, flags); in sock_dequeue_err_skb()
5338 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; in sock_dequeue_err_skb()
5340 spin_unlock_irqrestore(&q->lock, flags); in sock_dequeue_err_skb()
5343 sk->sk_err = 0; in sock_dequeue_err_skb()
5353 * skb_clone_sk - create clone of skb, and take reference to socket
5367 struct sock *sk = skb->sk; in skb_clone_sk()
5370 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) in skb_clone_sk()
5379 clone->sk = sk; in skb_clone_sk()
5380 clone->destructor = sock_efree; in skb_clone_sk()
5394 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
5398 serr->ee.ee_errno = ENOMSG; in __skb_complete_tx_timestamp()
5399 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; in __skb_complete_tx_timestamp()
5400 serr->ee.ee_info = tstype; in __skb_complete_tx_timestamp()
5401 serr->opt_stats = opt_stats; in __skb_complete_tx_timestamp()
5402 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
5403 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { in __skb_complete_tx_timestamp()
5404 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
5406 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); in __skb_complete_tx_timestamp()
5419 if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data))) in skb_may_tx_timestamp()
5422 read_lock_bh(&sk->sk_callback_lock); in skb_may_tx_timestamp()
5423 ret = sk->sk_socket && sk->sk_socket->file && in skb_may_tx_timestamp()
5424 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); in skb_may_tx_timestamp()
5425 read_unlock_bh(&sk->sk_callback_lock); in skb_may_tx_timestamp()
5432 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
5440 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { in skb_complete_tx_timestamp()
5464 tsflags = READ_ONCE(sk->sk_tsflags); in __skb_tstamp_tx()
5466 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) in __skb_tstamp_tx()
5495 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5497 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5512 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, in skb_tstamp_tx()
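A hedged sketch of the simpler MAC-driver variant: when a descriptor completes with a hardware timestamp and SKBTX_IN_PROGRESS was set at xmit time, report it straight from the completion handler. my_tx_complete() and hw_ns are assumptions.

	static void my_tx_complete(struct sk_buff *skb, u64 hw_ns)
	{
		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
			struct skb_shared_hwtstamps hwts = {};

			hwts.hwtstamp = ns_to_ktime(hw_ns);
			skb_tstamp_tx(skb, &hwts);
		}
		dev_consume_skb_any(skb);
	}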
5520 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5524 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5525 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5529 serr->ee.ee_errno = ENOMSG; in skb_complete_wifi_ack()
5530 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; in skb_complete_wifi_ack()
5535 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { in skb_complete_wifi_ack()
5546 * skb_partial_csum_set - set up and verify partial csum values for packet
5548 * @start: the number of bytes after skb->data to start checksumming.
5551 * For untrusted partially-checksummed packets, we need to make sure the values
5552 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5554 * This function checks and sets those values and skb->ip_summed: if this
5567 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5568 skb->csum_start = csum_start; in skb_partial_csum_set()
5569 skb->csum_offset = off; in skb_partial_csum_set()
5570 skb->transport_header = csum_start; in skb_partial_csum_set()
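A hedged sketch of the typical consumer: a virtio-style driver validating an untrusted CHECKSUM_PARTIAL hint read from a device descriptor before letting the packet proceed. csum_start and csum_offset are the untrusted values; the helper name is hypothetical.

	static struct sk_buff *my_apply_partial_csum(struct sk_buff *skb,
						     u16 csum_start, u16 csum_offset)
	{
		/* Rejects offsets that would fall outside the packet, so a
		 * malicious hint cannot make later checksum code oops.
		 */
		if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
			kfree_skb(skb);
			return NULL;
		}
		return skb;
	}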
5584 if (max > skb->len) in skb_maybe_pull_tail()
5585 max = skb->len; in skb_maybe_pull_tail()
5587 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5588 return -ENOMEM; in skb_maybe_pull_tail()
5591 return -EPROTO; in skb_maybe_pull_tail()
5611 err = -EPROTO; in skb_checksum_setup_ip()
5612 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5620 err = -EPROTO; in skb_checksum_setup_ip()
5621 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5624 return ERR_PTR(-EPROTO); in skb_checksum_setup_ip()
5652 err = -EPROTO; in skb_checksum_setup_ipv4()
5657 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5662 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5663 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5664 skb->len - off, in skb_checksum_setup_ipv4()
5665 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5699 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5701 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5717 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5732 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5748 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) in skb_checksum_setup_ipv6()
5751 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5761 err = -EPROTO; in skb_checksum_setup_ipv6()
5771 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5772 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5773 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5781 * skb_checksum_setup - set up partial checksum offset
5783 * @recalculate: if true the pseudo-header checksum will be recalculated
5789 switch (skb->protocol) { in skb_checksum_setup()
5799 err = -EPROTO; in skb_checksum_setup()
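A hedged sketch of how a backend driver receiving frames from an untrusted guest (xen-netback style) might call it; recalculate=true asks for the pseudo-header checksum to be rewritten as the kernel-doc above describes. The helper name is hypothetical.

	static int my_fixup_guest_csum(struct sk_buff *skb)
	{
		/* Non-zero means the packet cannot be checksummed safely;
		 * the caller would normally drop it.
		 */
		return skb_checksum_setup(skb, true);
	}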
5808 * skb_checksum_maybe_trim - maybe trims the given skb
5815 * (e.g. transport_len exceeds skb length or out-of-memory).
5827 if (skb->len < len) in skb_checksum_maybe_trim()
5829 else if (skb->len == len) in skb_checksum_maybe_trim()
5846 * skb_checksum_trimmed - validate checksum of an skb
5896 skb->dev->name); in __skb_warn_lro_forwarding()
5912 * skb_try_coalesce - try to merge skb to prior one
5922 int i, delta, len = from->len; in skb_try_coalesce()
5929 /* In general, avoid mixing page_pool and non-page_pool allocated in skb_try_coalesce()
5931 * references if @from is cloned and !@to->pp_recycle but its in skb_try_coalesce()
5935 if (to->pp_recycle != from->pp_recycle) in skb_try_coalesce()
5950 if (to_shinfo->frag_list || from_shinfo->frag_list) in skb_try_coalesce()
5959 if (to_shinfo->nr_frags + in skb_try_coalesce()
5960 from_shinfo->nr_frags >= MAX_SKB_FRAGS) in skb_try_coalesce()
5966 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_try_coalesce()
5968 page = virt_to_head_page(from->head); in skb_try_coalesce()
5969 offset = from->data - (unsigned char *)page_address(page); in skb_try_coalesce()
5971 skb_fill_page_desc(to, to_shinfo->nr_frags, in skb_try_coalesce()
5975 if (to_shinfo->nr_frags + in skb_try_coalesce()
5976 from_shinfo->nr_frags > MAX_SKB_FRAGS) in skb_try_coalesce()
5979 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); in skb_try_coalesce()
5984 memcpy(to_shinfo->frags + to_shinfo->nr_frags, in skb_try_coalesce()
5985 from_shinfo->frags, in skb_try_coalesce()
5986 from_shinfo->nr_frags * sizeof(skb_frag_t)); in skb_try_coalesce()
5987 to_shinfo->nr_frags += from_shinfo->nr_frags; in skb_try_coalesce()
5990 from_shinfo->nr_frags = 0; in skb_try_coalesce()
5996 for (i = 0; i < from_shinfo->nr_frags; i++) in skb_try_coalesce()
5997 __skb_frag_ref(&from_shinfo->frags[i]); in skb_try_coalesce()
6000 to->truesize += delta; in skb_try_coalesce()
6001 to->len += len; in skb_try_coalesce()
6002 to->data_len += len; in skb_try_coalesce()
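A hedged sketch of the receive-queue coalescing pattern (TCP does essentially this): try to merge the new skb into the queue tail, free what is left of the donor with kfree_skb_partial(), and let the caller account the truesize delta.

	static bool my_queue_coalesce(struct sk_buff *tail, struct sk_buff *from,
				      int *delta_truesize)
	{
		bool fragstolen;

		if (!skb_try_coalesce(tail, from, &fragstolen, delta_truesize))
			return false;

		/* @from's data now lives in @tail. */
		kfree_skb_partial(from, fragstolen);
		return true;
	}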
6010 * skb_scrub_packet - scrub an skb
6024 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
6025 skb->skb_iif = 0; in skb_scrub_packet()
6026 skb->ignore_df = 0; in skb_scrub_packet()
6033 skb->offload_fwd_mark = 0; in skb_scrub_packet()
6034 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
6041 skb->mark = 0; in skb_scrub_packet()
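A hedged sketch of the common call-site shape, as in the dev-forward and tunnel paths: the xnet argument is true only when the packet crosses network namespaces, which is when marks and security state must also be dropped. my_forward_scrub() is a hypothetical wrapper.

	#include <linux/netdevice.h>

	static void my_forward_scrub(struct sk_buff *skb, struct net_device *to_dev)
	{
		bool xnet = !net_eq(dev_net(to_dev), dev_net(skb->dev));

		skb_scrub_packet(skb, xnet);
		skb->dev = to_dev;
	}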
6056 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
6059 mac_len - VLAN_HLEN - ETH_TLEN); in skb_reorder_vlan_header()
6064 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
6068 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
6078 /* vlan_tci is already set up, so leave this for another time */ in skb_vlan_untag()
6089 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
6090 vlan_tci = ntohs(vhdr->h_vlan_TCI); in skb_vlan_untag()
6091 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
6116 return -ENOMEM; in skb_ensure_writable()
6119 return -EFAULT; in skb_ensure_writable()
6130 int needed_headroom = dev->needed_headroom; in skb_ensure_writable_head_tail()
6131 int needed_tailroom = dev->needed_tailroom; in skb_ensure_writable_head_tail()
6138 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) in skb_ensure_writable_head_tail()
6139 needed_tailroom += ETH_ZLEN - skb->len; in skb_ensure_writable_head_tail()
6141 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); in skb_ensure_writable_head_tail()
6142 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); in skb_ensure_writable_head_tail()
6158 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
6162 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", in __skb_vlan_pop()
6164 return -EINVAL; in __skb_vlan_pop()
6171 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
6175 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
6187 * Expects skb->data at mac header.
6198 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6206 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6209 vlan_proto = skb->protocol; in skb_vlan_pop()
6220 * Expects skb->data at mac header.
6225 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
6229 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", in skb_vlan_push()
6231 return -EINVAL; in skb_vlan_push()
6234 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
6239 skb->protocol = skb->vlan_proto; in skb_vlan_push()
6240 skb->network_header -= VLAN_HLEN; in skb_vlan_push()
6242 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
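A hedged sketch pairing the two helpers the way an OVS-style action list would: both expect skb->data at the mac header, per the comments above. The VLAN ID 100 is illustrative.

	#include <linux/if_vlan.h>

	static int my_swap_vlan(struct sk_buff *skb)
	{
		int err;

		err = skb_vlan_pop(skb);
		if (err)
			return err;

		return skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
	}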
6250 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
6256 * Expects that skb->data points to the mac header and that no VLAN tags are
6259 * Returns 0 on success, -errno otherwise.
6265 return -EPROTO; in skb_eth_pop()
6276 * skb_eth_push() - Add a new Ethernet header at the head of a packet
6284 * Expects that skb->data points to the mac header, which must be empty.
6286 * Returns 0 on success, -errno otherwise.
6295 return -EPROTO; in skb_eth_push()
6306 ether_addr_copy(eth->h_dest, dst); in skb_eth_push()
6307 ether_addr_copy(eth->h_source, src); in skb_eth_push()
6308 eth->h_proto = skb->protocol; in skb_eth_push()
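A hedged sketch of the matching pop/push pair: strip the Ethernet header before handing the payload to an L3 path, then rebuild one from caller-supplied MAC addresses. The dst/src parameters and the helper name are assumptions.

	static int my_reframe(struct sk_buff *skb,
			      const unsigned char *dst, const unsigned char *src)
	{
		int err;

		err = skb_eth_pop(skb);		/* skb->data was at the mac header */
		if (err)
			return err;

		/* ... L3 processing on the now header-less packet ... */

		return skb_eth_push(skb, dst, src);
	}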
6320 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
6321 __be16 diff[] = { ~hdr->h_proto, ethertype }; in skb_mod_eth_type()
6323 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
6326 hdr->h_proto = ethertype; in skb_mod_eth_type()
6330 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
6340 * Expects skb->data at mac header.
6342 * Returns 0 on success, -errno otherwise.
6351 return -EINVAL; in skb_mpls_push()
6354 if (skb->encapsulation) in skb_mpls_push()
6355 return -EINVAL; in skb_mpls_push()
6361 if (!skb->inner_protocol) { in skb_mpls_push()
6363 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
6367 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6374 lse->label_stack_entry = mpls_lse; in skb_mpls_push()
6379 skb->protocol = mpls_proto; in skb_mpls_push()
6386 * skb_mpls_pop() - pop the outermost MPLS header
6393 * Expects skb->data at mac header.
6395 * Returns 0 on success, -errno otherwise.
6402 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6421 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6424 skb->protocol = next_proto; in skb_mpls_pop()
6431 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
6436 * Expects skb->data at mac header.
6438 * Returns 0 on success, -errno otherwise.
6444 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6445 return -EINVAL; in skb_mpls_update_lse()
6447 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6451 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6452 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6454 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6457 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6464 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
6468 * Expects skb->data at mac header.
6470 * Returns 0 on success, -errno otherwise.
6477 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6478 return -EINVAL; in skb_mpls_dec_ttl()
6481 return -ENOMEM; in skb_mpls_dec_ttl()
6483 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6485 if (!--ttl) in skb_mpls_dec_ttl()
6486 return -EINVAL; in skb_mpls_dec_ttl()
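A hedged sketch covering the three MPLS helpers above, shaped like an OVS datapath action sequence; the label (16) and TTL (64) are illustrative, and the LSE is assembled with the uapi MPLS_LS_* shifts.

	#include <linux/mpls.h>
	#include <linux/if_ether.h>

	static int my_mpls_roundtrip(struct sk_buff *skb)
	{
		__be32 lse = cpu_to_be32((16 << MPLS_LS_LABEL_SHIFT) |
					 (1  << MPLS_LS_S_SHIFT) |
					 (64 << MPLS_LS_TTL_SHIFT));
		int err;

		err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
				    skb->mac_len, true);
		if (err)
			return err;

		err = skb_mpls_dec_ttl(skb);
		if (err)
			return err;

		return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
	}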
6496 * alloc_skb_with_frags - allocate skb with page frags
6517 *errcode = -EMSGSIZE; in alloc_skb_with_frags()
6521 *errcode = -ENOBUFS; in alloc_skb_with_frags()
6527 if (nr_frags == MAX_SKB_FRAGS - 1) in alloc_skb_with_frags()
6530 order--; in alloc_skb_with_frags()
6538 order--; in alloc_skb_with_frags()
6550 skb->truesize += (PAGE_SIZE << order); in alloc_skb_with_frags()
6551 data_len -= chunk; in alloc_skb_with_frags()
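A hedged sketch of allocation with paged data, roughly what sock_alloc_send_pskb() does on behalf of sendmsg(): a small linear area for headers plus the payload in page frags. hdr_len/data_len are caller-chosen and the order limit of 0 is illustrative.

	#include <linux/err.h>
	#include <linux/skbuff.h>

	static struct sk_buff *my_alloc(size_t hdr_len, size_t data_len)
	{
		struct sk_buff *skb;
		int err;

		skb = alloc_skb_with_frags(hdr_len, data_len, 0 /* max order */,
					   &err, GFP_KERNEL);
		if (!skb)
			return ERR_PTR(err);

		skb_reserve(skb, hdr_len);	/* headers get pushed later */
		return skb;
	}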
6567 int new_hlen = headlen - off; in pskb_carve_inside_header()
6575 return -ENOMEM; in pskb_carve_inside_header()
6580 skb->len -= off; in pskb_carve_inside_header()
6585 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6590 return -ENOMEM; in pskb_carve_inside_header()
6592 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6598 /* we can reuse the existing refcount - all we did was in pskb_carve_inside_header()
6604 skb->head = data; in pskb_carve_inside_header()
6605 skb->data = data; in pskb_carve_inside_header()
6606 skb->head_frag = 0; in pskb_carve_inside_header()
6610 skb->cloned = 0; in pskb_carve_inside_header()
6611 skb->hdr_len = 0; in pskb_carve_inside_header()
6612 skb->nohdr = 0; in pskb_carve_inside_header()
6613 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6627 struct sk_buff *list = shinfo->frag_list; in pskb_carve_frag_list()
6634 return -EFAULT; in pskb_carve_frag_list()
6636 if (list->len <= eat) { in pskb_carve_frag_list()
6638 eat -= list->len; in pskb_carve_frag_list()
6639 list = list->next; in pskb_carve_frag_list()
6646 return -ENOMEM; in pskb_carve_frag_list()
6647 insp = list->next; in pskb_carve_frag_list()
6655 return -ENOMEM; in pskb_carve_frag_list()
6662 while ((list = shinfo->frag_list) != insp) { in pskb_carve_frag_list()
6663 shinfo->frag_list = list->next; in pskb_carve_frag_list()
6668 clone->next = list; in pskb_carve_frag_list()
6669 shinfo->frag_list = clone; in pskb_carve_frag_list()
6675 * non-linear part of skb
6683 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6691 return -ENOMEM; in pskb_carve_inside_nonlinear()
6698 return -ENOMEM; in pskb_carve_inside_nonlinear()
6702 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6705 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6716 skb_frag_off_add(&shinfo->frags[0], off - pos); in pskb_carve_inside_nonlinear()
6717 skb_frag_size_sub(&shinfo->frags[0], off - pos); in pskb_carve_inside_nonlinear()
6724 shinfo->nr_frags = k; in pskb_carve_inside_nonlinear()
6729 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6730 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ in pskb_carve_inside_nonlinear()
6732 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6734 return -ENOMEM; in pskb_carve_inside_nonlinear()
6738 skb->head = data; in pskb_carve_inside_nonlinear()
6739 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6740 skb->data = data; in pskb_carve_inside_nonlinear()
6744 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6745 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6746 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6747 skb->len -= off; in pskb_carve_inside_nonlinear()
6748 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6749 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6785 * skb_condense - try to get rid of fragments/frag_list if possible
6789 * If packet has bytes in frags and enough tail room in skb->head,
6793 * We do not reallocate skb->head, so this cannot fail.
6794 * Caller must re-evaluate skb->truesize if needed.
6798 if (skb->data_len) { in skb_condense()
6799 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6804 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6806 /* At this point, skb->truesize might be over estimated, in skb_condense()
6809 * When we pulled its content into skb->head, fragment in skb_condense()
6811 * adjust skb->truesize, not knowing the frag truesize. in skb_condense()
6813 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
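A hedged sketch of the intended use, as in TCP's backlog/rmem handling: condense first, then account the up-to-date skb->truesize, since the comment above notes it may have been over-estimated. my_charge_rmem() is a hypothetical helper.

	#include <net/sock.h>

	static void my_charge_rmem(struct sock *sk, struct sk_buff *skb)
	{
		skb_condense(skb);

		/* truesize may have shrunk; skb_set_owner_r() charges the
		 * current value against sk_rmem_alloc.
		 */
		skb_set_owner_r(skb, sk);
	}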
6820 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); in skb_ext_get_ptr()
6824 * __skb_ext_alloc - allocate a new skb extensions storage
6837 memset(new->offset, 0, sizeof(new->offset)); in __skb_ext_alloc()
6838 refcount_set(&new->refcnt, 1); in __skb_ext_alloc()
6849 if (refcount_read(&old->refcnt) == 1) in skb_ext_maybe_cow()
6856 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); in skb_ext_maybe_cow()
6857 refcount_set(&new->refcnt, 1); in skb_ext_maybe_cow()
6864 for (i = 0; i < sp->len; i++) in skb_ext_maybe_cow()
6865 xfrm_state_hold(sp->xvec[i]); in skb_ext_maybe_cow()
6872 if (flow->key) in skb_ext_maybe_cow()
6873 refcount_inc(&flow->key->refs); in skb_ext_maybe_cow()
6881 * __skb_ext_set - attach the specified extension storage to this skb
6897 ext->chunks = newlen; in __skb_ext_set()
6898 ext->offset[id] = newoff; in __skb_ext_set()
6899 skb->extensions = ext; in __skb_ext_set()
6900 skb->active_extensions = 1 << id; in __skb_ext_set()
6905 * skb_ext_add - allocate space for given extension, COW if needed
6923 if (skb->active_extensions) { in skb_ext_add()
6924 old = skb->extensions; in skb_ext_add()
6926 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6933 newoff = new->chunks; in skb_ext_add()
6943 new->chunks = newlen; in skb_ext_add()
6944 new->offset[id] = newoff; in skb_ext_add()
6946 skb->slow_gro = 1; in skb_ext_add()
6947 skb->extensions = new; in skb_ext_add()
6948 skb->active_extensions |= 1 << id; in skb_ext_add()
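A hedged sketch of the extension API from a producer's point of view: skb_ext_add() hands back (possibly COWed) storage for one extension id. TC_SKB_EXT is used as the example id and is only present with CONFIG_NET_TC_SKB_EXT; chain_index is an assumed value.

	static int my_record_chain(struct sk_buff *skb, u32 chain_index)
	{
		struct tc_skb_ext *ext;

		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (!ext)
			return -ENOMEM;

		ext->chain = chain_index;
		return 0;
	}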
6958 for (i = 0; i < sp->len; i++) in skb_ext_put_sp()
6959 xfrm_state_put(sp->xvec[i]); in skb_ext_put_sp()
6966 if (flow->key) in skb_ext_put_mctp()
6967 mctp_key_unref(flow->key); in skb_ext_put_mctp()
6973 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6975 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6976 if (skb->active_extensions == 0) { in __skb_ext_del()
6977 skb->extensions = NULL; in __skb_ext_del()
6981 refcount_read(&ext->refcnt) == 1) { in __skb_ext_del()
6985 sp->len = 0; in __skb_ext_del()
6996 if (refcount_read(&ext->refcnt) == 1) in __skb_ext_put()
6999 if (!refcount_dec_and_test(&ext->refcnt)) in __skb_ext_put()
7019 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in kfree_skb_napi_cache()
7030 * skb_attempt_defer_free - queue skb for remote freeing
7033 * Put @skb in a per-cpu list, using the cpu which
7039 int cpu = skb->alloc_cpu; in skb_attempt_defer_free()
7052 DEBUG_NET_WARN_ON_ONCE(skb->destructor); in skb_attempt_defer_free()
7056 if (READ_ONCE(sd->defer_count) >= defer_max) in skb_attempt_defer_free()
7059 spin_lock_bh(&sd->defer_lock); in skb_attempt_defer_free()
7061 kick = sd->defer_count == (defer_max >> 1); in skb_attempt_defer_free()
7063 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); in skb_attempt_defer_free()
7065 skb->next = sd->defer_list; in skb_attempt_defer_free()
7067 WRITE_ONCE(sd->defer_list, skb); in skb_attempt_defer_free()
7068 spin_unlock_bh(&sd->defer_lock); in skb_attempt_defer_free()
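A hedged sketch modelled on TCP's receive path: the socket destructor must be dropped first (note the DEBUG_NET_WARN_ON_ONCE above), then the skb is handed back to the CPU that allocated it. my_eat_skb() is a hypothetical name.

	static void my_eat_skb(struct sk_buff *skb)
	{
		if (skb->destructor == sock_rfree) {
			sock_rfree(skb);
			skb->destructor = NULL;
			skb->sk = NULL;
			skb_attempt_defer_free(skb);
			return;
		}
		__kfree_skb(skb);
	}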
7086 skb->csum = csum_block_add(skb->csum, csum, skb->len); in skb_splice_csum_page()
7090 * skb_splice_from_iter - Splice (or copy) pages to skbuff
7101 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
7112 while (iter->count > 0) { in skb_splice_from_iter()
7116 ret = -EMSGSIZE; in skb_splice_from_iter()
7117 space = frag_limit - skb_shinfo(skb)->nr_frags; in skb_splice_from_iter()
7126 ret = len ?: -EIO; in skb_splice_from_iter()
7133 size_t part = min_t(size_t, PAGE_SIZE - off, len); in skb_splice_from_iter()
7135 ret = -EIO; in skb_splice_from_iter()
7146 if (skb->ip_summed == CHECKSUM_NONE) in skb_splice_from_iter()
7151 maxsize -= part; in skb_splice_from_iter()
7152 len -= part; in skb_splice_from_iter()
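A hedged sketch of the MSG_SPLICE_PAGES sendmsg path this helper serves: splice pages from the caller's iterator straight into the skb's frags and account what was taken. The surrounding sendmsg plumbing and the helper name are assumptions.

	static ssize_t my_splice_into_skb(struct sock *sk, struct sk_buff *skb,
					  struct msghdr *msg, size_t size)
	{
		ssize_t copied;

		copied = skb_splice_from_iter(skb, &msg->msg_iter, size,
					      sk->sk_allocation);
		if (copied < 0)
			return copied;	/* e.g. -EMSGSIZE once frags are full */

		sk_wmem_queued_add(sk, copied);
		return copied;
	}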
7192 if (WARN_ON_ONCE(!i->data_source)) in csum_and_copy_from_iter_full()