Lines matching +full:conntrack +full:- +full:related: a filtered search listing over include/linux/skbuff.h. Only matching source lines appear, each prefixed with its line number in that header.

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
7 * Florian La Roche, <[email protected]-sb.de>
28 #include <linux/dma-mapping.h>
40 #include <net/dropreason-core.h>
49 * IP checksum related features
57 * .. flat-table:: Checksum related device features
60 * * - %NETIF_F_HW_CSUM
61 * - The driver (or its device) is able to compute one
67 * * - %NETIF_F_IP_CSUM
68 * - Driver (device) is only able to checksum plain
77 * * - %NETIF_F_IPV6_CSUM
78 * - Driver (device) is only able to checksum plain
88 * * - %NETIF_F_RXCSUM
89 * - Driver (device) performs receive checksum offload.
101 * - %CHECKSUM_NONE
105 * not in skb->csum. Thus, skb->csum is undefined in this case.
107 * - %CHECKSUM_UNNECESSARY
118 * - TCP: IPv6 and IPv4.
119 * - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
122 * - GRE: only if the checksum is present in the header.
123 * - SCTP: indicates the CRC in SCTP header has been validated.
124 * - FCOE: indicates the CRC in FC frame has been validated.
128 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
133 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
136 * - %CHECKSUM_COMPLETE
144 * - Even if device supports only some protocols, but is able to produce
145 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
146 * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
148 * - %CHECKSUM_PARTIAL
155 * referred to by skb->csum_start + skb->csum_offset and any preceding
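/* An illustrative sketch (not part of this header): how a driver RX
 * path might report the receive statuses documented above. The boolean
 * inputs stand in for hypothetical hardware-descriptor bits.
 */
static void my_rx_set_csum(struct sk_buff *skb, bool hw_verified,
			   bool hw_summed, __wsum hw_csum)
{
	if (hw_verified) {
		/* device validated the checksum(s) itself */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (hw_summed) {
		/* device summed the whole packet; store the raw sum */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = hw_csum;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}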
160 * Checksumming on transmit for non-GSO
166 * - %CHECKSUM_PARTIAL
174 * checksum refers to a legitimate transport layer checksum -- it is the
192 * - %CHECKSUM_NONE
197 * - %CHECKSUM_UNNECESSARY
202 * - %CHECKSUM_COMPLETE
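/* Sketch of the transmit side: a protocol requesting CHECKSUM_PARTIAL
 * offload, assuming a UDP header at the transport offset (needs
 * <linux/udp.h>); the device or skb_checksum_help() completes the sum
 * from csum_start at csum_offset.
 */
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);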
209 * Non-IP checksum (CRC) offloads
212 * .. flat-table::
215 * * - %NETIF_F_SCTP_CRC
216 * - This feature indicates that a device is capable of
226 * * - %NETIF_F_FCOE_CRC
227 * - This feature indicates that a device is capable of offloading the FCOE
253 /* Maximum value in skb->csum_level */
258 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
260 /* For X bytes available in skb->head, what is the minimal
268 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
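/* Sketch: converting an allocation size into usable buffer room with
 * the macro above, e.g. what is left of one page once the aligned
 * struct skb_shared_info is subtracted.
 */
unsigned int room = SKB_WITH_OVERHEAD(PAGE_SIZE);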
302 /* always valid & non-NULL from FORWARD on, for physdev match */
357 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
369 * skb_frag_size() - Returns the size of a skb fragment
374 return frag->len; in skb_frag_size()
378 * skb_frag_size_set() - Sets the size of a skb fragment
384 frag->len = size; in skb_frag_size_set()
388 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
394 frag->len += delta; in skb_frag_size_add()
398 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
404 frag->len -= delta; in skb_frag_size_sub()
408 * skb_frag_must_loop - Test if %p is a high memory page
421 * skb_frag_foreach_page - loop over pages in a fragment
424 * @f_off: offset from start of f->netmem
428 * non-zero only on first page.
433 * A fragment can hold a compound page, in which case per-page
439 p_off = (f_off) & (PAGE_SIZE - 1), \
441 min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
445 p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
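/* Sketch: copying out a fragment that may sit in a compound high-memory
 * page, using the iterator above. "buf" is a hypothetical destination.
 */
struct page *p;
u32 p_off, p_len, copied;

skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
		      p, p_off, p_len, copied) {
	void *vaddr = kmap_local_page(p);

	memcpy(buf + copied, vaddr + p_off, p_len);
	kunmap_local(vaddr);
}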
448 * struct skb_shared_hwtstamps - hardware time stamps
455 * skb->tstamp.
572 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
589 * the end of the header data, ie. at skb->end.
636 * The higher 16 bits indicate how many of the references are payload-only.
652 * doing the right thing. In practice there's usually only one payload-only skb.
653 * Having multiple payload-only skbs with different lengths of hdr_len is not
654 * possible. The payload-only skbs should never leave their owner.
657 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
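/* Sketch: decoding the split reference count described above. */
int dataref = atomic_read(&skb_shinfo(skb)->dataref);
int payload_refs = dataref >> SKB_DATAREF_SHIFT;	/* payload-only users */
int total_refs = dataref & SKB_DATAREF_MASK;		/* all users */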
734 * - data buffer, containing headers and sometimes payload;
737 * - shared info (struct skb_shared_info) which holds an array of pointers
738 * to read-only data in the (page, offset, length) format.
744 *                                  ---------------
745 *                                 | sk_buff       |
746 *                                  ---------------
747 *     ,---------------------------  + head
748 *    /          ,-----------------  + data
749 *   /          /      ,-----------  + tail
750 *  |          |      |            , + end
751 *  |          |      |           |
752 *  v          v      v           v
753 *   -----------------------------------------------
754 *  | headroom | data |  tailroom | skb_shared_info |
755 *   -----------------------------------------------
756 *                                 + [page frag]
757 *                                 + [page frag]
758 *                                 + [page frag]
759 *                                 + [page frag]       ---------
760 *  + frag_list                    --> | sk_buff |
761 *                                      ---------
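/* Sketch tying the diagram to the helpers: reserve headroom, then
 * extend the data area. The sizes are arbitrary examples.
 */
struct sk_buff *skb = alloc_skb(128 + NET_IP_ALIGN, GFP_ATOMIC);

if (skb) {
	skb_reserve(skb, NET_IP_ALIGN);	/* head..data: headroom */
	skb_put(skb, 64);		/* data..tail: payload */
	/* tail..end is tailroom; skb_shared_info lives at end */
}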
766 * struct sk_buff - socket buffer
785 * @csum_start: Offset from skb->head where checksumming should start
798 * @offload_fwd_mark: Packet was L2-forwarded in hardware
799 * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
804 * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
810 * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
825 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
844 * @tstamp_type: When set, skb->tstamp has the
845 * delivery_time clock base of skb->tstamp.
858 * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
871 * @users: User count - see {datagram,tcp}.c
1090 /* only usable after checking ->active_extensions != 0 */
1126 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
1131 return unlikely(skb->pfmemalloc); in skb_pfmemalloc()
1142 * skb_dst - returns skb dst_entry
1152 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && in skb_dst()
1155 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); in skb_dst()
1159 * skb_dst_set - sets skb dst
1168 skb->slow_gro |= !!dst; in skb_dst_set()
1169 skb->_skb_refdst = (unsigned long)dst; in skb_dst_set()
1173 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
1185 skb->slow_gro |= !!dst; in skb_dst_set_noref()
1186 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; in skb_dst_set_noref()
1190 * skb_dst_is_noref - Test if skb dst isn't refcounted
1195 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); in skb_dst_is_noref()
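/* Sketch: skb_dst() masks off SKB_DST_NOREF, so a caller can tell a
 * borrowed (RCU-protected) dst from a refcounted one.
 */
struct dst_entry *dst = skb_dst(skb);

if (dst && skb_dst_is_noref(skb)) {
	/* borrowed reference: only valid under rcu_read_lock() */
}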
1198 /* For mangling skb->pkt_type from user space side from applications
1208 * skb_napi_id - Returns the skb's NAPI id
1214 return skb->napi_id; in skb_napi_id()
1223 return skb->wifi_acked_valid; in skb_wifi_acked_valid()
1230 * skb_unref - decrement the skb's reference count
1239 if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1)) in skb_unref()
1241 else if (likely(!refcount_dec_and_test(&skb->users))) in skb_unref()
1252 if (!skb->cloned) in skb_data_unref()
1255 bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1; in skb_data_unref()
1257 if (atomic_read(&shinfo->dataref) == bias) in skb_data_unref()
1259 else if (atomic_sub_return(bias, &shinfo->dataref)) in skb_data_unref()
1275 * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
1322 * alloc_skb - allocate a network buffer
1351 * skb_fclone_busy - check if fclone is busy
1366 return skb->fclone == SKB_FCLONE_ORIG && in skb_fclone_busy()
1367 refcount_read(&fclones->fclone_ref) > 1 && in skb_fclone_busy()
1368 READ_ONCE(fclones->skb2.sk) == sk; in skb_fclone_busy()
1372 * alloc_skb_fclone - allocate a network buffer from fclone cache
1412 * skb_pad - zero pad the tail of an skb
1458 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1487 skb->hash = 0; in skb_clear_hash()
1488 skb->sw_hash = 0; in skb_clear_hash()
1489 skb->l4_hash = 0; in skb_clear_hash()
1494 if (!skb->l4_hash) in skb_clear_hash_if_not_l4()
1501 skb->l4_hash = is_l4; in __skb_set_hash()
1502 skb->sw_hash = is_sw; in __skb_set_hash()
1503 skb->hash = hash; in __skb_set_hash()
1607 if (!skb->l4_hash && !skb->sw_hash) in skb_get_hash_net()
1610 return skb->hash; in skb_get_hash_net()
1615 if (!skb->l4_hash && !skb->sw_hash) in skb_get_hash()
1618 return skb->hash; in skb_get_hash()
1623 if (!skb->l4_hash && !skb->sw_hash) { in skb_get_hash_flowi6()
1630 return skb->hash; in skb_get_hash_flowi6()
1638 return skb->hash; in skb_get_hash_raw()
1643 to->hash = from->hash; in skb_copy_hash()
1644 to->sw_hash = from->sw_hash; in skb_copy_hash()
1645 to->l4_hash = from->l4_hash; in skb_copy_hash()
1652 return skb2->decrypted - skb1->decrypted; in skb_cmp_decrypted()
1661 return skb->decrypted; in skb_is_decrypted()
1671 to->decrypted = from->decrypted; in skb_copy_decrypted()
1678 return skb->head + skb->end; in skb_end_pointer()
1683 return skb->end; in skb_end_offset()
1688 skb->end = offset; in skb_set_end_offset()
1693 return skb->end; in skb_end_pointer()
1698 return skb->end - skb->head; in skb_end_offset()
1703 skb->end = skb->head + offset; in skb_set_end_offset()
1724 return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_dgram()
1736 return &skb_shinfo(skb)->hwtstamps; in skb_hwtstamps()
1741 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; in skb_zcopy()
1748 return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; in skb_zcopy_pure()
1753 return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; in skb_zcopy_managed()
1764 refcount_inc(&uarg->refcnt); in net_zcopy_get()
1769 skb_shinfo(skb)->destructor_arg = uarg; in skb_zcopy_init()
1770 skb_shinfo(skb)->flags |= uarg->flags; in skb_zcopy_init()
1787 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); in skb_zcopy_set_nouarg()
1788 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; in skb_zcopy_set_nouarg()
1793 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; in skb_zcopy_is_nouarg()
1798 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); in skb_zcopy_get_nouarg()
1804 uarg->ops->complete(NULL, uarg, true); in net_zcopy_put()
1810 if (uarg->ops == &msg_zerocopy_ubuf_ops) in net_zcopy_put_abort()
1824 uarg->ops->complete(skb, uarg, zerocopy_success); in skb_zcopy_clear()
1826 skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; in skb_zcopy_clear()
1841 return !skb->unreadable; in skb_frags_readable()
1846 skb->next = NULL; in skb_mark_not_on_list()
1852 skb->next = SKB_LIST_POISON_NEXT; in skb_poison_list()
1856 /* Iterate through singly-linked GSO fragments of an skb. */
1858 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1859 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
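/* Sketch: walking the segment list produced by software GSO with the
 * iterator above; my_xmit_one() is a hypothetical per-segment transmit.
 */
struct sk_buff *seg, *next;

skb_list_walk_safe(segs, seg, next) {
	skb_mark_not_on_list(seg);
	my_xmit_one(seg);
}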
1863 __list_del_entry(&skb->list); in skb_list_del_init()
1868 * skb_queue_empty - check if a queue is empty
1875 return list->next == (const struct sk_buff *) list; in skb_queue_empty()
1879 * skb_queue_empty_lockless - check if a queue is empty
1887 return READ_ONCE(list->next) == (const struct sk_buff *) list; in skb_queue_empty_lockless()
1892 * skb_queue_is_last - check if skb is the last entry in the queue
1901 return skb->next == (const struct sk_buff *) list; in skb_queue_is_last()
1905 * skb_queue_is_first - check if skb is the first entry in the queue
1914 return skb->prev == (const struct sk_buff *) list; in skb_queue_is_first()
1918 * skb_queue_next - return the next packet in the queue
1932 return skb->next; in skb_queue_next()
1936 * skb_queue_prev - return the prev packet in the queue
1950 return skb->prev; in skb_queue_prev()
1954 * skb_get - reference buffer
1962 refcount_inc(&skb->users); in skb_get()
1971 * skb_cloned - is the buffer a clone
1980 return skb->cloned && in skb_cloned()
1981 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; in skb_cloned()
1994 /* This variant of skb_unclone() makes sure skb->truesize
1995 * and skb_end_offset() are not changed, whenever a new skb->head is needed.
2011 * skb_header_cloned - is the header a clone
2021 if (!skb->cloned) in skb_header_cloned()
2024 dataref = atomic_read(&skb_shinfo(skb)->dataref); in skb_header_cloned()
2025 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); in skb_header_cloned()
2040 * __skb_header_release() - allow clones to use the headroom
2047 skb->nohdr = 1; in __skb_header_release()
2048 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); in __skb_header_release()
2053 * skb_shared - is the buffer shared
2061 return refcount_read(&skb->users) != 1; in skb_shared()
2065 * skb_share_check - check if buffer is shared and if so clone it
2100 * skb_unshare - make a copy of a shared buffer
2130 * skb_peek - peek at the head of an &sk_buff_head
2144 struct sk_buff *skb = list_->next; in skb_peek()
2152 * __skb_peek - peek at the head of a non-empty &sk_buff_head
2159 return list_->next; in __skb_peek()
2163 * skb_peek_next - peek skb following the given one from a queue
2174 struct sk_buff *next = skb->next; in skb_peek_next()
2182 * skb_peek_tail - peek at the tail of an &sk_buff_head
2196 struct sk_buff *skb = READ_ONCE(list_->prev); in skb_peek_tail()
2205 * skb_queue_len - get queue length
2212 return list_->qlen; in skb_queue_len()
2216 * skb_queue_len_lockless - get queue length
2224 return READ_ONCE(list_->qlen); in skb_queue_len_lockless()
2228 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
2234 * the spinlock. It can also be used for on-stack sk_buff_head
2239 list->prev = list->next = (struct sk_buff *)list; in __skb_queue_head_init()
2240 list->qlen = 0; in __skb_queue_head_init()
2245 * this is needed for now since a whole lot of users of the skb-queue
2253 spin_lock_init(&list->lock); in skb_queue_head_init()
2261 lockdep_set_class(&list->lock, class); in skb_queue_head_init_class()
2267 * The "__skb_xxxx()" functions are the non-atomic ones that
2277 WRITE_ONCE(newsk->next, next); in __skb_insert()
2278 WRITE_ONCE(newsk->prev, prev); in __skb_insert()
2279 WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk); in __skb_insert()
2280 WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk); in __skb_insert()
2281 WRITE_ONCE(list->qlen, list->qlen + 1); in __skb_insert()
2288 struct sk_buff *first = list->next; in __skb_queue_splice()
2289 struct sk_buff *last = list->prev; in __skb_queue_splice()
2291 WRITE_ONCE(first->prev, prev); in __skb_queue_splice()
2292 WRITE_ONCE(prev->next, first); in __skb_queue_splice()
2294 WRITE_ONCE(last->next, next); in __skb_queue_splice()
2295 WRITE_ONCE(next->prev, last); in __skb_queue_splice()
2299 * skb_queue_splice - join two skb lists, this is designed for stacks
2307 __skb_queue_splice(list, (struct sk_buff *) head, head->next); in skb_queue_splice()
2308 head->qlen += list->qlen; in skb_queue_splice()
2313 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
2323 __skb_queue_splice(list, (struct sk_buff *) head, head->next); in skb_queue_splice_init()
2324 head->qlen += list->qlen; in skb_queue_splice_init()
2330 * skb_queue_splice_tail - join two skb lists, each list being a queue
2338 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); in skb_queue_splice_tail()
2339 head->qlen += list->qlen; in skb_queue_splice_tail()
2344 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
2355 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); in skb_queue_splice_tail_init()
2356 head->qlen += list->qlen; in skb_queue_splice_tail_init()
2362 * __skb_queue_after - queue a buffer at the list head
2376 __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list); in __skb_queue_after()
2386 __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list); in __skb_queue_before()
2390 * __skb_queue_head - queue a buffer at the list head
2407 * __skb_queue_tail - queue a buffer at the list tail
2432 WRITE_ONCE(list->qlen, list->qlen - 1); in __skb_unlink()
2433 next = skb->next; in __skb_unlink()
2434 prev = skb->prev; in __skb_unlink()
2435 skb->next = skb->prev = NULL; in __skb_unlink()
2436 WRITE_ONCE(next->prev, prev); in __skb_unlink()
2437 WRITE_ONCE(prev->next, next); in __skb_unlink()
2441 * __skb_dequeue - remove from the head of the queue
2458 * __skb_dequeue_tail - remove from the tail of the queue
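/* Sketch: the classic drain pattern built from these primitives. Move
 * a locked queue onto an on-stack list, then free without the lock.
 * "rxq" is a hypothetical driver-owned sk_buff_head.
 */
struct sk_buff_head tmp;
struct sk_buff *skb;

__skb_queue_head_init(&tmp);

spin_lock(&rxq->lock);
skb_queue_splice_init(rxq, &tmp);
spin_unlock(&rxq->lock);

while ((skb = __skb_dequeue(&tmp)) != NULL)
	consume_skb(skb);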
2477 return skb->data_len; in skb_is_nonlinear()
2482 return skb->len - skb->data_len; in skb_headlen()
2489 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) in __skb_pagelen()
2490 len += skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_pagelen()
2503 frag->netmem = netmem; in skb_frag_fill_netmem_desc()
2504 frag->offset = off; in skb_frag_fill_netmem_desc()
2519 skb_frag_t *frag = &shinfo->frags[i]; in __skb_fill_netmem_desc_noacc()
2533 * skb_len_add - adds a number to len fields of skb
2539 skb->len += delta; in skb_len_add()
2540 skb->data_len += delta; in skb_len_add()
2541 skb->truesize += delta; in skb_len_add()
2545 * __skb_fill_netmem_desc - initialise a fragment in an skb
2565 skb->unreadable = true; in __skb_fill_netmem_desc()
2577 skb->pfmemalloc = true; in __skb_fill_netmem_desc()
2590 skb_shinfo(skb)->nr_frags = i + 1; in skb_fill_netmem_desc()
2594 * skb_fill_page_desc - initialise a paged fragment in an skb
2601 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
2614 * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
2631 shinfo->nr_frags = i + 1; in skb_fill_page_desc_noacc()
2653 return skb->head + skb->tail; in skb_tail_pointer()
2658 skb->tail = skb->data - skb->head; in skb_reset_tail_pointer()
2664 skb->tail += offset; in skb_set_tail_pointer()
2670 return skb->tail; in skb_tail_pointer()
2675 skb->tail = skb->data; in skb_reset_tail_pointer()
2680 skb->tail = skb->data + offset; in skb_set_tail_pointer()
2688 if (WARN_ONCE(!skb->len, "%s\n", __func__)) in skb_assert_len()
2708 skb->tail += len; in __skb_put()
2709 skb->len += len; in __skb_put()
2764 skb->data -= len; in __skb_push()
2765 skb->len += len; in __skb_push()
2766 return skb->data; in __skb_push()
2774 skb->len -= len; in __skb_pull()
2775 if (unlikely(skb->len < skb->data_len)) { in __skb_pull()
2777 skb->len += len; in __skb_pull()
2783 return skb->data += len; in __skb_pull()
2788 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); in skb_pull_inline()
2804 if (unlikely(len > skb->len)) in pskb_may_pull_reason()
2807 if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb)))) in pskb_may_pull_reason()
2823 skb->len -= len; in pskb_pull()
2824 return skb->data += len; in pskb_pull()
2830 * skb_headroom - bytes at buffer head
2837 return skb->data - skb->head; in skb_headroom()
2841 * skb_tailroom - bytes at buffer end
2848 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; in skb_tailroom()
2852 * skb_availroom - bytes at buffer end
2863 return skb->end - skb->tail - skb->reserved_tailroom; in skb_availroom()
2867 * skb_reserve - adjust headroom
2876 skb->data += len; in skb_reserve()
2877 skb->tail += len; in skb_reserve()
2881 * skb_tailroom_reserve - adjust reserved_tailroom
2896 if (mtu < skb_tailroom(skb) - needed_tailroom) in skb_tailroom_reserve()
2898 skb->reserved_tailroom = skb_tailroom(skb) - mtu; in skb_tailroom_reserve()
2901 skb->reserved_tailroom = needed_tailroom; in skb_tailroom_reserve()
2910 skb->inner_protocol = protocol; in skb_set_inner_protocol()
2911 skb->inner_protocol_type = ENCAP_TYPE_ETHER; in skb_set_inner_protocol()
2917 skb->inner_ipproto = ipproto; in skb_set_inner_ipproto()
2918 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; in skb_set_inner_ipproto()
2923 skb->inner_mac_header = skb->mac_header; in skb_reset_inner_headers()
2924 skb->inner_network_header = skb->network_header; in skb_reset_inner_headers()
2925 skb->inner_transport_header = skb->transport_header; in skb_reset_inner_headers()
2930 return skb->mac_header != (typeof(skb->mac_header))~0U; in skb_mac_header_was_set()
2937 skb->mac_len = 0; in skb_reset_mac_len()
2939 skb->mac_len = skb->network_header - skb->mac_header; in skb_reset_mac_len()
2946 return skb->head + skb->inner_transport_header; in skb_inner_transport_header()
2951 return skb_inner_transport_header(skb) - skb->data; in skb_inner_transport_offset()
2956 long offset = skb->data - skb->head; in skb_reset_inner_transport_header()
2958 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset); in skb_reset_inner_transport_header()
2959 skb->inner_transport_header = offset; in skb_reset_inner_transport_header()
2966 skb->inner_transport_header += offset; in skb_set_inner_transport_header()
2971 return skb->head + skb->inner_network_header; in skb_inner_network_header()
2976 long offset = skb->data - skb->head; in skb_reset_inner_network_header()
2978 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset); in skb_reset_inner_network_header()
2979 skb->inner_network_header = offset; in skb_reset_inner_network_header()
2986 skb->inner_network_header += offset; in skb_set_inner_network_header()
2991 return skb->inner_network_header > 0; in skb_inner_network_header_was_set()
2996 return skb->head + skb->inner_mac_header; in skb_inner_mac_header()
3001 long offset = skb->data - skb->head; in skb_reset_inner_mac_header()
3003 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset); in skb_reset_inner_mac_header()
3004 skb->inner_mac_header = offset; in skb_reset_inner_mac_header()
3011 skb->inner_mac_header += offset; in skb_set_inner_mac_header()
3015 return skb->transport_header != (typeof(skb->transport_header))~0U; in skb_transport_header_was_set()
3021 return skb->head + skb->transport_header; in skb_transport_header()
3026 long offset = skb->data - skb->head; in skb_reset_transport_header()
3028 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset); in skb_reset_transport_header()
3029 skb->transport_header = offset; in skb_reset_transport_header()
3036 skb->transport_header += offset; in skb_set_transport_header()
3041 return skb->head + skb->network_header; in skb_network_header()
3046 long offset = skb->data - skb->head; in skb_reset_network_header()
3048 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset); in skb_reset_network_header()
3049 skb->network_header = offset; in skb_reset_network_header()
3055 skb->network_header += offset; in skb_set_network_header()
3061 return skb->head + skb->mac_header; in skb_mac_header()
3066 return skb_mac_header(skb) - skb->data; in skb_mac_offset()
3072 return skb->network_header - skb->mac_header; in skb_mac_header_len()
3077 skb->mac_header = (typeof(skb->mac_header))~0U; in skb_unset_mac_header()
3082 long offset = skb->data - skb->head; in skb_reset_mac_header()
3084 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset); in skb_reset_mac_header()
3085 skb->mac_header = offset; in skb_reset_mac_header()
3091 skb->mac_header += offset; in skb_set_mac_header()
3096 skb->mac_header = skb->network_header; in skb_pop_mac_header()
3116 skb_set_mac_header(skb, -skb->mac_len); in skb_mac_header_rebuild()
3117 memmove(skb_mac_header(skb), old_mac, skb->mac_len); in skb_mac_header_rebuild()
3122 * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
3130 skb_set_mac_header(skb, -full_mac_len); in skb_mac_header_rebuild_full()
3132 __skb_push(skb, full_mac_len - skb->mac_len); in skb_mac_header_rebuild_full()
3138 return skb->csum_start - skb_headroom(skb); in skb_checksum_start_offset()
3143 return skb->head + skb->csum_start; in skb_checksum_start()
3148 return skb_transport_header(skb) - skb->data; in skb_transport_offset()
3154 return skb->transport_header - skb->network_header; in skb_network_header_len()
3159 return skb->inner_transport_header - skb->inner_network_header; in skb_inner_network_header_len()
3164 return skb_network_header(skb) - skb->data; in skb_network_offset()
3169 return skb_inner_network_header(skb) - skb->data; in skb_inner_network_offset()
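/* Sketch: a receive parser using the offset helpers above, assuming a
 * valid IPv4 header at skb->data (ip_hdr() needs <linux/ip.h>).
 */
skb_reset_network_header(skb);
skb_set_transport_header(skb, ip_hdr(skb)->ihl * 4);
/* skb_network_header_len() now reports the IPv4 header length */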
3237 skb->len = len; in __skb_set_length()
3250 if (skb->data_len) in __pskb_trim()
3259 return (len < skb->len) ? __pskb_trim(skb, len) : 0; in pskb_trim()
3263 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
3268 * the skb is not cloned so we should never get an error due to out-
3269 * of-memory.
3279 unsigned int diff = len - skb->len; in __skb_grow()
3282 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), in __skb_grow()
3292 * skb_orphan - orphan a buffer
3301 if (skb->destructor) { in skb_orphan()
3302 skb->destructor(skb); in skb_orphan()
3303 skb->destructor = NULL; in skb_orphan()
3304 skb->sk = NULL; in skb_orphan()
3306 BUG_ON(skb->sk); in skb_orphan()
3311 * skb_orphan_frags - orphan the frags contained in a buffer
3323 if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN) in skb_orphan_frags()
3337 * __skb_queue_purge_reason - empty a list
3373 * netdev_alloc_frag - allocate a page fragment
3388 return __netdev_alloc_frag_align(fragsz, -align); in netdev_alloc_frag_align()
3395 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
3459 return __napi_alloc_frag_align(fragsz, -align); in napi_alloc_frag_align()
3469 * __dev_alloc_pages - allocate page for network Rx
3501 * __dev_alloc_page - allocate a page for network Rx
3521 * dev_page_is_reusable - check whether a page can be reused for network Rx
3537 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
3545 skb->pfmemalloc = true; in skb_propagate_pfmemalloc()
3549 * skb_frag_off() - Returns the offset of a skb fragment
3554 return frag->offset; in skb_frag_off()
3558 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
3564 frag->offset += delta; in skb_frag_off_add()
3568 * skb_frag_off_set() - Sets the offset of a skb fragment
3574 frag->offset = offset; in skb_frag_off_set()
3578 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
3585 fragto->offset = fragfrom->offset; in skb_frag_off_copy()
3591 return netmem_is_net_iov(frag->netmem); in skb_frag_is_net_iov()
3595 * skb_frag_net_iov - retrieve the net_iov referred to by fragment
3606 return netmem_to_net_iov(frag->netmem); in skb_frag_net_iov()
3610 * skb_frag_page - retrieve the page referred to by a paged fragment
3621 return netmem_to_page(frag->netmem); in skb_frag_page()
3625 * skb_frag_netmem - retrieve the netmem referred to by a fragment
3632 return frag->netmem; in skb_frag_netmem()
3641 * skb_frag_address - gets the address of the data contained in a paged fragment
3656 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
3672 * skb_frag_page_copy() - sets the page in a fragment from another fragment
3679 fragto->netmem = fragfrom->netmem; in skb_frag_page_copy()
3685 * __skb_frag_dma_map - maps a paged fragment via the DMA API
3712 __skb_frag_dma_map(dev, uf, uo, skb_frag_size(uf) - uo, \
3740 * skb_clone_writable - is the header of a clone writable
3750 skb_headroom(skb) + len <= skb->hdr_len; in skb_clone_writable()
3766 delta = headroom - skb_headroom(skb); in __skb_cow()
3775 * skb_cow - copy header of skb when it is required
3783 * The result is skb with writable area skb->head...skb->tail
3792 * skb_cow_head - skb_cow but only making the head writable
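/* Sketch: making the header area private before editing it, as a
 * tunnel transmit path might; needed_headroom and the drop label are
 * hypothetical.
 */
if (skb_cow_head(skb, needed_headroom))
	goto drop;	/* reallocation failed */
__skb_push(skb, needed_headroom);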
3807 * skb_padto - pad an skbuff up to a minimal size
3818 unsigned int size = skb->len; in skb_padto()
3821 return skb_pad(skb, len - size); in skb_padto()
3825 * __skb_put_padto - increase size and pad an skbuff up to a minimal size
3839 unsigned int size = skb->len; in __skb_put_padto()
3842 len -= size; in __skb_put_padto()
3844 return -ENOMEM; in __skb_put_padto()
3851 * skb_put_padto - increase size and pad an skbuff up to a minimal size
3871 const int off = skb->len; in skb_add_data()
3873 if (skb->ip_summed == CHECKSUM_NONE) { in skb_add_data()
3877 skb->csum = csum_block_add(skb->csum, csum, off); in skb_add_data()
3884 return -EFAULT; in skb_add_data()
3893 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; in skb_can_coalesce()
3903 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; in __skb_linearize()
3907 * skb_linearize - convert paged skb to linear one
3910 * If there is no free memory -ENOMEM is returned, otherwise zero
3919 * skb_has_shared_frag - can any frag be overwritten
3928 skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; in skb_has_shared_frag()
3932 * skb_linearize_cow - make sure skb is linear and writable
3935 * If there is no free memory -ENOMEM is returned, otherwise zero
3948 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpull_rcsum()
3949 skb->csum = csum_block_sub(skb->csum, in __skb_postpull_rcsum()
3951 else if (skb->ip_summed == CHECKSUM_PARTIAL && in __skb_postpull_rcsum()
3953 skb->ip_summed = CHECKSUM_NONE; in __skb_postpull_rcsum()
3957 * skb_postpull_rcsum - update checksum for received skb after pull
3969 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_postpull_rcsum()
3970 skb->csum = wsum_negate(csum_partial(start, len, in skb_postpull_rcsum()
3971 wsum_negate(skb->csum))); in skb_postpull_rcsum()
3972 else if (skb->ip_summed == CHECKSUM_PARTIAL && in skb_postpull_rcsum()
3974 skb->ip_summed = CHECKSUM_NONE; in skb_postpull_rcsum()
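/* Sketch: stripping a hypothetical 8-byte encap header on receive
 * while keeping CHECKSUM_COMPLETE coherent (mirrors skb_pull_rcsum()).
 */
const void *start;

if (!pskb_may_pull(skb, 8))
	goto drop;
start = skb->data;	/* capture after pskb_may_pull(), which may move head */
__skb_pull(skb, 8);
skb_postpull_rcsum(skb, start, 8);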
3981 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpush_rcsum()
3982 skb->csum = csum_block_add(skb->csum, in __skb_postpush_rcsum()
3987 * skb_postpush_rcsum - update checksum for received skb after push
4004 * skb_push_rcsum - push skb and update receive checksum
4017 skb_postpush_rcsum(skb, skb->data, len); in skb_push_rcsum()
4018 return skb->data; in skb_push_rcsum()
4023 * pskb_trim_rcsum - trim received skb and update checksum
4035 if (likely(len >= skb->len)) in pskb_trim_rcsum()
4042 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_trim_rcsum()
4043 skb->ip_summed = CHECKSUM_NONE; in __skb_trim_rcsum()
4050 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_grow_rcsum()
4051 skb->ip_summed = CHECKSUM_NONE; in __skb_grow_rcsum()
4058 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
4059 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
4062 for (skb = (queue)->next; \
4064 skb = skb->next)
4067 for (skb = (queue)->next, tmp = skb->next; \
4069 skb = tmp, tmp = skb->next)
4073 skb = skb->next)
4088 for (tmp = skb->next; \
4090 skb = tmp, tmp = skb->next)
4093 for (skb = (queue)->prev; \
4095 skb = skb->prev)
4098 for (skb = (queue)->prev, tmp = skb->prev; \
4100 skb = tmp, tmp = skb->prev)
4103 for (tmp = skb->prev; \
4105 skb = tmp, tmp = skb->prev)
4109 return skb_shinfo(skb)->frag_list != NULL; in skb_has_frag_list()
4114 skb_shinfo(skb)->frag_list = NULL; in skb_frag_list_init()
4118 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
4144 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); in skb_copy_datagram_msg()
4196 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT; in memcpy_from_msg()
4201 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; in memcpy_to_msg()
4220 if (likely(hlen - offset >= len)) in __skb_header_pointer()
4232 return __skb_header_pointer(skb, offset, len, skb->data, in skb_header_pointer()
4239 if (likely(skb_headlen(skb) - offset >= len)) in skb_pointer_if_linear()
4240 return skb->data + offset; in skb_pointer_if_linear()
4245 * skb_needs_linearize - check if we need to linearize a given skb
4259 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); in skb_needs_linearize()
4266 memcpy(to, skb->data, len); in skb_copy_from_linear_data()
4273 memcpy(to, skb->data + offset, len); in skb_copy_from_linear_data_offset()
4280 memcpy(skb->data, from, len); in skb_copy_to_linear_data()
4288 memcpy(skb->data + offset, from, len); in skb_copy_to_linear_data_offset()
4295 return skb->tstamp; in skb_get_ktime()
4299 * skb_get_timestamp - get timestamp from a skb
4310 *stamp = ns_to_kernel_old_timeval(skb->tstamp); in skb_get_timestamp()
4316 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_new_timestamp()
4318 stamp->tv_sec = ts.tv_sec; in skb_get_new_timestamp()
4319 stamp->tv_usec = ts.tv_nsec / 1000; in skb_get_new_timestamp()
4325 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_timestampns()
4327 stamp->tv_sec = ts.tv_sec; in skb_get_timestampns()
4328 stamp->tv_nsec = ts.tv_nsec; in skb_get_timestampns()
4334 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_new_timestampns()
4336 stamp->tv_sec = ts.tv_sec; in skb_get_new_timestampns()
4337 stamp->tv_nsec = ts.tv_nsec; in skb_get_new_timestampns()
4342 skb->tstamp = ktime_get_real(); in __net_timestamp()
4343 skb->tstamp_type = SKB_CLOCK_REALTIME; in __net_timestamp()
4354 skb->tstamp = kt; in skb_set_delivery_time()
4357 skb->tstamp_type = tstamp_type; in skb_set_delivery_time()
4359 skb->tstamp_type = SKB_CLOCK_REALTIME; in skb_set_delivery_time()
4387 * If needed, set the skb->tstamp to the (rcv) timestamp.
4391 if (skb->tstamp_type) { in skb_clear_delivery_time()
4392 skb->tstamp_type = SKB_CLOCK_REALTIME; in skb_clear_delivery_time()
4394 skb->tstamp = ktime_get_real(); in skb_clear_delivery_time()
4396 skb->tstamp = 0; in skb_clear_delivery_time()
4402 if (skb->tstamp_type) in skb_clear_tstamp()
4405 skb->tstamp = 0; in skb_clear_tstamp()
4410 if (skb->tstamp_type) in skb_tstamp()
4413 return skb->tstamp; in skb_tstamp()
4418 if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp) in skb_tstamp_cond()
4419 return skb->tstamp; in skb_tstamp_cond()
4429 return skb_shinfo(skb)->meta_len; in skb_metadata_len()
4451 #define __it(x, op) (x -= sizeof(u##op)) in __skb_metadata_differs()
4471 return memcmp(a - meta_len, b - meta_len, meta_len); in __skb_metadata_differs()
4491 skb_shinfo(skb)->meta_len = meta_len; in skb_metadata_set()
4520 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
4539 * skb_tstamp_tx - queue clone of skb with send time stamps
4553 * skb_tx_timestamp() - Driver hook for transmit timestamping
4567 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) in skb_tx_timestamp()
4572 * skb_complete_wifi_ack - deliver skb with wifi status
4585 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || in skb_csum_unnecessary()
4586 skb->csum_valid || in skb_csum_unnecessary()
4587 (skb->ip_summed == CHECKSUM_PARTIAL && in skb_csum_unnecessary()
4592 * skb_checksum_complete - Calculate checksum of an entire packet
4596 * the value of skb->csum. The latter can be used to supply the
4604 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
4615 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_decr_checksum_unnecessary()
4616 if (skb->csum_level == 0) in __skb_decr_checksum_unnecessary()
4617 skb->ip_summed = CHECKSUM_NONE; in __skb_decr_checksum_unnecessary()
4619 skb->csum_level--; in __skb_decr_checksum_unnecessary()
4625 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_incr_checksum_unnecessary()
4626 if (skb->csum_level < SKB_MAX_CSUM_LEVEL) in __skb_incr_checksum_unnecessary()
4627 skb->csum_level++; in __skb_incr_checksum_unnecessary()
4628 } else if (skb->ip_summed == CHECKSUM_NONE) { in __skb_incr_checksum_unnecessary()
4629 skb->ip_summed = CHECKSUM_UNNECESSARY; in __skb_incr_checksum_unnecessary()
4630 skb->csum_level = 0; in __skb_incr_checksum_unnecessary()
4636 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_reset_checksum_unnecessary()
4637 skb->ip_summed = CHECKSUM_NONE; in __skb_reset_checksum_unnecessary()
4638 skb->csum_level = 0; in __skb_reset_checksum_unnecessary()
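/* Sketch: a decap path consuming one level of the device's
 * CHECKSUM_UNNECESSARY validation after removing an encapsulation
 * header; at level 0 the status falls back to CHECKSUM_NONE.
 */
__skb_decr_checksum_unnecessary(skb);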
4652 skb->csum_valid = 1; in __skb_checksum_validate_needed()
4665 /* Unset checksum-complete
4668 * (uncompressed for instance) and checksum-complete value is
4673 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_checksum_complete_unset()
4674 skb->ip_summed = CHECKSUM_NONE; in skb_checksum_complete_unset()
4682 * checksum is stored in skb->csum for use in __skb_checksum_complete
4683 * non-zero: value of invalid checksum
4690 if (skb->ip_summed == CHECKSUM_COMPLETE) { in __skb_checksum_validate_complete()
4691 if (!csum_fold(csum_add(psum, skb->csum))) { in __skb_checksum_validate_complete()
4692 skb->csum_valid = 1; in __skb_checksum_validate_complete()
4697 skb->csum = psum; in __skb_checksum_validate_complete()
4699 if (complete || skb->len <= CHECKSUM_BREAK) { in __skb_checksum_validate_complete()
4703 skb->csum_valid = !csum; in __skb_checksum_validate_complete()
4723 * non-zero: value of invalid checksum
4729 skb->csum_valid = 0; \
4754 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); in __skb_checksum_convert_check()
4759 skb->csum = ~pseudo; in __skb_checksum_convert()
4760 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_convert()
4772 skb->ip_summed = CHECKSUM_PARTIAL; in skb_remcsum_adjust_partial()
4773 skb->csum_start = ((unsigned char *)ptr + start) - skb->head; in skb_remcsum_adjust_partial()
4774 skb->csum_offset = offset - start; in skb_remcsum_adjust_partial()
4778 * When called, ptr indicates the starting point for skb->csum when
4780 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
4792 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { in skb_remcsum_process()
4794 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); in skb_remcsum_process()
4797 delta = remcsum_adjust(ptr, skb->csum, start, offset); in skb_remcsum_process()
4799 /* Adjust skb->csum since we changed the packet */ in skb_remcsum_process()
4800 skb->csum = csum_add(skb->csum, delta); in skb_remcsum_process()
4806 return (void *)(skb->_nfct & NFCT_PTRMASK); in skb_nfct()
4815 return skb->_nfct; in skb_get_nfct()
4824 skb->slow_gro |= !!nfct; in skb_set_nfct()
4825 skb->_nfct = nfct; in skb_set_nfct()
4850 * struct skb_ext - sk_buff extensions
4875 if (skb->active_extensions) in skb_ext_put()
4876 __skb_ext_put(skb->extensions); in skb_ext_put()
4882 dst->active_extensions = src->active_extensions; in __skb_ext_copy()
4884 if (src->active_extensions) { in __skb_ext_copy()
4885 struct skb_ext *ext = src->extensions; in __skb_ext_copy()
4887 refcount_inc(&ext->refcnt); in __skb_ext_copy()
4888 dst->extensions = ext; in __skb_ext_copy()
4900 return !!ext->offset[i]; in __skb_ext_exist()
4905 return skb->active_extensions & (1 << id); in skb_ext_exist()
4917 struct skb_ext *ext = skb->extensions; in skb_ext_find()
4919 return (void *)ext + (ext->offset[id] << 3); in skb_ext_find()
4927 if (unlikely(skb->active_extensions)) { in skb_ext_reset()
4928 __skb_ext_put(skb->extensions); in skb_ext_reset()
4929 skb->active_extensions = 0; in skb_ext_reset()
4935 return unlikely(skb->active_extensions); in skb_has_extensions()
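/* Sketch: looking up one extension; skb_ext_find() returns NULL when
 * the id is absent. struct sec_path requires CONFIG_XFRM.
 */
struct sec_path *sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

if (sp) {
	/* IPsec transforms were applied to this packet */
}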
4950 skb->_nfct = 0; in nf_reset_ct()
4957 skb->nf_trace = 0; in nf_reset_trace()
4964 skb->ipvs_property = 0; in ipvs_reset()
4968 /* Note: This doesn't put any conntrack info in dst. */
4973 dst->_nfct = src->_nfct; in __nf_copy()
4978 dst->nf_trace = src->nf_trace; in __nf_copy()
4987 dst->slow_gro = src->slow_gro; in nf_copy()
4994 to->secmark = from->secmark; in skb_copy_secmark()
4999 skb->secmark = 0; in skb_init_secmark()
5020 return !skb->destructor && in skb_irq_freeable()
5023 !skb->_skb_refdst && in skb_irq_freeable()
5029 skb->queue_mapping = queue_mapping; in skb_set_queue_mapping()
5034 return skb->queue_mapping; in skb_get_queue_mapping()
5039 to->queue_mapping = from->queue_mapping; in skb_copy_queue_mapping()
5044 skb->queue_mapping = rx_queue + 1; in skb_record_rx_queue()
5049 return skb->queue_mapping - 1; in skb_get_rx_queue()
5054 return skb->queue_mapping != 0; in skb_rx_queue_recorded()
5059 skb->dst_pending_confirm = val; in skb_set_dst_pending_confirm()
5064 return skb->dst_pending_confirm != 0; in skb_get_dst_pending_confirm()
5078 return skb_shinfo(skb)->gso_size; in skb_is_gso()
5084 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; in skb_is_gso_v6()
5090 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; in skb_is_gso_sctp()
5096 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); in skb_is_gso_tcp()
5101 skb_shinfo(skb)->gso_size = 0; in skb_gso_reset()
5102 skb_shinfo(skb)->gso_segs = 0; in skb_gso_reset()
5103 skb_shinfo(skb)->gso_type = 0; in skb_gso_reset()
5109 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_increase_gso_size()
5111 shinfo->gso_size += increment; in skb_increase_gso_size()
5117 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_decrease_gso_size()
5119 shinfo->gso_size -= decrement; in skb_decrease_gso_size()
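/* Sketch: capping the MSS of a GSO skb with the helpers above;
 * max_mss is a hypothetical device limit.
 */
if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size > max_mss)
	skb_decrease_gso_size(skb_shinfo(skb),
			      skb_shinfo(skb)->gso_size - max_mss);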
5130 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && in skb_warn_if_lro()
5131 unlikely(shinfo->gso_type == 0)) { in skb_warn_if_lro()
5141 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_forward_csum()
5142 skb->ip_summed = CHECKSUM_NONE; in skb_forward_csum()
5146 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
5155 DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE); in skb_checksum_none_assert()
5166 * skb_head_is_locked - Determine if the skb->head is locked down
5176 return !skb->head_frag || skb_cloned(skb); in skb_head_is_locked()
5182 * See Documentation/networking/checksum-offloads.rst for
5185 * pseudo-header) before calling.
5196 skb->csum_offset)); in lco_csum()
5201 return csum_partial(l4_hdr, csum_start - l4_hdr, partial); in lco_csum()
5206 return skb->redirected; in skb_is_redirected()
5211 skb->redirected = 1; in skb_set_redirected()
5213 skb->from_ingress = from_ingress; in skb_set_redirected()
5214 if (skb->from_ingress) in skb_set_redirected()
5221 skb->redirected = 0; in skb_reset_redirect()
5227 skb->redirected = 1; in skb_set_redirected_noclear()
5229 skb->from_ingress = from_ingress; in skb_set_redirected_noclear()
5236 return skb->csum_not_inet; in skb_csum_is_sctp()
5244 skb->ip_summed = CHECKSUM_NONE; in skb_reset_csum_not_inet()
5246 skb->csum_not_inet = 0; in skb_reset_csum_not_inet()
5254 skb->kcov_handle = kcov_handle; in skb_set_kcov_handle()
5261 return skb->kcov_handle; in skb_get_kcov_handle()
5270 skb->pp_recycle = 1; in skb_mark_for_recycle()