Lines Matching full:skb

44  * DOC: skb checksums
105 * not in skb->csum. Thus, skb->csum is undefined in this case.
133 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
145 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
155 * referred to by skb->csum_start + skb->csum_offset and any preceding
194 * The skb was already checksummed by the protocol, or a checksum is not
253 /* Maximum value in skb->csum_level */
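
A minimal sketch (not part of this header) of how a receive path might apply the DOC rules above; the function rx_mark_checksum() and its hw_csum_ok flag are hypothetical:

	/* Report a hardware-verified checksum per the skb checksums DOC. */
	static void rx_mark_checksum(struct sk_buff *skb, bool hw_csum_ok)
	{
		if (hw_csum_ok) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = 0;	/* outermost checksum validated */
		} else {
			skb->ip_summed = CHECKSUM_NONE;	/* stack verifies in software */
		}
	}
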
260 /* For X bytes available in skb->head, what is the minimal
272 /* return minimum truesize of one skb containing X bytes of data */
311 * skb is out in neigh layer.
357 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
369 * skb_frag_size() - Returns the size of a skb fragment
370 * @frag: skb fragment
378 * skb_frag_size_set() - Sets the size of a skb fragment
379 * @frag: skb fragment
388 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
389 * @frag: skb fragment
398 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
399 * @frag: skb fragment
423 * @f: skb frag to operate on
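
As an illustration of the frag accessors above, a hypothetical helper (the name frag_bytes() is ours) can total the bytes held in page fragments, mirroring what __skb_pagelen() further below does:

	static unsigned int frag_bytes(const struct sk_buff *skb)
	{
		unsigned int i, len = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
		return len;
	}
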
455 * skb->tstamp.
533 int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
537 * The callback notifies userspace to release buffers when skb DMA is done in
538 * lower device, the skb last reference should be 0 when calling this.
572 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) argument
589 * the end of the header data, ie. at skb->end.
620 * remains valid until skb destructor.
637 * skb_header_cloned() checks if skb is allowed to add / write the headers.
639 * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
640 * (via __skb_header_release()). Any clone created from marked skb will get
645 * <alloc skb>
652 * doing the right thing. In practice there's usually only one payload-only skb.
661 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
662 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
663 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
669 /* This indicates the skb is from an untrusted source. */
735 * this is the part of the skb operated on by the common helpers
740 * Optionally &skb_shared_info.frag_list may point to another skb.
783 * @hdr_len: writable header length of cloned skb
785 * @csum_start: Offset from skb->head where checksumming should start
817 * @head_frag: skb was allocated from page fragments,
839 * @unreadable: indicates that at least 1 of the fragments in this skb is
842 * @decrypted: Decrypted SKB
844 * @tstamp_type: When set, skb->tstamp has the
845 * delivery_time clock base of skb->tstamp.
846 * @napi_id: id of the NAPI struct this skb came from
848 * @alloc_cpu: CPU which did the skb allocation.
858 * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
906 * first. This is owned by whoever has the skb queued ATM.
913 void (*destructor)(struct sk_buff *skb);
1126 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
1127 * @skb: buffer
1129 static inline bool skb_pfmemalloc(const struct sk_buff *skb) in skb_pfmemalloc() argument
1131 return unlikely(skb->pfmemalloc); in skb_pfmemalloc()
1135 * skb might have a dst pointer attached, refcounted or not.
1142 * skb_dst - returns skb dst_entry
1143 * @skb: buffer
1145 * Returns: skb dst_entry, regardless of reference taken or not.
1147 static inline struct dst_entry *skb_dst(const struct sk_buff *skb) in skb_dst() argument
1152 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && in skb_dst()
1155 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); in skb_dst()
1159 * skb_dst_set - sets skb dst
1160 * @skb: buffer
1163 * Sets skb dst, assuming a reference was taken on dst and should
1166 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set() argument
1168 skb->slow_gro |= !!dst; in skb_dst_set()
1169 skb->_skb_refdst = (unsigned long)dst; in skb_dst_set()
1173 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
1174 * @skb: buffer
1177 * Sets skb dst, assuming a reference was not taken on dst.
1182 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set_noref() argument
1185 skb->slow_gro |= !!dst; in skb_dst_set_noref()
1186 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; in skb_dst_set_noref()
1190 * skb_dst_is_noref - Test if skb dst isn't refcounted
1191 * @skb: buffer
1193 static inline bool skb_dst_is_noref(const struct sk_buff *skb) in skb_dst_is_noref() argument
1195 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); in skb_dst_is_noref()
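
A sketch of the intended usage, assuming the caller already holds a dst reference (attach_route() is a hypothetical name):

	static void attach_route(struct sk_buff *skb, struct dst_entry *dst)
	{
		/* skb_dst_set() takes ownership of the caller's reference;
		 * skb_dst() reads it back whether refcounted or not. */
		skb_dst_set(skb, dst);
		WARN_ON(skb_dst(skb) != dst);
	}
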
1198 /* For mangling skb->pkt_type from user space side from applications
1208 * skb_napi_id - Returns the skb's NAPI id
1209 * @skb: buffer
1211 static inline unsigned int skb_napi_id(const struct sk_buff *skb) in skb_napi_id() argument
1214 return skb->napi_id; in skb_napi_id()
1220 static inline bool skb_wifi_acked_valid(const struct sk_buff *skb) in skb_wifi_acked_valid() argument
1223 return skb->wifi_acked_valid; in skb_wifi_acked_valid()
1230 * skb_unref - decrement the skb's reference count
1231 * @skb: buffer
1233 * Returns: true if we can free the skb.
1235 static inline bool skb_unref(struct sk_buff *skb) in skb_unref() argument
1237 if (unlikely(!skb)) in skb_unref()
1239 if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1)) in skb_unref()
1241 else if (likely(!refcount_dec_and_test(&skb->users))) in skb_unref()
1247 static inline bool skb_data_unref(const struct sk_buff *skb, in skb_data_unref() argument
1252 if (!skb->cloned) in skb_data_unref()
1255 bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1; in skb_data_unref()
1265 void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
1269 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) in kfree_skb_reason() argument
1271 sk_skb_reason_drop(NULL, skb, reason); in kfree_skb_reason()
1276 * @skb: buffer to free
1278 static inline void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
1280 kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); in kfree_skb()
1283 void skb_release_head_state(struct sk_buff *skb);
1286 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1287 void skb_tx_error(struct sk_buff *skb);
1295 void consume_skb(struct sk_buff *skb);
1297 static inline void consume_skb(struct sk_buff *skb) in consume_skb() argument
1299 return kfree_skb(skb); in consume_skb()
1303 void __consume_stateless_skb(struct sk_buff *skb);
1304 void __kfree_skb(struct sk_buff *skb);
1306 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
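
The split between kfree_skb() and consume_skb() encodes intent: drops are visible to drop monitors and tracing, normal end-of-life is not. A sketch (finish_packet() is hypothetical):

	static void finish_packet(struct sk_buff *skb, bool ok)
	{
		if (ok)
			consume_skb(skb);	/* processed fine, not a drop */
		else
			kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	}
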
1314 struct sk_buff *build_skb_around(struct sk_buff *skb,
1316 void skb_attempt_defer_free(struct sk_buff *skb);
1353 * @skb: buffer
1355 * Returns: true if skb is a fast clone, and its clone is not freed.
1360 const struct sk_buff *skb) in skb_fclone_busy() argument
1364 fclones = container_of(skb, struct sk_buff_fclones, skb1); in skb_fclone_busy()
1366 return skb->fclone == SKB_FCLONE_ORIG && in skb_fclone_busy()
1385 void skb_headers_offset_update(struct sk_buff *skb, int off);
1386 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1387 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1389 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1390 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1392 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, in __pskb_copy() argument
1395 return __pskb_copy_fclone(skb, headroom, gfp_mask, false); in __pskb_copy()
1398 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1399 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1401 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
1402 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
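
A sketch of choosing between the duplication primitives above; dup_skb() is a hypothetical wrapper:

	static struct sk_buff *dup_skb(struct sk_buff *skb, bool need_write)
	{
		/* Clones share the data buffer: cheap, but the payload is
		 * read-only. Copies duplicate head and payload: writable. */
		return need_write ? skb_copy(skb, GFP_ATOMIC)
				  : skb_clone(skb, GFP_ATOMIC);
	}
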
1404 int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1406 int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1408 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1409 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1412 * skb_pad - zero pad the tail of an skb
1413 * @skb: buffer to pad
1420 * May return error in out of memory cases. The skb is freed on error.
1422 static inline int skb_pad(struct sk_buff *skb, int pad) in skb_pad() argument
1424 return __skb_pad(skb, pad, true); in skb_pad()
1428 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1442 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1449 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1485 static inline void skb_clear_hash(struct sk_buff *skb) in skb_clear_hash() argument
1487 skb->hash = 0; in skb_clear_hash()
1488 skb->sw_hash = 0; in skb_clear_hash()
1489 skb->l4_hash = 0; in skb_clear_hash()
1492 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) in skb_clear_hash_if_not_l4() argument
1494 if (!skb->l4_hash) in skb_clear_hash_if_not_l4()
1495 skb_clear_hash(skb); in skb_clear_hash_if_not_l4()
1499 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4) in __skb_set_hash() argument
1501 skb->l4_hash = is_l4; in __skb_set_hash()
1502 skb->sw_hash = is_sw; in __skb_set_hash()
1503 skb->hash = hash; in __skb_set_hash()
1507 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) in skb_set_hash() argument
1510 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4); in skb_set_hash()
1514 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) in __skb_set_sw_hash() argument
1516 __skb_set_hash(skb, hash, true, is_l4); in __skb_set_sw_hash()
1519 u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb);
1521 static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb) in __skb_get_hash_symmetric() argument
1523 return __skb_get_hash_symmetric_net(NULL, skb); in __skb_get_hash_symmetric()
1526 void __skb_get_hash_net(const struct net *net, struct sk_buff *skb);
1527 u32 skb_get_poff(const struct sk_buff *skb);
1528 u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
1530 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1533 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, in skb_flow_get_ports() argument
1536 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); in skb_flow_get_ports()
1548 const struct sk_buff *skb,
1553 static inline bool skb_flow_dissect(const struct sk_buff *skb, in skb_flow_dissect() argument
1557 return __skb_flow_dissect(NULL, skb, flow_dissector, in skb_flow_dissect()
1561 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, in skb_flow_dissect_flow_keys() argument
1566 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector, in skb_flow_dissect_flow_keys()
1572 const struct sk_buff *skb, in skb_flow_dissect_flow_keys_basic() argument
1578 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, in skb_flow_dissect_flow_keys_basic()
1582 void skb_flow_dissect_meta(const struct sk_buff *skb,
1586 /* Gets a skb connection tracking info, ctinfo map should be a
1591 skb_flow_dissect_ct(const struct sk_buff *skb,
1597 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1601 void skb_flow_dissect_hash(const struct sk_buff *skb,
1605 static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb) in skb_get_hash_net() argument
1607 if (!skb->l4_hash && !skb->sw_hash) in skb_get_hash_net()
1608 __skb_get_hash_net(net, skb); in skb_get_hash_net()
1610 return skb->hash; in skb_get_hash_net()
1613 static inline __u32 skb_get_hash(struct sk_buff *skb) in skb_get_hash() argument
1615 if (!skb->l4_hash && !skb->sw_hash) in skb_get_hash()
1616 __skb_get_hash_net(NULL, skb); in skb_get_hash()
1618 return skb->hash; in skb_get_hash()
1621 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) in skb_get_hash_flowi6() argument
1623 if (!skb->l4_hash && !skb->sw_hash) { in skb_get_hash_flowi6()
1627 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); in skb_get_hash_flowi6()
1630 return skb->hash; in skb_get_hash_flowi6()
1633 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1636 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) in skb_get_hash_raw() argument
1638 return skb->hash; in skb_get_hash_raw()
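
A sketch of a driver feeding a hardware RSS hash to the stack so that skb_get_hash() need not dissect the flow (rx_set_hash() is hypothetical):

	static void rx_set_hash(struct sk_buff *skb, u32 hw_hash, bool is_l4)
	{
		skb_set_hash(skb, hw_hash,
			     is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
	}
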
1658 static inline bool skb_is_decrypted(const struct sk_buff *skb) in skb_is_decrypted() argument
1661 return skb->decrypted; in skb_is_decrypted()
1676 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) in skb_end_pointer() argument
1678 return skb->head + skb->end; in skb_end_pointer()
1681 static inline unsigned int skb_end_offset(const struct sk_buff *skb) in skb_end_offset() argument
1683 return skb->end; in skb_end_offset()
1686 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) in skb_set_end_offset() argument
1688 skb->end = offset; in skb_set_end_offset()
1691 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) in skb_end_pointer() argument
1693 return skb->end; in skb_end_pointer()
1696 static inline unsigned int skb_end_offset(const struct sk_buff *skb) in skb_end_offset() argument
1698 return skb->end - skb->head; in skb_end_offset()
1701 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) in skb_set_end_offset() argument
1703 skb->end = skb->head + offset; in skb_set_end_offset()
1715 struct sk_buff *skb, struct iov_iter *from,
1718 int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
1721 static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, in skb_zerocopy_iter_dgram() argument
1724 return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_dgram()
1727 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1732 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) argument
1734 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) in skb_hwtstamps() argument
1736 return &skb_shinfo(skb)->hwtstamps; in skb_hwtstamps()
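
skb_shinfo() shows that the shared info block sits at skb_end_pointer(), so fragment state is one dereference away; a trivial hypothetical predicate:

	static bool has_frags(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->nr_frags != 0 ||
		       skb_has_frag_list(skb);
	}
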
1739 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) in skb_zcopy() argument
1741 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; in skb_zcopy()
1743 return is_zcopy ? skb_uarg(skb) : NULL; in skb_zcopy()
1746 static inline bool skb_zcopy_pure(const struct sk_buff *skb) in skb_zcopy_pure() argument
1748 return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; in skb_zcopy_pure()
1751 static inline bool skb_zcopy_managed(const struct sk_buff *skb) in skb_zcopy_managed() argument
1753 return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; in skb_zcopy_managed()
1767 static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg) in skb_zcopy_init() argument
1769 skb_shinfo(skb)->destructor_arg = uarg; in skb_zcopy_init()
1770 skb_shinfo(skb)->flags |= uarg->flags; in skb_zcopy_init()
1773 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, in skb_zcopy_set() argument
1776 if (skb && uarg && !skb_zcopy(skb)) { in skb_zcopy_set()
1781 skb_zcopy_init(skb, uarg); in skb_zcopy_set()
1785 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) in skb_zcopy_set_nouarg() argument
1787 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); in skb_zcopy_set_nouarg()
1788 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; in skb_zcopy_set_nouarg()
1791 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) in skb_zcopy_is_nouarg() argument
1793 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; in skb_zcopy_is_nouarg()
1796 static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) in skb_zcopy_get_nouarg() argument
1798 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); in skb_zcopy_get_nouarg()
1818 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) in skb_zcopy_clear() argument
1820 struct ubuf_info *uarg = skb_zcopy(skb); in skb_zcopy_clear()
1823 if (!skb_zcopy_is_nouarg(skb)) in skb_zcopy_clear()
1824 uarg->ops->complete(skb, uarg, zerocopy_success); in skb_zcopy_clear()
1826 skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; in skb_zcopy_clear()
1830 void __skb_zcopy_downgrade_managed(struct sk_buff *skb);
1832 static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb) in skb_zcopy_downgrade_managed() argument
1834 if (unlikely(skb_zcopy_managed(skb))) in skb_zcopy_downgrade_managed()
1835 __skb_zcopy_downgrade_managed(skb); in skb_zcopy_downgrade_managed()
1838 /* Return true if frags in this skb are readable by the host. */
1839 static inline bool skb_frags_readable(const struct sk_buff *skb) in skb_frags_readable() argument
1841 return !skb->unreadable; in skb_frags_readable()
1844 static inline void skb_mark_not_on_list(struct sk_buff *skb) in skb_mark_not_on_list() argument
1846 skb->next = NULL; in skb_mark_not_on_list()
1849 static inline void skb_poison_list(struct sk_buff *skb) in skb_poison_list() argument
1852 skb->next = SKB_LIST_POISON_NEXT; in skb_poison_list()
1856 /* Iterate through singly-linked GSO fragments of an skb. */
1857 #define skb_list_walk_safe(first, skb, next_skb) \ argument
1858 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1859 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
1861 static inline void skb_list_del_init(struct sk_buff *skb) in skb_list_del_init() argument
1863 __list_del_entry(&skb->list); in skb_list_del_init()
1864 skb_mark_not_on_list(skb); in skb_list_del_init()
1892 * skb_queue_is_last - check if skb is the last entry in the queue
1894 * @skb: buffer
1896 * Returns true if @skb is the last buffer on the list.
1899 const struct sk_buff *skb) in skb_queue_is_last() argument
1901 return skb->next == (const struct sk_buff *) list; in skb_queue_is_last()
1905 * skb_queue_is_first - check if skb is the first entry in the queue
1907 * @skb: buffer
1909 * Returns true if @skb is the first buffer on the list.
1912 const struct sk_buff *skb) in skb_queue_is_first() argument
1914 return skb->prev == (const struct sk_buff *) list; in skb_queue_is_first()
1920 * @skb: current buffer
1922 * Return the next packet in @list after @skb. It is only valid to
1926 const struct sk_buff *skb) in skb_queue_next() argument
1931 BUG_ON(skb_queue_is_last(list, skb)); in skb_queue_next()
1932 return skb->next; in skb_queue_next()
1938 * @skb: current buffer
1940 * Return the prev packet in @list before @skb. It is only valid to
1944 const struct sk_buff *skb) in skb_queue_prev() argument
1949 BUG_ON(skb_queue_is_first(list, skb)); in skb_queue_prev()
1950 return skb->prev; in skb_queue_prev()
1955 * @skb: buffer to reference
1960 static inline struct sk_buff *skb_get(struct sk_buff *skb) in skb_get() argument
1962 refcount_inc(&skb->users); in skb_get()
1963 return skb; in skb_get()
1972 * @skb: buffer to check
1978 static inline int skb_cloned(const struct sk_buff *skb) in skb_cloned() argument
1980 return skb->cloned && in skb_cloned()
1981 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; in skb_cloned()
1984 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) in skb_unclone() argument
1988 if (skb_cloned(skb)) in skb_unclone()
1989 return pskb_expand_head(skb, 0, 0, pri); in skb_unclone()
1994 /* This variant of skb_unclone() makes sure skb->truesize
1995 * and skb_end_offset() are not changed, whenever a new skb->head is needed.
2000 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
2001 static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in skb_unclone_keeptruesize() argument
2005 if (skb_cloned(skb)) in skb_unclone_keeptruesize()
2006 return __skb_unclone_keeptruesize(skb, pri); in skb_unclone_keeptruesize()
2012 * @skb: buffer to check
2017 static inline int skb_header_cloned(const struct sk_buff *skb) in skb_header_cloned() argument
2021 if (!skb->cloned) in skb_header_cloned()
2024 dataref = atomic_read(&skb_shinfo(skb)->dataref); in skb_header_cloned()
2029 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) in skb_header_unclone() argument
2033 if (skb_header_cloned(skb)) in skb_header_unclone()
2034 return pskb_expand_head(skb, 0, 0, pri); in skb_header_unclone()
2041 * @skb: buffer to operate on
2045 static inline void __skb_header_release(struct sk_buff *skb) in __skb_header_release() argument
2047 skb->nohdr = 1; in __skb_header_release()
2048 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); in __skb_header_release()
2054 * @skb: buffer to check
2059 static inline int skb_shared(const struct sk_buff *skb) in skb_shared() argument
2061 return refcount_read(&skb->users) != 1; in skb_shared()
2066 * @skb: buffer to check
2077 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) in skb_share_check() argument
2080 if (skb_shared(skb)) { in skb_share_check()
2081 struct sk_buff *nskb = skb_clone(skb, pri); in skb_share_check()
2084 consume_skb(skb); in skb_share_check()
2086 kfree_skb(skb); in skb_share_check()
2087 skb = nskb; in skb_share_check()
2089 return skb; in skb_share_check()
2101 * @skb: buffer to check
2112 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, in skb_unshare() argument
2116 if (skb_cloned(skb)) { in skb_unshare()
2117 struct sk_buff *nskb = skb_copy(skb, pri); in skb_unshare()
2121 consume_skb(skb); in skb_unshare()
2123 kfree_skb(skb); in skb_unshare()
2124 skb = nskb; in skb_unshare()
2126 return skb; in skb_unshare()
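
A sketch combining the two helpers for a handler that intends to modify the packet (make_private() is a hypothetical name):

	static struct sk_buff *make_private(struct sk_buff *skb)
	{
		skb = skb_share_check(skb, GFP_ATOMIC);	/* clone if shared */
		if (skb)
			skb = skb_unshare(skb, GFP_ATOMIC); /* copy if cloned */
		return skb;	/* NULL on allocation failure */
	}
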
2144 struct sk_buff *skb = list_->next; in skb_peek() local
2146 if (skb == (struct sk_buff *)list_) in skb_peek()
2147 skb = NULL; in skb_peek()
2148 return skb; in skb_peek()
2163 * skb_peek_next - peek skb following the given one from a queue
2164 * @skb: skb to start from
2171 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, in skb_peek_next() argument
2174 struct sk_buff *next = skb->next; in skb_peek_next()
2196 struct sk_buff *skb = READ_ONCE(list_->prev); in skb_peek_tail() local
2198 if (skb == (struct sk_buff *)list_) in skb_peek_tail()
2199 skb = NULL; in skb_peek_tail()
2200 return skb; in skb_peek_tail()
2245 * this is needed for now since a whole lot of users of the skb-queue
2299 * skb_queue_splice - join two skb lists, this is designed for stacks
2313 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
2330 * skb_queue_splice_tail - join two skb lists, each list being a queue
2344 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
2427 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2428 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in __skb_unlink() argument
2433 next = skb->next; in __skb_unlink()
2434 prev = skb->prev; in __skb_unlink()
2435 skb->next = skb->prev = NULL; in __skb_unlink()
2450 struct sk_buff *skb = skb_peek(list); in __skb_dequeue() local
2451 if (skb) in __skb_dequeue()
2452 __skb_unlink(skb, list); in __skb_dequeue()
2453 return skb; in __skb_dequeue()
2467 struct sk_buff *skb = skb_peek_tail(list); in __skb_dequeue_tail() local
2468 if (skb) in __skb_dequeue_tail()
2469 __skb_unlink(skb, list); in __skb_dequeue_tail()
2470 return skb; in __skb_dequeue_tail()
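
The double-underscore queue helpers assume the caller serializes access; a sketch taking the queue's own lock (pop_one() is hypothetical):

	static struct sk_buff *pop_one(struct sk_buff_head *q)
	{
		struct sk_buff *skb;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		skb = __skb_dequeue(q);		/* NULL if queue empty */
		spin_unlock_irqrestore(&q->lock, flags);
		return skb;
	}
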
2475 static inline bool skb_is_nonlinear(const struct sk_buff *skb) in skb_is_nonlinear() argument
2477 return skb->data_len; in skb_is_nonlinear()
2480 static inline unsigned int skb_headlen(const struct sk_buff *skb) in skb_headlen() argument
2482 return skb->len - skb->data_len; in skb_headlen()
2485 static inline unsigned int __skb_pagelen(const struct sk_buff *skb) in __skb_pagelen() argument
2489 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) in __skb_pagelen()
2490 len += skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_pagelen()
2494 static inline unsigned int skb_pagelen(const struct sk_buff *skb) in skb_pagelen() argument
2496 return skb_headlen(skb) + __skb_pagelen(skb); in skb_pagelen()
2533 * skb_len_add - adds a number to len fields of skb
2534 * @skb: buffer to add len to
2537 static inline void skb_len_add(struct sk_buff *skb, int delta) in skb_len_add() argument
2539 skb->len += delta; in skb_len_add()
2540 skb->data_len += delta; in skb_len_add()
2541 skb->truesize += delta; in skb_len_add()
2545 * __skb_fill_netmem_desc - initialise a fragment in an skb
2546 * @skb: buffer containing fragment to be initialised
2552 * Initialises the @i'th fragment of @skb to point to &size bytes at
2557 static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i, in __skb_fill_netmem_desc() argument
2562 __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size); in __skb_fill_netmem_desc()
2565 skb->unreadable = true; in __skb_fill_netmem_desc()
2571 /* Propagate page pfmemalloc to the skb if we can. The problem is in __skb_fill_netmem_desc()
2577 skb->pfmemalloc = true; in __skb_fill_netmem_desc()
2580 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, in __skb_fill_page_desc() argument
2583 __skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size); in __skb_fill_page_desc()
2586 static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i, in skb_fill_netmem_desc() argument
2589 __skb_fill_netmem_desc(skb, i, netmem, off, size); in skb_fill_netmem_desc()
2590 skb_shinfo(skb)->nr_frags = i + 1; in skb_fill_netmem_desc()
2594 * skb_fill_page_desc - initialise a paged fragment in an skb
2595 * @skb: buffer containing fragment to be initialised
2602 * @skb to point to @size bytes at offset @off within @page. In
2603 * addition updates @skb such that @i is the last fragment.
2607 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, in skb_fill_page_desc() argument
2610 skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size); in skb_fill_page_desc()
2614 * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
2615 * @skb: buffer containing fragment to be initialised
2624 static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i, in skb_fill_page_desc_noacc() argument
2628 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_fill_page_desc_noacc()
2634 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
2637 static inline void skb_add_rx_frag(struct sk_buff *skb, int i, in skb_add_rx_frag() argument
2641 skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size, in skb_add_rx_frag()
2645 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2648 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) argument
2651 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) in skb_tail_pointer() argument
2653 return skb->head + skb->tail; in skb_tail_pointer()
2656 static inline void skb_reset_tail_pointer(struct sk_buff *skb) in skb_reset_tail_pointer() argument
2658 skb->tail = skb->data - skb->head; in skb_reset_tail_pointer()
2661 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) in skb_set_tail_pointer() argument
2663 skb_reset_tail_pointer(skb); in skb_set_tail_pointer()
2664 skb->tail += offset; in skb_set_tail_pointer()
2668 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) in skb_tail_pointer() argument
2670 return skb->tail; in skb_tail_pointer()
2673 static inline void skb_reset_tail_pointer(struct sk_buff *skb) in skb_reset_tail_pointer() argument
2675 skb->tail = skb->data; in skb_reset_tail_pointer()
2678 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) in skb_set_tail_pointer() argument
2680 skb->tail = skb->data + offset; in skb_set_tail_pointer()
2685 static inline void skb_assert_len(struct sk_buff *skb) in skb_assert_len() argument
2688 if (WARN_ONCE(!skb->len, "%s\n", __func__)) in skb_assert_len()
2689 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); in skb_assert_len()
2694 void skb_might_realloc(struct sk_buff *skb);
2696 static inline void skb_might_realloc(struct sk_buff *skb) {} in skb_might_realloc() argument
2702 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2703 void *skb_put(struct sk_buff *skb, unsigned int len);
2704 static inline void *__skb_put(struct sk_buff *skb, unsigned int len) in __skb_put() argument
2706 void *tmp = skb_tail_pointer(skb); in __skb_put()
2707 SKB_LINEAR_ASSERT(skb); in __skb_put()
2708 skb->tail += len; in __skb_put()
2709 skb->len += len; in __skb_put()
2713 static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len) in __skb_put_zero() argument
2715 void *tmp = __skb_put(skb, len); in __skb_put_zero()
2721 static inline void *__skb_put_data(struct sk_buff *skb, const void *data, in __skb_put_data() argument
2724 void *tmp = __skb_put(skb, len); in __skb_put_data()
2730 static inline void __skb_put_u8(struct sk_buff *skb, u8 val) in __skb_put_u8() argument
2732 *(u8 *)__skb_put(skb, 1) = val; in __skb_put_u8()
2735 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) in skb_put_zero() argument
2737 void *tmp = skb_put(skb, len); in skb_put_zero()
2744 static inline void *skb_put_data(struct sk_buff *skb, const void *data, in skb_put_data() argument
2747 void *tmp = skb_put(skb, len); in skb_put_data()
2754 static inline void skb_put_u8(struct sk_buff *skb, u8 val) in skb_put_u8() argument
2756 *(u8 *)skb_put(skb, 1) = val; in skb_put_u8()
2759 void *skb_push(struct sk_buff *skb, unsigned int len);
2760 static inline void *__skb_push(struct sk_buff *skb, unsigned int len) in __skb_push() argument
2764 skb->data -= len; in __skb_push()
2765 skb->len += len; in __skb_push()
2766 return skb->data; in __skb_push()
2769 void *skb_pull(struct sk_buff *skb, unsigned int len);
2770 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len) in __skb_pull() argument
2774 skb->len -= len; in __skb_pull()
2775 if (unlikely(skb->len < skb->data_len)) { in __skb_pull()
2777 skb->len += len; in __skb_pull()
2779 skb_dump(KERN_ERR, skb, false); in __skb_pull()
2783 return skb->data += len; in __skb_pull()
2786 static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) in skb_pull_inline() argument
2788 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); in skb_pull_inline()
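
The put/push/pull helpers implement the canonical construction pattern: reserve headroom, append payload, then prepend headers. A sketch (build_frame() is a hypothetical name):

	static struct sk_buff *build_frame(const void *payload,
					   unsigned int len,
					   const struct ethhdr *eth)
	{
		struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);

		if (!skb)
			return NULL;
		skb_reserve(skb, ETH_HLEN);		/* leave headroom */
		skb_put_data(skb, payload, len);	/* append at tail */
		memcpy(skb_push(skb, ETH_HLEN), eth, ETH_HLEN);
		return skb;
	}
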
2791 void *skb_pull_data(struct sk_buff *skb, size_t len);
2793 void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2796 pskb_may_pull_reason(struct sk_buff *skb, unsigned int len) in pskb_may_pull_reason() argument
2799 skb_might_realloc(skb); in pskb_may_pull_reason()
2801 if (likely(len <= skb_headlen(skb))) in pskb_may_pull_reason()
2804 if (unlikely(len > skb->len)) in pskb_may_pull_reason()
2807 if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb)))) in pskb_may_pull_reason()
2813 static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len) in pskb_may_pull() argument
2815 return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; in pskb_may_pull()
2818 static inline void *pskb_pull(struct sk_buff *skb, unsigned int len) in pskb_pull() argument
2820 if (!pskb_may_pull(skb, len)) in pskb_pull()
2823 skb->len -= len; in pskb_pull()
2824 return skb->data += len; in pskb_pull()
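
pskb_may_pull() is the guard for parsing possibly non-linear data; a sketch assuming skb->data already points at the transport header (parse_l4() is hypothetical):

	static int parse_l4(struct sk_buff *skb)
	{
		struct tcphdr *th;

		if (!pskb_may_pull(skb, sizeof(*th)))
			return -EINVAL;	/* too short or pull failed */
		th = (struct tcphdr *)skb->data;
		return ntohs(th->dest);
	}
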
2827 void skb_condense(struct sk_buff *skb);
2831 * @skb: buffer to check
2835 static inline unsigned int skb_headroom(const struct sk_buff *skb) in skb_headroom() argument
2837 return skb->data - skb->head; in skb_headroom()
2842 * @skb: buffer to check
2846 static inline int skb_tailroom(const struct sk_buff *skb) in skb_tailroom() argument
2848 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; in skb_tailroom()
2853 * @skb: buffer to check
2858 static inline int skb_availroom(const struct sk_buff *skb) in skb_availroom() argument
2860 if (skb_is_nonlinear(skb)) in skb_availroom()
2863 return skb->end - skb->tail - skb->reserved_tailroom; in skb_availroom()
2868 * @skb: buffer to alter
2874 static inline void skb_reserve(struct sk_buff *skb, int len) in skb_reserve() argument
2876 skb->data += len; in skb_reserve()
2877 skb->tail += len; in skb_reserve()
2882 * @skb: buffer to alter
2892 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, in skb_tailroom_reserve() argument
2895 SKB_LINEAR_ASSERT(skb); in skb_tailroom_reserve()
2896 if (mtu < skb_tailroom(skb) - needed_tailroom) in skb_tailroom_reserve()
2898 skb->reserved_tailroom = skb_tailroom(skb) - mtu; in skb_tailroom_reserve()
2901 skb->reserved_tailroom = needed_tailroom; in skb_tailroom_reserve()
2907 static inline void skb_set_inner_protocol(struct sk_buff *skb, in skb_set_inner_protocol() argument
2910 skb->inner_protocol = protocol; in skb_set_inner_protocol()
2911 skb->inner_protocol_type = ENCAP_TYPE_ETHER; in skb_set_inner_protocol()
2914 static inline void skb_set_inner_ipproto(struct sk_buff *skb, in skb_set_inner_ipproto() argument
2917 skb->inner_ipproto = ipproto; in skb_set_inner_ipproto()
2918 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; in skb_set_inner_ipproto()
2921 static inline void skb_reset_inner_headers(struct sk_buff *skb) in skb_reset_inner_headers() argument
2923 skb->inner_mac_header = skb->mac_header; in skb_reset_inner_headers()
2924 skb->inner_network_header = skb->network_header; in skb_reset_inner_headers()
2925 skb->inner_transport_header = skb->transport_header; in skb_reset_inner_headers()
2928 static inline int skb_mac_header_was_set(const struct sk_buff *skb) in skb_mac_header_was_set() argument
2930 return skb->mac_header != (typeof(skb->mac_header))~0U; in skb_mac_header_was_set()
2933 static inline void skb_reset_mac_len(struct sk_buff *skb) in skb_reset_mac_len() argument
2935 if (!skb_mac_header_was_set(skb)) { in skb_reset_mac_len()
2937 skb->mac_len = 0; in skb_reset_mac_len()
2939 skb->mac_len = skb->network_header - skb->mac_header; in skb_reset_mac_len()
2944 static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb) in skb_inner_transport_header() argument
2946 return skb->head + skb->inner_transport_header; in skb_inner_transport_header()
2949 static inline int skb_inner_transport_offset(const struct sk_buff *skb) in skb_inner_transport_offset() argument
2951 return skb_inner_transport_header(skb) - skb->data; in skb_inner_transport_offset()
2954 static inline void skb_reset_inner_transport_header(struct sk_buff *skb) in skb_reset_inner_transport_header() argument
2956 long offset = skb->data - skb->head; in skb_reset_inner_transport_header()
2958 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset); in skb_reset_inner_transport_header()
2959 skb->inner_transport_header = offset; in skb_reset_inner_transport_header()
2962 static inline void skb_set_inner_transport_header(struct sk_buff *skb, in skb_set_inner_transport_header() argument
2965 skb_reset_inner_transport_header(skb); in skb_set_inner_transport_header()
2966 skb->inner_transport_header += offset; in skb_set_inner_transport_header()
2969 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) in skb_inner_network_header() argument
2971 return skb->head + skb->inner_network_header; in skb_inner_network_header()
2974 static inline void skb_reset_inner_network_header(struct sk_buff *skb) in skb_reset_inner_network_header() argument
2976 long offset = skb->data - skb->head; in skb_reset_inner_network_header()
2978 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset); in skb_reset_inner_network_header()
2979 skb->inner_network_header = offset; in skb_reset_inner_network_header()
2982 static inline void skb_set_inner_network_header(struct sk_buff *skb, in skb_set_inner_network_header() argument
2985 skb_reset_inner_network_header(skb); in skb_set_inner_network_header()
2986 skb->inner_network_header += offset; in skb_set_inner_network_header()
2989 static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb) in skb_inner_network_header_was_set() argument
2991 return skb->inner_network_header > 0; in skb_inner_network_header_was_set()
2994 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) in skb_inner_mac_header() argument
2996 return skb->head + skb->inner_mac_header; in skb_inner_mac_header()
2999 static inline void skb_reset_inner_mac_header(struct sk_buff *skb) in skb_reset_inner_mac_header() argument
3001 long offset = skb->data - skb->head; in skb_reset_inner_mac_header()
3003 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset); in skb_reset_inner_mac_header()
3004 skb->inner_mac_header = offset; in skb_reset_inner_mac_header()
3007 static inline void skb_set_inner_mac_header(struct sk_buff *skb, in skb_set_inner_mac_header() argument
3010 skb_reset_inner_mac_header(skb); in skb_set_inner_mac_header()
3011 skb->inner_mac_header += offset; in skb_set_inner_mac_header()
3013 static inline bool skb_transport_header_was_set(const struct sk_buff *skb) in skb_transport_header_was_set() argument
3015 return skb->transport_header != (typeof(skb->transport_header))~0U; in skb_transport_header_was_set()
3018 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) in skb_transport_header() argument
3020 DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb)); in skb_transport_header()
3021 return skb->head + skb->transport_header; in skb_transport_header()
3024 static inline void skb_reset_transport_header(struct sk_buff *skb) in skb_reset_transport_header() argument
3026 long offset = skb->data - skb->head; in skb_reset_transport_header()
3028 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset); in skb_reset_transport_header()
3029 skb->transport_header = offset; in skb_reset_transport_header()
3032 static inline void skb_set_transport_header(struct sk_buff *skb, in skb_set_transport_header() argument
3035 skb_reset_transport_header(skb); in skb_set_transport_header()
3036 skb->transport_header += offset; in skb_set_transport_header()
3039 static inline unsigned char *skb_network_header(const struct sk_buff *skb) in skb_network_header() argument
3041 return skb->head + skb->network_header; in skb_network_header()
3044 static inline void skb_reset_network_header(struct sk_buff *skb) in skb_reset_network_header() argument
3046 long offset = skb->data - skb->head; in skb_reset_network_header()
3048 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset); in skb_reset_network_header()
3049 skb->network_header = offset; in skb_reset_network_header()
3052 static inline void skb_set_network_header(struct sk_buff *skb, const int offset) in skb_set_network_header() argument
3054 skb_reset_network_header(skb); in skb_set_network_header()
3055 skb->network_header += offset; in skb_set_network_header()
3058 static inline unsigned char *skb_mac_header(const struct sk_buff *skb) in skb_mac_header() argument
3060 DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); in skb_mac_header()
3061 return skb->head + skb->mac_header; in skb_mac_header()
3064 static inline int skb_mac_offset(const struct sk_buff *skb) in skb_mac_offset() argument
3066 return skb_mac_header(skb) - skb->data; in skb_mac_offset()
3069 static inline u32 skb_mac_header_len(const struct sk_buff *skb) in skb_mac_header_len() argument
3071 DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); in skb_mac_header_len()
3072 return skb->network_header - skb->mac_header; in skb_mac_header_len()
3075 static inline void skb_unset_mac_header(struct sk_buff *skb) in skb_unset_mac_header() argument
3077 skb->mac_header = (typeof(skb->mac_header))~0U; in skb_unset_mac_header()
3080 static inline void skb_reset_mac_header(struct sk_buff *skb) in skb_reset_mac_header() argument
3082 long offset = skb->data - skb->head; in skb_reset_mac_header()
3084 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset); in skb_reset_mac_header()
3085 skb->mac_header = offset; in skb_reset_mac_header()
3088 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) in skb_set_mac_header() argument
3090 skb_reset_mac_header(skb); in skb_set_mac_header()
3091 skb->mac_header += offset; in skb_set_mac_header()
3094 static inline void skb_pop_mac_header(struct sk_buff *skb) in skb_pop_mac_header() argument
3096 skb->mac_header = skb->network_header; in skb_pop_mac_header()
3099 static inline void skb_probe_transport_header(struct sk_buff *skb) in skb_probe_transport_header() argument
3103 if (skb_transport_header_was_set(skb)) in skb_probe_transport_header()
3106 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, in skb_probe_transport_header()
3108 skb_set_transport_header(skb, keys.control.thoff); in skb_probe_transport_header()
3111 static inline void skb_mac_header_rebuild(struct sk_buff *skb) in skb_mac_header_rebuild() argument
3113 if (skb_mac_header_was_set(skb)) { in skb_mac_header_rebuild()
3114 const unsigned char *old_mac = skb_mac_header(skb); in skb_mac_header_rebuild()
3116 skb_set_mac_header(skb, -skb->mac_len); in skb_mac_header_rebuild()
3117 memmove(skb_mac_header(skb), old_mac, skb->mac_len); in skb_mac_header_rebuild()
3122 * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
3125 static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len) in skb_mac_header_rebuild_full() argument
3127 if (skb_mac_header_was_set(skb)) { in skb_mac_header_rebuild_full()
3128 const unsigned char *old_mac = skb_mac_header(skb); in skb_mac_header_rebuild_full()
3130 skb_set_mac_header(skb, -full_mac_len); in skb_mac_header_rebuild_full()
3131 memmove(skb_mac_header(skb), old_mac, full_mac_len); in skb_mac_header_rebuild_full()
3132 __skb_push(skb, full_mac_len - skb->mac_len); in skb_mac_header_rebuild_full()
3136 static inline int skb_checksum_start_offset(const struct sk_buff *skb) in skb_checksum_start_offset() argument
3138 return skb->csum_start - skb_headroom(skb); in skb_checksum_start_offset()
3141 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) in skb_checksum_start() argument
3143 return skb->head + skb->csum_start; in skb_checksum_start()
3146 static inline int skb_transport_offset(const struct sk_buff *skb) in skb_transport_offset() argument
3148 return skb_transport_header(skb) - skb->data; in skb_transport_offset()
3151 static inline u32 skb_network_header_len(const struct sk_buff *skb) in skb_network_header_len() argument
3153 DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb)); in skb_network_header_len()
3154 return skb->transport_header - skb->network_header; in skb_network_header_len()
3157 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) in skb_inner_network_header_len() argument
3159 return skb->inner_transport_header - skb->inner_network_header; in skb_inner_network_header_len()
3162 static inline int skb_network_offset(const struct sk_buff *skb) in skb_network_offset() argument
3164 return skb_network_header(skb) - skb->data; in skb_network_offset()
3167 static inline int skb_inner_network_offset(const struct sk_buff *skb) in skb_inner_network_offset() argument
3169 return skb_inner_network_header(skb) - skb->data; in skb_inner_network_offset()
3173 pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len) in pskb_network_may_pull_reason() argument
3175 return pskb_may_pull_reason(skb, skb_network_offset(skb) + len); in pskb_network_may_pull_reason()
3178 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) in pskb_network_may_pull() argument
3180 return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; in pskb_network_may_pull()
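
A sketch of a receive path recording header offsets with the setters above, assuming skb->data points at an Ethernet header followed by an IPv4 header with no options (mark_headers() is hypothetical):

	static void mark_headers(struct sk_buff *skb)
	{
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb,
					 ETH_HLEN + sizeof(struct iphdr));
	}
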
3194 * skb_reserve(skb, NET_IP_ALIGN);
3208 * The networking layer reserves some headroom in skb data (via
3209 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
3231 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
3233 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) in __skb_set_length() argument
3235 if (WARN_ON(skb_is_nonlinear(skb))) in __skb_set_length()
3237 skb->len = len; in __skb_set_length()
3238 skb_set_tail_pointer(skb, len); in __skb_set_length()
3241 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) in __skb_trim() argument
3243 __skb_set_length(skb, len); in __skb_trim()
3246 void skb_trim(struct sk_buff *skb, unsigned int len);
3248 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) in __pskb_trim() argument
3250 if (skb->data_len) in __pskb_trim()
3251 return ___pskb_trim(skb, len); in __pskb_trim()
3252 __skb_trim(skb, len); in __pskb_trim()
3256 static inline int pskb_trim(struct sk_buff *skb, unsigned int len) in pskb_trim() argument
3258 skb_might_realloc(skb); in pskb_trim()
3259 return (len < skb->len) ? __pskb_trim(skb, len) : 0; in pskb_trim()
3264 * @skb: buffer to alter
3268 * the skb is not cloned so we should never get an error due to out-
3271 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) in pskb_trim_unique() argument
3273 int err = pskb_trim(skb, len); in pskb_trim_unique()
3277 static inline int __skb_grow(struct sk_buff *skb, unsigned int len) in __skb_grow() argument
3279 unsigned int diff = len - skb->len; in __skb_grow()
3281 if (skb_tailroom(skb) < diff) { in __skb_grow()
3282 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), in __skb_grow()
3287 __skb_set_length(skb, len); in __skb_grow()
3293 * @skb: buffer to orphan
3296 * destructor function and make the @skb unowned. The buffer continues
3299 static inline void skb_orphan(struct sk_buff *skb) in skb_orphan() argument
3301 if (skb->destructor) { in skb_orphan()
3302 skb->destructor(skb); in skb_orphan()
3303 skb->destructor = NULL; in skb_orphan()
3304 skb->sk = NULL; in skb_orphan()
3306 BUG_ON(skb->sk); in skb_orphan()
3312 * @skb: buffer to orphan frags from
3315 * For each frag in the SKB which needs a destructor (i.e. has an
3319 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags() argument
3321 if (likely(!skb_zcopy(skb))) in skb_orphan_frags()
3323 if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN) in skb_orphan_frags()
3325 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags()
3328 /* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
3329 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags_rx() argument
3331 if (likely(!skb_zcopy(skb))) in skb_orphan_frags_rx()
3333 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags_rx()
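
A sketch of preparing a tx skb that may loop back to the rx path, per the comment above (loopback_prepare() is hypothetical):

	static int loopback_prepare(struct sk_buff *skb)
	{
		if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
			return -ENOMEM;	/* could not copy userspace frags */
		skb_orphan(skb);	/* run destructor, clear skb->sk */
		return 0;
	}
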
3348 struct sk_buff *skb; in __skb_queue_purge_reason() local
3350 while ((skb = __skb_dequeue(list)) != NULL) in __skb_queue_purge_reason()
3351 kfree_skb_reason(skb, reason); in __skb_queue_purge_reason()
3430 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); in __netdev_alloc_skb_ip_align() local
3432 if (NET_IP_ALIGN && skb) in __netdev_alloc_skb_ip_align()
3433 skb_reserve(skb, NET_IP_ALIGN); in __netdev_alloc_skb_ip_align()
3434 return skb; in __netdev_alloc_skb_ip_align()
3463 void napi_consume_skb(struct sk_buff *skb, int budget);
3465 void napi_skb_free_stolen_head(struct sk_buff *skb);
3466 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason);
3537 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
3539 * @skb: The skb that may need pfmemalloc set
3542 struct sk_buff *skb) in skb_propagate_pfmemalloc() argument
3545 skb->pfmemalloc = true; in skb_propagate_pfmemalloc()
3549 * skb_frag_off() - Returns the offset of a skb fragment
3558 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
3559 * @frag: skb fragment
3568 * skb_frag_off_set() - Sets the offset of a skb fragment
3569 * @frag: skb fragment
3578 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
3579 * @fragto: skb fragment where offset is set
3580 * @fragfrom: skb fragment offset is copied from
3673 * @fragto: skb fragment where page is set
3674 * @fragfrom: skb fragment page is copied from
3725 static inline struct sk_buff *pskb_copy(struct sk_buff *skb, in pskb_copy() argument
3728 return __pskb_copy(skb, skb_headroom(skb), gfp_mask); in pskb_copy()
3732 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, in pskb_copy_for_clone() argument
3735 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); in pskb_copy_for_clone()
3741 * @skb: buffer to check
3747 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) in skb_clone_writable() argument
3749 return !skb_header_cloned(skb) && in skb_clone_writable()
3750 skb_headroom(skb) + len <= skb->hdr_len; in skb_clone_writable()
3753 static inline int skb_try_make_writable(struct sk_buff *skb, in skb_try_make_writable() argument
3756 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && in skb_try_make_writable()
3757 pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_try_make_writable()
3760 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, in __skb_cow() argument
3765 if (headroom > skb_headroom(skb)) in __skb_cow()
3766 delta = headroom - skb_headroom(skb); in __skb_cow()
3769 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, in __skb_cow()
3775 * skb_cow - copy header of skb when it is required
3776 * @skb: buffer to cow
3779 * If the skb passed lacks sufficient headroom or its data part
3781 * is returned and original skb is not changed.
3783 * The result is skb with writable area skb->head...skb->tail
3786 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) in skb_cow() argument
3788 return __skb_cow(skb, headroom, skb_cloned(skb)); in skb_cow()
3793 * @skb: buffer to cow
3801 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) in skb_cow_head() argument
3803 return __skb_cow(skb, headroom, skb_header_cloned(skb)); in skb_cow_head()
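
A sketch of the skb_cow_head() pattern used before writing in front of the data, e.g. when pushing a 4-byte tag (push_4_bytes() is hypothetical):

	static int push_4_bytes(struct sk_buff *skb)
	{
		int err = skb_cow_head(skb, 4);	/* headroom + unclone hdrs */

		if (err)
			return err;		/* skb left unchanged */
		memset(__skb_push(skb, 4), 0, 4);
		return 0;
	}
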
3808 * @skb: buffer to pad
3814 * success. The skb is freed on error.
3816 static inline int skb_padto(struct sk_buff *skb, unsigned int len) in skb_padto() argument
3818 unsigned int size = skb->len; in skb_padto()
3821 return skb_pad(skb, len - size); in skb_padto()
3826 * @skb: buffer to pad
3833 * success. The skb is freed on error if @free_on_error is true.
3835 static inline int __must_check __skb_put_padto(struct sk_buff *skb, in __skb_put_padto() argument
3839 unsigned int size = skb->len; in __skb_put_padto()
3843 if (__skb_pad(skb, len, free_on_error)) in __skb_put_padto()
3845 __skb_put(skb, len); in __skb_put_padto()
3852 * @skb: buffer to pad
3858 * success. The skb is freed on error.
3860 static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) in skb_put_padto() argument
3862 return __skb_put_padto(skb, len, true); in skb_put_padto()
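
A sketch of padding a frame to the Ethernet minimum before transmit; note the helper frees the skb on failure, so the caller must not touch it afterwards (tx_pad() is hypothetical):

	static int tx_pad(struct sk_buff *skb)
	{
		return skb_put_padto(skb, ETH_ZLEN);	/* 0 on success */
	}
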
3868 static inline int skb_add_data(struct sk_buff *skb, in skb_add_data() argument
3871 const int off = skb->len; in skb_add_data()
3873 if (skb->ip_summed == CHECKSUM_NONE) { in skb_add_data()
3875 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, in skb_add_data()
3877 skb->csum = csum_block_add(skb->csum, csum, off); in skb_add_data()
3880 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) in skb_add_data()
3883 __skb_trim(skb, off); in skb_add_data()
3887 static inline bool skb_can_coalesce(struct sk_buff *skb, int i, in skb_can_coalesce() argument
3890 if (skb_zcopy(skb)) in skb_can_coalesce()
3893 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; in skb_can_coalesce()
3901 static inline int __skb_linearize(struct sk_buff *skb) in __skb_linearize() argument
3903 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; in __skb_linearize()
3907 * skb_linearize - convert paged skb to linear one
3908 * @skb: buffer to linearize
3911 * is returned and the old skb data released.
3913 static inline int skb_linearize(struct sk_buff *skb) in skb_linearize() argument
3915 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; in skb_linearize()
3920 * @skb: buffer to test
3922 * Return: true if the skb has at least one frag that might be modified
3925 static inline bool skb_has_shared_frag(const struct sk_buff *skb) in skb_has_shared_frag() argument
3927 return skb_is_nonlinear(skb) && in skb_has_shared_frag()
3928 skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; in skb_has_shared_frag()
3932 * skb_linearize_cow - make sure skb is linear and writable
3933 * @skb: buffer to process
3936 * is returned and the old skb data released.
3938 static inline int skb_linearize_cow(struct sk_buff *skb) in skb_linearize_cow() argument
3940 return skb_is_nonlinear(skb) || skb_cloned(skb) ? in skb_linearize_cow()
3941 __skb_linearize(skb) : 0; in skb_linearize_cow()
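
A sketch of code that must scan the packet as one contiguous buffer (scan_payload() and the 0x7e delimiter are illustrative):

	static int scan_payload(struct sk_buff *skb)
	{
		if (skb_linearize(skb))
			return -ENOMEM;
		/* all skb->len bytes are now in the linear head */
		return memchr(skb->data, 0x7e, skb->len) ? 1 : 0;
	}
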
3945 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, in __skb_postpull_rcsum() argument
3948 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpull_rcsum()
3949 skb->csum = csum_block_sub(skb->csum, in __skb_postpull_rcsum()
3951 else if (skb->ip_summed == CHECKSUM_PARTIAL && in __skb_postpull_rcsum()
3952 skb_checksum_start_offset(skb) < 0) in __skb_postpull_rcsum()
3953 skb->ip_summed = CHECKSUM_NONE; in __skb_postpull_rcsum()
3957 * skb_postpull_rcsum - update checksum for received skb after pull
3958 * @skb: buffer to update
3966 static inline void skb_postpull_rcsum(struct sk_buff *skb, in skb_postpull_rcsum() argument
3969 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_postpull_rcsum()
3970 skb->csum = wsum_negate(csum_partial(start, len, in skb_postpull_rcsum()
3971 wsum_negate(skb->csum))); in skb_postpull_rcsum()
3972 else if (skb->ip_summed == CHECKSUM_PARTIAL && in skb_postpull_rcsum()
3973 skb_checksum_start_offset(skb) < 0) in skb_postpull_rcsum()
3974 skb->ip_summed = CHECKSUM_NONE; in skb_postpull_rcsum()
3978 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, in __skb_postpush_rcsum() argument
3981 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpush_rcsum()
3982 skb->csum = csum_block_add(skb->csum, in __skb_postpush_rcsum()
3987 * skb_postpush_rcsum - update checksum for received skb after push
3988 * @skb: buffer to update
3995 static inline void skb_postpush_rcsum(struct sk_buff *skb, in skb_postpush_rcsum() argument
3998 __skb_postpush_rcsum(skb, start, len, 0); in skb_postpush_rcsum()
4001 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
4004 * skb_push_rcsum - push skb and update receive checksum
4005 * @skb: buffer to update
4014 static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) in skb_push_rcsum() argument
4016 skb_push(skb, len); in skb_push_rcsum()
4017 skb_postpush_rcsum(skb, skb->data, len); in skb_push_rcsum()
4018 return skb->data; in skb_push_rcsum()
4021 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
4023 * pskb_trim_rcsum - trim received skb and update checksum
4024 * @skb: buffer to trim
4029 * It can change skb pointers.
4032 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum() argument
4034 skb_might_realloc(skb); in pskb_trim_rcsum()
4035 if (likely(len >= skb->len)) in pskb_trim_rcsum()
4037 return pskb_trim_rcsum_slow(skb, len); in pskb_trim_rcsum()
4040 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) in __skb_trim_rcsum() argument
4042 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_trim_rcsum()
4043 skb->ip_summed = CHECKSUM_NONE; in __skb_trim_rcsum()
4044 __skb_trim(skb, len); in __skb_trim_rcsum()
4048 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) in __skb_grow_rcsum() argument
4050 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_grow_rcsum()
4051 skb->ip_summed = CHECKSUM_NONE; in __skb_grow_rcsum()
4052 return __skb_grow(skb, len); in __skb_grow_rcsum()
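
A sketch of keeping CHECKSUM_COMPLETE coherent while stripping a header, which is the pattern skb_pull_rcsum() wraps (strip_hdr() is hypothetical):

	static void strip_hdr(struct sk_buff *skb, unsigned int hlen)
	{
		const void *old = skb->data;

		__skb_pull(skb, hlen);
		skb_postpull_rcsum(skb, old, hlen); /* subtract pulled bytes */
	}
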
4058 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) argument
4059 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) argument
#define skb_queue_walk(queue, skb)					\
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)					\
		for (skb = skb_rb_first(root); skb != NULL;		\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)					\
		for (; skb != NULL;					\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)				\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
		     skb = tmp)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb)				\
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)
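
/* Usage sketch (illustrative): the _safe walkers keep a lookahead pointer so
 * the current skb may be unlinked inside the loop body. A hypothetical flush
 * helper, assuming the caller already holds the queue lock:
 */
static inline void example_flush_matching(struct sk_buff_head *queue, u32 mark)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(queue, skb, tmp) {
		if (skb->mark == mark) {
			__skb_unlink(skb, queue);
			kfree_skb(skb);
		}
	}
}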
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
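
/* Usage sketch (illustrative): each frag_list element is itself a full skb,
 * so walking it means iterating skbs, not page frags. Example-only helper
 * summing the fragment lengths:
 */
static inline unsigned int example_frag_list_len(struct sk_buff *skb)
{
	struct sk_buff *iter;
	unsigned int len = 0;

	skb_walk_frags(skb, iter)
		len += iter->len;

	return len;
}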
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb);
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
				 unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_eth_pop(struct sk_buff *skb);
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
		 const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);
static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
		     const void *data, int hlen, void *buffer)
{
	if (likely(hlen - offset >= len))
		return (void *)data + offset;

	if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
static inline void *
skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
{
	if (likely(skb_headlen(skb) - offset >= len))
		return skb->data + offset;

	return NULL;
}
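
/* Usage sketch (illustrative): the canonical skb_header_pointer() pattern,
 * assuming <linux/udp.h>. If the requested bytes are linear they are
 * returned in place; otherwise they are copied into the on-stack buffer, so
 * the returned pointer must be treated as read-only either way.
 */
static inline bool example_peek_udp_dport(struct sk_buff *skb, int thoff,
					  __be16 *dport)
{
	struct udphdr _uh;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;	/* packet too short */

	*dport = uh->dest;
	return true;
}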
/**
 * skb_needs_linearize - check if we need to linearize a given skb
 *			 depending on the given device features.
 * @skb: socket buffer to check
 * @features: net device features
 *
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
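
/* Usage sketch (illustrative): an xmit path that linearizes only when the
 * device features demand it, similar in spirit to what the core transmit
 * path does. The wrapper name is made up.
 */
static inline int example_prep_xmit(struct sk_buff *skb,
				    netdev_features_t features)
{
	if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
		return -ENOMEM;	/* caller frees the skb */
	return 0;
}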
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}
/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct __kernel_old_timeval *stamp)
{
	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}
static inline void skb_get_new_timestamp(const struct sk_buff *skb,
					 struct __kernel_sock_timeval *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_usec = ts.tv_nsec / 1000;
}
static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct __kernel_old_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}
static inline void skb_get_new_timestampns(const struct sk_buff *skb,
					   struct __kernel_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}
static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
	skb->tstamp_type = SKB_CLOCK_REALTIME;
}
static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
					 u8 tstamp_type)
{
	skb->tstamp = kt;

	if (kt)
		skb->tstamp_type = tstamp_type;
	else
		skb->tstamp_type = SKB_CLOCK_REALTIME;
}
static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
						    ktime_t kt, clockid_t clockid)
{
	u8 tstamp_type = SKB_CLOCK_REALTIME;

	switch (clockid) {
	case CLOCK_REALTIME:
		break;
	case CLOCK_MONOTONIC:
		tstamp_type = SKB_CLOCK_MONOTONIC;
		break;
	case CLOCK_TAI:
		tstamp_type = SKB_CLOCK_TAI;
		break;
	default:
		WARN_ON_ONCE(1);
		kt = 0;
	}

	skb_set_delivery_time(skb, kt, tstamp_type);
}
/* It is used in the ingress path to clear the delivery_time.
 * If needed, set the skb->tstamp to the (rcv) timestamp.
 */
static inline void skb_clear_delivery_time(struct sk_buff *skb)
{
	if (skb->tstamp_type) {
		skb->tstamp_type = SKB_CLOCK_REALTIME;
		if (static_branch_unlikely(&netstamp_needed_key))
			skb->tstamp = ktime_get_real();
		else
			skb->tstamp = 0;
	}
}
static inline void skb_clear_tstamp(struct sk_buff *skb)
{
	if (skb->tstamp_type)
		return;

	skb->tstamp = 0;
}
static inline ktime_t skb_tstamp(const struct sk_buff *skb)
{
	if (skb->tstamp_type)
		return 0;

	return skb->tstamp;
}
static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
{
	if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
		return skb->tstamp;

	if (cond)
		return ktime_get_real();

	return 0;
}
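
/* Usage sketch (illustrative): taking a software receive timestamp and
 * converting it to a userspace-visible format. The wrapper function is
 * hypothetical; only __net_timestamp() and skb_get_timestamp() are real.
 */
static inline void example_stamp_and_report(struct sk_buff *skb,
					    struct __kernel_old_timeval *tv)
{
	if (!skb->tstamp)
		__net_timestamp(skb);	/* CLOCK_REALTIME based stamp */

	skb_get_timestamp(skb, tv);
}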
static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}
static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);
/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}
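
/* Usage sketch (illustrative): the conventional placement of
 * skb_tx_timestamp() in a driver's ndo_start_xmit(), immediately before the
 * hardware can complete (and free) the packet. Everything except the
 * skb_tx_timestamp() call is a stand-in for real driver code.
 */
static inline netdev_tx_t example_start_xmit(struct sk_buff *skb,
					     struct net_device *dev)
{
	/* ... map buffers and fill descriptors here ... */

	skb_tx_timestamp(skb);	/* last safe point before HW owns the skb */

	/* ... ring the doorbell ... */
	return NETDEV_TX_OK;
}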
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);
static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}
/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}
static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}
static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->ip_summed = CHECKSUM_NONE;
		skb->csum_level = 0;
	}
}
/* Check if we need to perform checksum complete validation.
 *
 * Returns: true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}
/* Unset checksum-complete, e.g. when the packet is about to be modified and
 * the checksum-complete value would become invalid.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}
/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}
static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}
/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})
#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
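
/* Usage sketch (illustrative): how a protocol receive path uses these
 * macros, loosely modeled on the TCP/UDP init helpers with the real
 * inet_compute_pseudo() callback from net/ip.h. The wrapper name is made
 * up. A zero return means the checksum was validated (or deferred with the
 * pseudo header parked in skb->csum); non-zero means it is bad.
 */
static inline __sum16 example_tcp_checksum_init(struct sk_buff *skb)
{
	return skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo);
}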
static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}
static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}
#define skb_checksum_try_convert(skb, proto, compute_pseudo)		\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)
static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}
/* Update skbuf and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}
static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
	return NULL;
#endif
}
static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return skb->_nfct;
#else
	return 0UL;
#endif
}
static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	skb->slow_gro |= !!nfct;
	skb->_nfct = nfct;
#endif
}
#ifdef CONFIG_SKB_EXTENSIONS

void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);
static inline void skb_ext_put(struct sk_buff *skb)
{
	if (skb->active_extensions)
		__skb_ext_put(skb->extensions);
}
static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
	return !!(skb->active_extensions & (1 << id));
}
static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id))
		__skb_ext_del(skb, id);
}
static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id)) {
		struct skb_ext *ext = skb->extensions;

		return (void *)ext + (ext->offset[id] << 3);
	}

	return NULL;
}
static inline void skb_ext_reset(struct sk_buff *skb)
{
	if (unlikely(skb->active_extensions)) {
		__skb_ext_put(skb->extensions);
		skb->active_extensions = 0;
	}
}
static inline bool skb_has_extensions(struct sk_buff *skb)
{
	return unlikely(skb->active_extensions);
}
#else /* !CONFIG_SKB_EXTENSIONS */

static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }

#endif /* CONFIG_SKB_EXTENSIONS */
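
/* Usage sketch (illustrative): looking up an extension and attaching it on
 * demand, loosely modeled on secpath_set(). SKB_EXT_SEC_PATH is a real id;
 * the wrapper is example-only. skb_ext_add() allocates (or unshares) the
 * extension area and returns the slot for the given id, or NULL on failure.
 */
static inline struct sec_path *example_get_secpath(struct sk_buff *skb)
{
	struct sec_path *sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	if (!sp)
		sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

	return sp;	/* NULL if allocation failed */
}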
static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
}
static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}
static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}
#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return 0;
#endif
}
static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
	       !secpath_exists(skb) &&
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}
static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}
static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
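
/* Usage sketch (illustrative): the +1 bias lets a queue_mapping of zero
 * mean "not recorded". A driver receive path typically stamps the ring
 * index so later transmit-queue selection can prefer the same queue:
 */
static inline void example_rx_stamp(struct sk_buff *skb, u16 ring_idx)
{
	skb_record_rx_queue(skb, ring_idx);	/* stores ring_idx + 1 */

	if (skb_rx_queue_recorded(skb))
		pr_debug("rx queue %u\n", skb_get_rx_queue(skb));
}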
static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}
static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
	return NULL;
#endif
}
static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}
static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}
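
/* Usage sketch (illustrative): the skb_is_gso() test must come first, since
 * the gso_type checks are only meaningful for a GSO skb. A hypothetical
 * capability check for hardware that can only segment TCP:
 */
static inline bool example_can_hw_segment(const struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return true;	/* plain packet, nothing to segment */

	return skb_is_gso_tcp(skb);
}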
void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}
/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
}
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));
/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}
/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with ~csum_fold(lco_csum(skb)))
 * before calling this function.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
static inline bool skb_is_redirected(const struct sk_buff *skb)
{
	return skb->redirected;
}

static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
	skb->redirected = 1;
#ifdef CONFIG_NET_REDIRECT
	skb->from_ingress = from_ingress;
	if (skb->from_ingress)
		skb_clear_tstamp(skb);
#endif
}

static inline void skb_reset_redirect(struct sk_buff *skb)
{
	skb->redirected = 0;
}

static inline void skb_set_redirected_noclear(struct sk_buff *skb,
					      bool from_ingress)
{
	skb->redirected = 1;
#ifdef CONFIG_NET_REDIRECT
	skb->from_ingress = from_ingress;
#endif
}
static inline bool skb_csum_is_sctp(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_SCTP)
	return skb->csum_not_inet;
#else
	return false;
#endif
}

static inline void skb_reset_csum_not_inet(struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;
#if IS_ENABLED(CONFIG_IP_SCTP)
	skb->csum_not_inet = 0;
#endif
}
static inline void skb_set_kcov_handle(struct sk_buff *skb,
				       const u64 kcov_handle)
{
#ifdef CONFIG_KCOV
	skb->kcov_handle = kcov_handle;
#endif
}

static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
{
#ifdef CONFIG_KCOV
	return skb->kcov_handle;
#else
	return 0;
#endif
}
static inline void skb_mark_for_recycle(struct sk_buff *skb)
{
#ifdef CONFIG_PAGE_POOL
	skb->pp_recycle = 1;
#endif
}
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
			     ssize_t maxsize, gfp_t gfp);