Lines Matching full:skb

19  *		Ray VanTassle	:	Fixed --skb->lock in free
84 #include <trace/events/skb.h>
192 * @skb: buffer
202 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
206 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
207 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
208 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
212 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
214 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
217 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
219 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
277 struct sk_buff *skb; in napi_skb_cache_get() local
291 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
293 kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); in napi_skb_cache_get()
295 return skb; in napi_skb_cache_get()
298 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, in __finalize_skb_around() argument
305 /* Assumes caller memset cleared SKB */ in __finalize_skb_around()
306 skb->truesize = SKB_TRUESIZE(size); in __finalize_skb_around()
307 refcount_set(&skb->users, 1); in __finalize_skb_around()
308 skb->head = data; in __finalize_skb_around()
309 skb->data = data; in __finalize_skb_around()
310 skb_reset_tail_pointer(skb); in __finalize_skb_around()
311 skb_set_end_offset(skb, size); in __finalize_skb_around()
312 skb->mac_header = (typeof(skb->mac_header))~0U; in __finalize_skb_around()
313 skb->transport_header = (typeof(skb->transport_header))~0U; in __finalize_skb_around()
314 skb->alloc_cpu = raw_smp_processor_id(); in __finalize_skb_around()
316 shinfo = skb_shinfo(skb); in __finalize_skb_around()
320 skb_set_kcov_handle(skb, kcov_common_handle()); in __finalize_skb_around()
323 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, in __slab_build_skb() argument
348 struct sk_buff *skb; in slab_build_skb() local
351 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in slab_build_skb()
353 if (unlikely(!skb)) in slab_build_skb()
356 memset(skb, 0, offsetof(struct sk_buff, tail)); in slab_build_skb()
357 data = __slab_build_skb(skb, data, &size); in slab_build_skb()
358 __finalize_skb_around(skb, data, size); in slab_build_skb()
360 return skb; in slab_build_skb()
364 /* Caller must provide SKB that is memset cleared */
365 static void __build_skb_around(struct sk_buff *skb, void *data, in __build_skb_around() argument
374 data = __slab_build_skb(skb, data, &size); in __build_skb_around()
376 __finalize_skb_around(skb, data, size); in __build_skb_around()
389 * The return is the new skb buffer.
401 struct sk_buff *skb; in __build_skb() local
403 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in __build_skb()
405 if (unlikely(!skb)) in __build_skb()
408 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
409 __build_skb_around(skb, data, frag_size); in __build_skb()
411 return skb; in __build_skb()
415 * takes care of skb->head and skb->pfmemalloc
419 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
421 if (likely(skb && frag_size)) { in build_skb()
422 skb->head_frag = 1; in build_skb()
423 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb()
425 return skb; in build_skb()
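
The lines above show the frag-backed path. As a hedged illustration (not taken from this file): a driver that already owns a DMA-filled buffer, e.g. one obtained earlier from napi_alloc_frag(), typically wraps it as below; rx_wrap_buffer(), buf and pkt_len are made-up names.

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    /* Wrap an already-filled RX buffer in an skb without copying the payload.
     * Assumes buf was allocated large enough for truesize below, i.e. with
     * room for NET_SKB_PAD headroom and the trailing skb_shared_info.
     */
    static struct sk_buff *rx_wrap_buffer(void *buf, unsigned int pkt_len)
    {
            unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + pkt_len) +
                                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
            struct sk_buff *skb;

            skb = build_skb(buf, truesize); /* skb->head points at buf, no memcpy */
            if (unlikely(!skb))
                    return NULL;            /* caller still owns and must free buf */

            skb_reserve(skb, NET_SKB_PAD);  /* headroom left in front of the frame */
            skb_put(skb, pkt_len);          /* account the received bytes as data */
            return skb;
    }
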
430 * build_skb_around - build a network buffer around provided skb
431 * @skb: sk_buff provide by caller, must be memset cleared
435 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
438 if (unlikely(!skb)) in build_skb_around()
441 __build_skb_around(skb, data, frag_size); in build_skb_around()
444 skb->head_frag = 1; in build_skb_around()
445 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb_around()
447 return skb; in build_skb_around()
463 struct sk_buff *skb; in __napi_build_skb() local
465 skb = napi_skb_cache_get(); in __napi_build_skb()
466 if (unlikely(!skb)) in __napi_build_skb()
469 memset(skb, 0, offsetof(struct sk_buff, tail)); in __napi_build_skb()
470 __build_skb_around(skb, data, frag_size); in __napi_build_skb()
472 return skb; in __napi_build_skb()
480 * Version of __napi_build_skb() that takes care of skb->head_frag
481 * and skb->pfmemalloc when the data is a page or page fragment.
487 struct sk_buff *skb = __napi_build_skb(data, frag_size); in napi_build_skb() local
489 if (likely(skb) && frag_size) { in napi_build_skb()
490 skb->head_frag = 1; in napi_build_skb()
491 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in napi_build_skb()
494 return skb; in napi_build_skb()
565 * instead of head cache and allocate a cloned (child) skb.
581 struct sk_buff *skb; in __alloc_skb() local
594 skb = napi_skb_cache_get(); in __alloc_skb()
596 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); in __alloc_skb()
597 if (unlikely(!skb)) in __alloc_skb()
599 prefetchw(skb); in __alloc_skb()
604 * Both skb->head and skb_shared_info are cache line aligned. in __alloc_skb()
620 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
621 __build_skb_around(skb, data, size); in __alloc_skb()
622 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
627 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
629 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
633 return skb; in __alloc_skb()
636 kmem_cache_free(cache, skb); in __alloc_skb()
658 struct sk_buff *skb; in __netdev_alloc_skb() local
665 * we use kmalloc() for skb->head allocation. in __netdev_alloc_skb()
670 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
671 if (!skb) in __netdev_alloc_skb()
700 skb = __build_skb(data, len); in __netdev_alloc_skb()
701 if (unlikely(!skb)) { in __netdev_alloc_skb()
707 skb->pfmemalloc = 1; in __netdev_alloc_skb()
708 skb->head_frag = 1; in __netdev_alloc_skb()
711 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
712 skb->dev = dev; in __netdev_alloc_skb()
715 return skb; in __netdev_alloc_skb()
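
For comparison, the copy-based RX path most drivers start with; dev, rx_buf and pkt_len below are placeholders for whatever the (hypothetical) driver provides.

    struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);

    if (unlikely(!skb))
            return NET_RX_DROP;                     /* driver keeps its own buffer */

    /* NET_SKB_PAD bytes of headroom were already reserved by the helper */
    skb_put_data(skb, rx_buf, pkt_len);             /* copy frame into the linear area */
    skb->protocol = eth_type_trans(skb, dev);       /* also pulls the Ethernet header */
    return netif_rx(skb);
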
735 struct sk_buff *skb; in napi_alloc_skb() local
743 * we use kmalloc() for skb->head allocation. in napi_alloc_skb()
748 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, in napi_alloc_skb()
750 if (!skb) in napi_alloc_skb()
770 skb = __napi_build_skb(data, len); in napi_alloc_skb()
771 if (unlikely(!skb)) { in napi_alloc_skb()
777 skb->pfmemalloc = 1; in napi_alloc_skb()
778 skb->head_frag = 1; in napi_alloc_skb()
781 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in napi_alloc_skb()
782 skb->dev = napi->dev; in napi_alloc_skb()
785 return skb; in napi_alloc_skb()
789 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, in skb_add_rx_frag_netmem() argument
794 skb_fill_netmem_desc(skb, i, netmem, off, size); in skb_add_rx_frag_netmem()
795 skb->len += size; in skb_add_rx_frag_netmem()
796 skb->data_len += size; in skb_add_rx_frag_netmem()
797 skb->truesize += truesize; in skb_add_rx_frag_netmem()
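
Drivers normally reach this through the page-based skb_add_rx_frag() wrapper when assembling a multi-buffer frame; a small assumed snippet (page, offset and frag_len come from the imaginary RX descriptor, and using PAGE_SIZE as truesize presumes one buffer per page).

    /* append one received page fragment to the skb being built for this frame */
    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                    offset, frag_len, PAGE_SIZE);
    /* skb->len, skb->data_len and skb->truesize are all updated by the helper */
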
801 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
804 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
809 skb->len += size; in skb_coalesce_rx_frag()
810 skb->data_len += size; in skb_coalesce_rx_frag()
811 skb->truesize += truesize; in skb_coalesce_rx_frag()
821 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
823 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
826 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
830 skb_walk_frags(skb, list) in skb_clone_fraglist()
844 struct sk_buff *skb = *pskb, *nskb; in skb_pp_cow_data() local
849 * the skb. in skb_pp_cow_data()
851 if (skb_has_frag_list(skb)) in skb_pp_cow_data()
855 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) in skb_pp_cow_data()
858 size = min_t(u32, skb->len, max_head_size); in skb_pp_cow_data()
871 skb_copy_header(nskb, skb); in skb_pp_cow_data()
874 err = skb_copy_bits(skb, 0, nskb->data, size); in skb_pp_cow_data()
881 head_off = skb_headroom(nskb) - skb_headroom(skb); in skb_pp_cow_data()
885 len = skb->len - off; in skb_pp_cow_data()
886 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { in skb_pp_cow_data()
900 err = skb_copy_bits(skb, off, page_address(page) + page_off, in skb_pp_cow_data()
911 consume_skb(skb); in skb_pp_cow_data()
953 static bool skb_pp_recycle(struct sk_buff *skb, void *data) in skb_pp_recycle() argument
955 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) in skb_pp_recycle()
961 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
962 * @skb: page pool aware skb
964 * Increase the fragment reference count (pp_ref_count) of a skb. This is
966 * i.e. when skb->pp_recycle is true, and not for fragments in a
967 * non-pp-recycling skb. It has a fallback to increase references on normal
970 static int skb_pp_frag_ref(struct sk_buff *skb) in skb_pp_frag_ref() argument
976 if (!skb->pp_recycle) in skb_pp_frag_ref()
979 shinfo = skb_shinfo(skb); in skb_pp_frag_ref()
999 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
1001 unsigned char *head = skb->head; in skb_free_head()
1003 if (skb->head_frag) { in skb_free_head()
1004 if (skb_pp_recycle(skb, head)) in skb_free_head()
1008 skb_kfree_head(head, skb_end_offset(skb)); in skb_free_head()
1012 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_data() argument
1014 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
1017 if (!skb_data_unref(skb, shinfo)) in skb_release_data()
1020 if (skb_zcopy(skb)) { in skb_release_data()
1023 skb_zcopy_clear(skb, true); in skb_release_data()
1029 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
1035 skb_free_head(skb); in skb_release_data()
1037 /* When we clone an SKB we copy the recycling bit. The pp_recycle in skb_release_data()
1040 * to make one SKB responsible for triggering the recycle path. in skb_release_data()
1041 * So disable the recycling bit if an SKB is cloned and we have in skb_release_data()
1042 * additional references to the fragmented part of the SKB. in skb_release_data()
1043 * Eventually the last SKB will have the recycling bit set and it's in skb_release_data()
1046 skb->pp_recycle = 0; in skb_release_data()
1052 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
1056 switch (skb->fclone) { in kfree_skbmem()
1058 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skbmem()
1062 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
1064 /* We usually free the clone (TX completion) before original skb in kfree_skbmem()
1073 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
1082 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
1084 skb_dst_drop(skb); in skb_release_head_state()
1085 if (skb->destructor) { in skb_release_head_state()
1087 skb->destructor(skb); in skb_release_head_state()
1090 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
1092 skb_ext_put(skb); in skb_release_head_state()
1096 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_all() argument
1098 skb_release_head_state(skb); in skb_release_all()
1099 if (likely(skb->head)) in skb_release_all()
1100 skb_release_data(skb, reason); in skb_release_all()
1105 * @skb: buffer
1112 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
1114 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); in __kfree_skb()
1115 kfree_skbmem(skb); in __kfree_skb()
1120 bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, in __sk_skb_reason_drop() argument
1123 if (unlikely(!skb_unref(skb))) in __sk_skb_reason_drop()
1132 trace_consume_skb(skb, __builtin_return_address(0)); in __sk_skb_reason_drop()
1134 trace_kfree_skb(skb, __builtin_return_address(0), reason, sk); in __sk_skb_reason_drop()
1140 * @sk: the socket to receive @skb, or NULL if not applicable
1141 * @skb: buffer to free
1142 * @reason: reason why this skb is dropped
1149 sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) in sk_skb_reason_drop() argument
1151 if (__sk_skb_reason_drop(sk, skb, reason)) in sk_skb_reason_drop()
1152 __kfree_skb(skb); in sk_skb_reason_drop()
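
Callers normally reach this through kfree_skb_reason() for drops (so tracing and drop monitor see why) and consume_skb() for normal completion; a minimal assumed validation path:

    if (unlikely(skb->len < sizeof(struct iphdr))) {
            /* freed with an explicit reason, visible to tracing/dropwatch */
            kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
            return NET_RX_DROP;
    }
    /* ... frame handled successfully; release it without counting a drop ... */
    consume_skb(skb);
    return NET_RX_SUCCESS;
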
1163 static void kfree_skb_add_bulk(struct sk_buff *skb, in kfree_skb_add_bulk() argument
1167 /* if SKB is a clone, don't handle this case */ in kfree_skb_add_bulk()
1168 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { in kfree_skb_add_bulk()
1169 __kfree_skb(skb); in kfree_skb_add_bulk()
1173 skb_release_all(skb, reason); in kfree_skb_add_bulk()
1174 sa->skb_array[sa->skb_count++] = skb; in kfree_skb_add_bulk()
1206 /* Dump skb information and contents.
1212 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
1214 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
1215 struct net_device *dev = skb->dev; in skb_dump()
1216 struct sock *sk = skb->sk; in skb_dump()
1223 len = skb->len; in skb_dump()
1225 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
1227 headroom = skb_headroom(skb); in skb_dump()
1228 tailroom = skb_tailroom(skb); in skb_dump()
1230 has_mac = skb_mac_header_was_set(skb); in skb_dump()
1231 has_trans = skb_transport_header_was_set(skb); in skb_dump()
1240 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
1241 has_mac ? skb->mac_header : -1, in skb_dump()
1242 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
1243 skb->mac_len, in skb_dump()
1244 skb->network_header, in skb_dump()
1245 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
1246 has_trans ? skb->transport_header : -1, in skb_dump()
1249 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, in skb_dump()
1250 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, in skb_dump()
1251 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1252 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, in skb_dump()
1253 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, in skb_dump()
1254 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, in skb_dump()
1255 skb->inner_network_header, skb->inner_transport_header); in skb_dump()
1265 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1266 16, 1, skb->head, headroom, false); in skb_dump()
1268 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
1270 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, in skb_dump()
1271 16, 1, skb->data, seg_len, false); in skb_dump()
1275 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1276 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
1278 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1279 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1297 print_hex_dump(level, "skb frag: ", in skb_dump()
1307 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
1308 printk("skb fraglist:\n"); in skb_dump()
1309 skb_walk_frags(skb, list_skb) in skb_dump()
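
skb_dump() is meant for debugging malformed or unexpected packets; a typical (assumed) call site rate-limits the message and dumps headers only:

    if (unlikely(skb->len > max_frame_len)) {       /* max_frame_len is illustrative */
            net_warn_ratelimited("oversized frame, %u bytes\n", skb->len);
            skb_dump(KERN_WARNING, skb, false);     /* false: headers only, no payload */
    }
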
1317 * @skb: buffer that triggered an error
1319 * Report xmit error if a device callback is tracking this skb.
1320 * skb must be freed afterwards.
1322 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
1324 if (skb) { in skb_tx_error()
1325 skb_zcopy_downgrade_managed(skb); in skb_tx_error()
1326 skb_zcopy_clear(skb, true); in skb_tx_error()
1334 * @skb: buffer to free
1340 void consume_skb(struct sk_buff *skb) in consume_skb() argument
1342 if (!skb_unref(skb)) in consume_skb()
1345 trace_consume_skb(skb, __builtin_return_address(0)); in consume_skb()
1346 __kfree_skb(skb); in consume_skb()
1353 * @skb: buffer to free
1356 * skb reference and all the head states have already been dropped
1358 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
1360 trace_consume_skb(skb, __builtin_return_address(0)); in __consume_stateless_skb()
1361 skb_release_data(skb, SKB_CONSUMED); in __consume_stateless_skb()
1362 kfree_skbmem(skb); in __consume_stateless_skb()
1365 static void napi_skb_cache_put(struct sk_buff *skb) in napi_skb_cache_put() argument
1370 if (!kasan_mempool_poison_object(skb)) in napi_skb_cache_put()
1374 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1388 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) in __napi_kfree_skb() argument
1390 skb_release_all(skb, reason); in __napi_kfree_skb()
1391 napi_skb_cache_put(skb); in __napi_kfree_skb()
1394 void napi_skb_free_stolen_head(struct sk_buff *skb) in napi_skb_free_stolen_head() argument
1396 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1397 nf_reset_ct(skb); in napi_skb_free_stolen_head()
1398 skb_dst_drop(skb); in napi_skb_free_stolen_head()
1399 skb_ext_put(skb); in napi_skb_free_stolen_head()
1400 skb_orphan(skb); in napi_skb_free_stolen_head()
1401 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1403 napi_skb_cache_put(skb); in napi_skb_free_stolen_head()
1406 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
1410 dev_consume_skb_any(skb); in napi_consume_skb()
1416 if (!skb_unref(skb)) in napi_consume_skb()
1419 /* if reaching here SKB is ready to free */ in napi_consume_skb()
1420 trace_consume_skb(skb, __builtin_return_address(0)); in napi_consume_skb()
1422 /* if SKB is a clone, don't handle this case */ in napi_consume_skb()
1423 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1424 __kfree_skb(skb); in napi_consume_skb()
1428 skb_release_all(skb, SKB_CONSUMED); in napi_consume_skb()
1429 napi_skb_cache_put(skb); in napi_consume_skb()
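
A sketch of the intended TX-completion usage; the ring bookkeeping (my_tx_ring, tx_desc_done()) is invented, the relevant part is forwarding the NAPI budget so that budget == 0 falls back to dev_consume_skb_any().

    /* called from the driver's NAPI poll with its remaining budget */
    static void my_clean_tx_ring(struct my_tx_ring *ring, int budget)
    {
            while (tx_desc_done(ring)) {
                    struct sk_buff *skb = ring->buf[ring->next_to_clean].skb;

                    /* recycles into the per-CPU NAPI skb cache when possible */
                    napi_consume_skb(skb, budget);
                    ring->next_to_clean = (ring->next_to_clean + 1) & ring->mask;
            }
    }
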
1489 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
1491 #define C(x) n->x = skb->x in __skb_clone()
1495 __copy_skb_header(n, skb); in __skb_clone()
1500 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1515 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1516 skb->cloned = 1; in __skb_clone()
1548 * skb_morph - morph one skb into another
1549 * @dst: the skb to receive the contents
1550 * @src: the skb to supply the contents
1552 * This is identical to skb_clone except that the target skb is
1555 * The target skb is returned upon exit.
1610 struct sk_buff *skb; in msg_zerocopy_alloc() local
1614 skb = sock_omalloc(sk, 0, GFP_KERNEL); in msg_zerocopy_alloc()
1615 if (!skb) in msg_zerocopy_alloc()
1618 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1619 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1623 kfree_skb(skb); in msg_zerocopy_alloc()
1667 /* TCP can create new skb to attach new uarg */ in msg_zerocopy_realloc()
1694 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1696 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1716 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1718 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1738 serr = SKB_EXT_ERR(skb); in __msg_zerocopy_callback()
1752 __skb_queue_tail(q, skb); in __msg_zerocopy_callback()
1753 skb = NULL; in __msg_zerocopy_callback()
1760 consume_skb(skb); in __msg_zerocopy_callback()
1764 static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, in msg_zerocopy_complete() argument
1792 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1796 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1799 err = uarg->ops->link_skb(skb, uarg); in skb_zerocopy_iter_stream()
1803 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1805 /* An skb can only point to one uarg. This edge case happens in skb_zerocopy_iter_stream()
1806 * when TCP appends to an skb, but zerocopy_realloc triggered in skb_zerocopy_iter_stream()
1813 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1814 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1815 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1817 /* Streams do not free skb on error. Reset to prev state. */ in skb_zerocopy_iter_stream()
1818 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1819 skb->sk = sk; in skb_zerocopy_iter_stream()
1820 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1821 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1825 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1826 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1830 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) in __skb_zcopy_downgrade_managed() argument
1834 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1835 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1836 skb_frag_ref(skb, i); in __skb_zcopy_downgrade_managed()
1861 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1862 * @skb: the skb to modify
1865 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1875 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1877 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1882 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1885 if (!skb_frags_readable(skb)) in skb_copy_ubufs()
1895 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) in skb_copy_ubufs()
1899 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
1917 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1942 /* skb frags release userspace buffers */ in skb_copy_ubufs()
1944 skb_frag_unref(skb, i); in skb_copy_ubufs()
1946 /* skb frags point to kernel buffers */ in skb_copy_ubufs()
1948 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); in skb_copy_ubufs()
1951 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, in skb_copy_ubufs()
1953 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1956 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
1963 * @skb: buffer to clone
1975 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
1977 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
1982 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
1985 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1991 if (skb_pfmemalloc(skb)) in skb_clone()
2001 return __skb_clone(n, skb); in skb_clone()
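
A short reminder of the clone contract (the consumer name is illustrative): the clone shares head and frags with the original, so it is read-only until unshared.

    struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

    if (!nskb)
            return;
    /* nskb shares skb's data; a writer must first unshare it, e.g. via
     * skb_unclone()/pskb_expand_head(), or take a real copy with pskb_copy().
     */
    deliver_to_tap(nskb);           /* hypothetical read-only consumer */
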
2005 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
2008 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
2009 skb->csum_start += off; in skb_headers_offset_update()
2010 /* {transport,network,mac}_header and tail are relative to skb->head */ in skb_headers_offset_update()
2011 skb->transport_header += off; in skb_headers_offset_update()
2012 skb->network_header += off; in skb_headers_offset_update()
2013 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
2014 skb->mac_header += off; in skb_headers_offset_update()
2015 skb->inner_transport_header += off; in skb_headers_offset_update()
2016 skb->inner_network_header += off; in skb_headers_offset_update()
2017 skb->inner_mac_header += off; in skb_headers_offset_update()
2031 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
2033 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
2040 * @skb: buffer to copy
2055 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
2061 if (!skb_frags_readable(skb)) in skb_copy()
2064 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy()
2067 headerlen = skb_headroom(skb); in skb_copy()
2068 size = skb_end_offset(skb) + skb->data_len; in skb_copy()
2070 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
2077 skb_put(n, skb->len); in skb_copy()
2079 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
2081 skb_copy_header(n, skb); in skb_copy()
2088 * @skb: buffer to copy
2089 * @headroom: headroom of new skb
2091 * @fclone: if true allocate the copy of the skb from the fclone
2103 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
2106 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
2107 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
2116 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
2118 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
2120 n->truesize += skb->data_len; in __pskb_copy_fclone()
2121 n->data_len = skb->data_len; in __pskb_copy_fclone()
2122 n->len = skb->len; in __pskb_copy_fclone()
2124 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
2127 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
2128 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
2133 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
2134 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
2135 skb_frag_ref(skb, i); in __pskb_copy_fclone()
2140 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
2141 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
2145 skb_copy_header(n, skb); in __pskb_copy_fclone()
2153 * @skb: buffer to reallocate
2159 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
2163 * All the pointers pointing into skb header may change and must be
2167 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
2170 unsigned int osize = skb_end_offset(skb); in pskb_expand_head()
2178 BUG_ON(skb_shared(skb)); in pskb_expand_head()
2180 skb_zcopy_downgrade_managed(skb); in pskb_expand_head()
2182 if (skb_pfmemalloc(skb)) in pskb_expand_head()
2193 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
2196 skb_shinfo(skb), in pskb_expand_head()
2197 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
2204 if (skb_cloned(skb)) { in pskb_expand_head()
2205 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
2207 if (skb_zcopy(skb)) in pskb_expand_head()
2208 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
2209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
2210 skb_frag_ref(skb, i); in pskb_expand_head()
2212 if (skb_has_frag_list(skb)) in pskb_expand_head()
2213 skb_clone_fraglist(skb); in pskb_expand_head()
2215 skb_release_data(skb, SKB_CONSUMED); in pskb_expand_head()
2217 skb_free_head(skb); in pskb_expand_head()
2219 off = (data + nhead) - skb->head; in pskb_expand_head()
2221 skb->head = data; in pskb_expand_head()
2222 skb->head_frag = 0; in pskb_expand_head()
2223 skb->data += off; in pskb_expand_head()
2225 skb_set_end_offset(skb, size); in pskb_expand_head()
2229 skb->tail += off; in pskb_expand_head()
2230 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
2231 skb->cloned = 0; in pskb_expand_head()
2232 skb->hdr_len = 0; in pskb_expand_head()
2233 skb->nohdr = 0; in pskb_expand_head()
2234 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
2236 skb_metadata_clear(skb); in pskb_expand_head()
2238 /* It is not generally safe to change skb->truesize. in pskb_expand_head()
2240 * when skb is orphaned (not attached to a socket). in pskb_expand_head()
2242 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
2243 skb->truesize += size - osize; in pskb_expand_head()
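
Most code does not call pskb_expand_head() directly but relies on helpers such as skb_cow_head(); a hedged encapsulation sketch where ENCAP_HLEN and struct encap_hdr are assumptions:

    struct encap_hdr *eh;

    /* reallocates the head via pskb_expand_head() only when headroom is short
     * or the header area is shared with a clone
     */
    if (skb_cow_head(skb, ENCAP_HLEN))
            goto drop;

    eh = skb_push(skb, ENCAP_HLEN);         /* safe now: enough private headroom */
    memset(eh, 0, ENCAP_HLEN);              /* then fill in the encapsulation fields */
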
2254 /* Make private copy of skb with writable head and some headroom */
2256 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
2259 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
2262 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
2264 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
2276 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in __skb_unclone_keeptruesize() argument
2282 saved_end_offset = skb_end_offset(skb); in __skb_unclone_keeptruesize()
2283 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
2285 res = pskb_expand_head(skb, 0, 0, pri); in __skb_unclone_keeptruesize()
2289 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
2291 if (likely(skb_end_offset(skb) == saved_end_offset)) in __skb_unclone_keeptruesize()
2294 /* We cannot change skb->end if the original or new value in __skb_unclone_keeptruesize()
2298 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { in __skb_unclone_keeptruesize()
2303 saved_end_offset, skb_end_offset(skb)); in __skb_unclone_keeptruesize()
2308 shinfo = skb_shinfo(skb); in __skb_unclone_keeptruesize()
2310 /* We are about to change back skb->end, in __skb_unclone_keeptruesize()
2313 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
2317 skb_set_end_offset(skb, saved_end_offset); in __skb_unclone_keeptruesize()
2324 * @skb: buffer to reallocate
2327 * Unlike skb_realloc_headroom, this one does not allocate a new skb
2328 * if possible; copies skb->sk to new skb as needed
2329 * and frees original skb in case of failures.
2334 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) in skb_expand_head() argument
2336 int delta = headroom - skb_headroom(skb); in skb_expand_head()
2337 int osize = skb_end_offset(skb); in skb_expand_head()
2338 struct sock *sk = skb->sk; in skb_expand_head()
2342 return skb; in skb_expand_head()
2345 /* pskb_expand_head() might crash, if skb is shared. */ in skb_expand_head()
2346 if (skb_shared(skb) || !is_skb_wmem(skb)) { in skb_expand_head()
2347 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head()
2354 consume_skb(skb); in skb_expand_head()
2355 skb = nskb; in skb_expand_head()
2357 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) in skb_expand_head()
2360 if (sk && is_skb_wmem(skb)) { in skb_expand_head()
2361 delta = skb_end_offset(skb) - osize; in skb_expand_head()
2363 skb->truesize += delta; in skb_expand_head()
2365 return skb; in skb_expand_head()
2368 kfree_skb(skb); in skb_expand_head()
2375 * @skb: buffer to copy
2391 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
2402 if (!skb_frags_readable(skb)) in skb_copy_expand()
2405 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy_expand()
2408 oldheadroom = skb_headroom(skb); in skb_copy_expand()
2409 n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2410 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
2418 skb_put(n, skb->len); in skb_copy_expand()
2428 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2429 skb->len + head_copy_len)); in skb_copy_expand()
2431 skb_copy_header(n, skb); in skb_copy_expand()
2440 * __skb_pad - zero pad the tail of an skb
2441 * @skb: buffer to pad
2449 * May return error in out of memory cases. The skb is freed on error
2453 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
2459 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
2460 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2464 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2465 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
2466 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
2471 /* FIXME: The use of this function with non-linear skb's really needs in __skb_pad()
2474 err = skb_linearize(skb); in __skb_pad()
2478 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2483 kfree_skb(skb); in __skb_pad()
2490 * @skb: start of the buffer to use
2495 * fragmented buffer. @tail must be the last fragment of @skb -- or
2496 * @skb itself. If this would exceed the total buffer size the kernel
2501 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
2503 if (tail != skb) { in pskb_put()
2504 skb->data_len += len; in pskb_put()
2505 skb->len += len; in pskb_put()
2513 * @skb: buffer to use
2520 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
2522 void *tmp = skb_tail_pointer(skb); in skb_put()
2523 SKB_LINEAR_ASSERT(skb); in skb_put()
2524 skb->tail += len; in skb_put()
2525 skb->len += len; in skb_put()
2526 if (unlikely(skb->tail > skb->end)) in skb_put()
2527 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
2534 * @skb: buffer to use
2541 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
2543 skb->data -= len; in skb_push()
2544 skb->len += len; in skb_push()
2545 if (unlikely(skb->data < skb->head)) in skb_push()
2546 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
2547 return skb->data; in skb_push()
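
Together with skb_reserve(), skb_put() and skb_push() are the canonical way to build an outgoing packet: reserve room, write the payload, then prepend headers. A condensed sketch with assumed names (dev, payload, struct proto_hdr):

    struct sk_buff *skb;
    struct proto_hdr *ph;
    void *p;

    skb = alloc_skb(LL_RESERVED_SPACE(dev) + sizeof(*ph) + payload_len, GFP_ATOMIC);
    if (!skb)
            return -ENOMEM;

    skb_reserve(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)); /* headroom to push into */
    p = skb_put(skb, payload_len);          /* tail grows over the payload area */
    memcpy(p, payload, payload_len);

    ph = skb_push(skb, sizeof(*ph));        /* prepend the (made-up) protocol header */
    ph->len = htons(payload_len);
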
2553 * @skb: buffer to use
2561 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
2563 return skb_pull_inline(skb, len); in skb_pull()
2570 * @skb: buffer to use
2578 void *skb_pull_data(struct sk_buff *skb, size_t len) in skb_pull_data() argument
2580 void *data = skb->data; in skb_pull_data()
2582 if (skb->len < len) in skb_pull_data()
2585 skb_pull(skb, len); in skb_pull_data()
2593 * @skb: buffer to alter
2598 * The skb must be linear.
2600 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
2602 if (skb->len > len) in skb_trim()
2603 __skb_trim(skb, len); in skb_trim()
2607 /* Trims skb to length len. It can change skb pointers.
2610 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
2614 int offset = skb_headlen(skb); in ___pskb_trim()
2615 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2619 if (skb_cloned(skb) && in ___pskb_trim()
2620 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
2628 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2635 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2638 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2641 skb_frag_unref(skb, i); in ___pskb_trim()
2643 if (skb_has_frag_list(skb)) in ___pskb_trim()
2644 skb_drop_fraglist(skb); in ___pskb_trim()
2648 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2680 if (len > skb_headlen(skb)) { in ___pskb_trim()
2681 skb->data_len -= skb->len - len; in ___pskb_trim()
2682 skb->len = len; in ___pskb_trim()
2684 skb->len = len; in ___pskb_trim()
2685 skb->data_len = 0; in ___pskb_trim()
2686 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2689 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2690 skb_condense(skb); in ___pskb_trim()
2697 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2699 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2700 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2702 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2703 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2705 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2706 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2707 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2712 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2717 * __pskb_pull_tail - advance tail of skb header
2718 * @skb: buffer to reallocate
2728 * or value of new tail of skb in the case of success.
2730 * All the pointers pointing into skb header may change and must be
2734 /* Moves tail of skb head forward, copying data from fragmented part,
2737 * 2. It may change skb pointers.
2741 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2743 /* If skb has not enough free space at tail, get new one in __pskb_pull_tail()
2745 * room at tail, reallocate without expansion only if skb is cloned. in __pskb_pull_tail()
2747 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2749 if (!skb_frags_readable(skb)) in __pskb_pull_tail()
2752 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2753 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2758 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2759 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2764 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2769 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2770 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2778 * Certainly, it is possible to add an offset to skb data, in __pskb_pull_tail()
2781 * further bloating skb head and crucify ourselves here instead. in __pskb_pull_tail()
2785 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2797 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2799 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2822 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2823 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2829 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2832 /* Success! Now we may commit changes to skb data. */ in __pskb_pull_tail()
2837 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2838 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2841 skb_frag_unref(skb, i); in __pskb_pull_tail()
2844 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2846 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2857 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2860 skb->tail += delta; in __pskb_pull_tail()
2861 skb->data_len -= delta; in __pskb_pull_tail()
2863 if (!skb->data_len) in __pskb_pull_tail()
2864 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2866 return skb_tail_pointer(skb); in __pskb_pull_tail()
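
Protocol handlers normally reach __pskb_pull_tail() indirectly via pskb_may_pull(), which linearizes just enough of the packet before header fields are dereferenced; the classic IPv4 receive check looks roughly like this:

    const struct iphdr *iph;

    if (!pskb_may_pull(skb, sizeof(struct iphdr)))
            goto drop;
    iph = ip_hdr(skb);

    if (iph->ihl < 5 || !pskb_may_pull(skb, iph->ihl * 4))
            goto drop;
    iph = ip_hdr(skb);      /* reload: the head may have been reallocated */
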
2871 * skb_copy_bits - copy bits from skb to kernel buffer
2872 * @skb: source skb
2877 * Copy the specified number of bytes from the source skb to the
2885 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2887 int start = skb_headlen(skb); in skb_copy_bits()
2891 if (offset > (int)skb->len - len) in skb_copy_bits()
2898 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2905 if (!skb_frags_readable(skb)) in skb_copy_bits()
2908 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2910 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2939 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
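
When a header might span the linear area, page frags or the frag_list, skb_copy_bits() (or skb_header_pointer(), which is built on it) copies it into a local buffer first; a small assumed example:

    struct udphdr uh;

    /* copies from the linear area, the frags and the frag_list as needed */
    if (skb_copy_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)) < 0)
            goto drop;
    dport = ntohs(uh.dest);
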
3070 * Map linear and fragment data from the skb to spd. It reports true if the
3073 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
3081 * If skb->head_frag is set, this 'linear' part is backed by a in __skb_splice_bits()
3085 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
3086 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
3087 skb_headlen(skb), in __skb_splice_bits()
3089 skb_head_is_locked(skb), in __skb_splice_bits()
3096 if (!skb_frags_readable(skb)) in __skb_splice_bits()
3099 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
3100 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
3111 skb_walk_frags(skb, iter) { in __skb_splice_bits()
3128 * Map data from the skb to a pipe. Should handle both the linear part,
3131 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
3146 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
3179 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
3183 struct sk_buff *head = skb; in __skb_send_sock()
3190 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
3194 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3195 kv.iov_base = skb->data + offset; in __skb_send_sock()
3210 /* All the data was skb head? */ in __skb_send_sock()
3215 offset -= skb_headlen(skb); in __skb_send_sock()
3218 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3219 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3227 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3228 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3259 if (skb == head) { in __skb_send_sock()
3260 if (skb_has_frag_list(skb)) { in __skb_send_sock()
3261 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
3264 } else if (skb->next) { in __skb_send_sock()
3265 skb = skb->next; in __skb_send_sock()
3277 /* Send skb data on a socket. Socket must be locked. */
3278 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
3281 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); in skb_send_sock_locked()
3285 /* Send skb data on a socket. Socket must be unlocked. */
3286 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
3288 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); in skb_send_sock()
3292 * skb_store_bits - store bits from kernel buffer to skb
3293 * @skb: destination buffer
3299 * destination skb. This function handles all the messy bits of
3303 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
3305 int start = skb_headlen(skb); in skb_store_bits()
3309 if (offset > (int)skb->len - len) in skb_store_bits()
3315 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
3322 if (!skb_frags_readable(skb)) in skb_store_bits()
3325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
3326 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
3356 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
3383 /* Checksum skb data. */
3384 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
3387 int start = skb_headlen(skb); in __skb_checksum()
3397 skb->data + offset, copy, csum); in __skb_checksum()
3404 if (WARN_ON_ONCE(!skb_frags_readable(skb))) in __skb_checksum()
3407 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
3409 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
3444 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
3471 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
3479 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
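
For reference, the 32-bit result is usually folded down to the final 16-bit Internet checksum; a generic sketch not tied to any particular protocol:

    /* software checksum over the whole packet, then fold 32 -> 16 bits */
    __wsum csum = skb_checksum(skb, 0, skb->len, 0);
    __sum16 folded = csum_fold(csum);
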
3485 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
3488 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
3498 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3507 if (!skb_frags_readable(skb)) in skb_copy_and_csum_bits()
3510 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3515 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3517 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3546 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
3573 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
3577 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3580 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3581 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3582 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3584 if (!skb_shared(skb)) in __skb_checksum_complete_head()
3585 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3590 /* This function assumes skb->csum already holds pseudo header's checksum,
3592 * __skb_checksum_validate_complete(). And, the original skb->csum must
3596 * zero. The new checksum is stored back into skb->csum unless the skb is
3599 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
3604 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3606 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3610 * between the original skb->csum and skb_checksum(). This means either in __skb_checksum_complete()
3611 * the original hardware checksum is incorrect or we screw up skb->csum in __skb_checksum_complete()
3612 * when moving skb->data around. in __skb_checksum_complete()
3615 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3616 !skb->csum_complete_sw) in __skb_checksum_complete()
3617 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3620 if (!skb_shared(skb)) { in __skb_checksum_complete()
3622 skb->csum = csum; in __skb_checksum_complete()
3623 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3624 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3625 skb->csum_valid = !sum; in __skb_checksum_complete()
3662 * Calculates the amount of linear headroom needed in the 'to' skb passed
3686 * skb_zerocopy - Zero copy skb to skb
3701 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3707 int plen = 0; /* length of skb->head fragment */ in skb_zerocopy()
3763 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3768 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3769 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3771 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3773 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3775 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3778 if (csstart != skb->len) in skb_copy_and_csum_dev()
3779 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3780 skb->len - csstart); in skb_copy_and_csum_dev()
3782 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3783 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3860 * skb_rbtree_purge - empty a skb rbtree
3875 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3878 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3879 sum += skb->truesize; in skb_rbtree_purge()
3880 kfree_skb(skb); in skb_rbtree_purge()
3887 struct sk_buff *skb, *next; in skb_errqueue_purge() local
3894 skb_queue_walk_safe(list, skb, next) { in skb_errqueue_purge()
3895 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || in skb_errqueue_purge()
3896 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) in skb_errqueue_purge()
3898 __skb_unlink(skb, list); in skb_errqueue_purge()
3899 __skb_queue_tail(&kill, skb); in skb_errqueue_purge()
3950 * @skb: buffer to remove
3956 * You must know what list the SKB is on.
3958 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
3963 __skb_unlink(skb, list); in skb_unlink()
3988 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
3994 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3997 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3998 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
4000 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
4001 skb1->unreadable = skb->unreadable; in skb_split_inside_header()
4002 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
4003 skb1->data_len = skb->data_len; in skb_split_inside_header()
4005 skb->data_len = 0; in skb_split_inside_header()
4006 skb->len = len; in skb_split_inside_header()
4007 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
4010 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
4015 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
4017 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
4018 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
4019 skb->len = len; in skb_split_no_header()
4020 skb->data_len = len - pos; in skb_split_no_header()
4023 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
4026 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
4037 skb_frag_ref(skb, i); in skb_split_no_header()
4040 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
4041 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4045 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4050 skb1->unreadable = skb->unreadable; in skb_split_no_header()
4054 * skb_split - Split fragmented skb to two parts at length len.
4055 * @skb: the buffer to split
4057 * @len: new length for skb
4059 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
4061 int pos = skb_headlen(skb); in skb_split()
4064 skb_zcopy_downgrade_managed(skb); in skb_split()
4066 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
4067 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
4069 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
4071 skb_split_no_header(skb, skb1, len, pos); in skb_split()
4075 /* Shifting from/to a cloned skb is a no-go.
4079 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
4081 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
4085 * skb_shift - Shifts paged data partially from skb to another
4087 * @skb: buffer from which the paged data comes from
4091 * the length of the skb, from skb to tgt. Returns number bytes shifted.
4092 * It's up to caller to free skb if everything was shifted.
4096 * Skb cannot include anything else but paged data while tgt is allowed
4100 * specialized skb free'er to handle frags without up-to-date nr_frags.
4102 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
4107 BUG_ON(shiftlen > skb->len); in skb_shift()
4109 if (skb_headlen(skb)) in skb_shift()
4111 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
4114 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); in skb_shift()
4115 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); in skb_shift()
4120 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4133 if (skb_prepare_for_shift(skb) || in skb_shift()
4138 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4151 /* Skip full, not-fitting skb to avoid expensive operations */ in skb_shift()
4152 if ((shiftlen == skb->len) && in skb_shift()
4153 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4156 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
4159 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4163 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4191 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
4195 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
4198 /* Reposition in the original skb */ in skb_shift()
4200 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4201 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
4202 skb_shinfo(skb)->nr_frags = to; in skb_shift()
4204 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
4207 /* Most likely the tgt won't ever need its checksum anymore, skb on in skb_shift()
4211 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4213 skb_len_add(skb, -shiftlen); in skb_shift()
4220 * skb_prepare_seq_read - Prepare a sequential read of skb data
4221 * @skb: the buffer to read
4229 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
4234 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
4242 * skb_seq_read - Sequentially read skb data
4247 * Reads a block of skb data at @consumed relative to the
4250 * of the block or 0 if the end of the skb data or the upper
4354 * skb_abort_seq_read - Abort a sequential read of skb data
4417 * skb_find_text - Find a text pattern in skb data
4418 * @skb: the buffer to look in
4423 * Finds a pattern in the skb data according to the specified
4428 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
4440 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
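
skb_find_text() builds on the textsearch library; the pattern, algorithm choice and error handling below are an assumed minimal illustration:

    struct ts_config *conf;
    unsigned int pos;

    conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
    if (IS_ERR(conf))
            return PTR_ERR(conf);

    pos = skb_find_text(skb, 0, skb->len, conf);    /* UINT_MAX if not found */
    textsearch_destroy(conf);
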
4447 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
4450 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
4452 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
4453 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
4455 skb_zcopy_downgrade_managed(skb); in skb_append_pagefrags()
4457 skb_fill_page_desc_noacc(skb, i, page, offset, size); in skb_append_pagefrags()
4467 * skb_pull_rcsum - pull skb and update receive checksum
4468 * @skb: buffer to update
4477 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
4479 unsigned char *data = skb->data; in skb_pull_rcsum()
4481 BUG_ON(len > skb->len); in skb_pull_rcsum()
4482 __skb_pull(skb, len); in skb_pull_rcsum()
4483 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
4484 return skb->data; in skb_pull_rcsum()
4500 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
4504 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4505 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
4512 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4515 err = skb_unclone(skb, GFP_ATOMIC); in skb_segment_list()
4519 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4539 skb->next = nskb; in skb_segment_list()
4555 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4556 __copy_skb_header(nskb, skb); in skb_segment_list()
4558 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4560 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4569 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
4570 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4571 skb->len = skb->len - delta_len; in skb_segment_list()
4573 skb_gso_reset(skb); in skb_segment_list()
4575 skb->prev = tail; in skb_segment_list()
4577 if (skb_needs_linearize(skb, features) && in skb_segment_list()
4578 __skb_linearize(skb)) in skb_segment_list()
4581 skb_get(skb); in skb_segment_list()
4583 return skb; in skb_segment_list()
4586 kfree_skb_list(skb->next); in skb_segment_list()
4587 skb->next = NULL; in skb_segment_list()
4593 * skb_segment - Perform protocol segmentation on skb.
4597 * This function performs segmentation on the given skb. It returns
4662 * Try to split the SKB to multiple GSO SKBs in skb_segment()
5002 /* The SKB kmem_cache slab is critical for network performance. Never
5028 * struct skb_shared_info is located at the end of skb->head, in skb_init()
5042 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
5045 int start = skb_headlen(skb); in __skb_to_sgvec()
5056 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
5063 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
5068 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
5070 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
5086 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
5115 * @skb: Socket buffer containing the buffers to be mapped
5125 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
5127 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
5138 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
5139  * sglist without marking the sg which contains the last skb data as the end.
5157 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
5160 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
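A hedged sketch of the common pattern for these helpers: map a whole skb into a scatterlist before handing it to crypto. map_skb_for_crypto() and nsg_max are assumptions; nsg_max must cover the linear head, every frag and any frag_list skbs.

	#include <linux/scatterlist.h>
	#include <linux/skbuff.h>

	static int map_skb_for_crypto(struct sk_buff *skb,
				      struct scatterlist *sg, int nsg_max)
	{
		int nsg;

		sg_init_table(sg, nsg_max);
		nsg = skb_to_sgvec(skb, sg, 0, skb->len);	/* marks the last sg */
		if (nsg < 0)
			return nsg;
		return nsg;		/* number of entries actually used */
	}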
5168 * @skb: The socket buffer to check.
5170 * @trailer: Returned pointer to the skb where the @tailbits space begins
5178 * set to point to the skb in which this space begins.
5183 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
5189 /* If skb is cloned or its head is paged, reallocate in skb_cow_data()
5193 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
5194 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
5198 if (!skb_has_frag_list(skb)) { in skb_cow_data()
5204 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
5205 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
5209 *trailer = skb; in skb_cow_data()
5216 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
5229 /* If the skb is the last, worry about trailer. */ in skb_cow_data()
5260 * OK, link new skb, drop old one */ in skb_cow_data()
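A hedged sketch in the style of an IPsec transform, the usual consumer of skb_cow_data(); reserve_trailer() and padlen are assumptions.

	#include <linux/skbuff.h>

	static int reserve_trailer(struct sk_buff *skb, int padlen)
	{
		struct sk_buff *trailer;
		int nfrags;

		/* Makes the data writable and guarantees padlen bytes of tailroom
		 * somewhere in the chain; returns the number of buffers or < 0. */
		nfrags = skb_cow_data(skb, padlen, &trailer);
		if (nfrags < 0)
			return nfrags;

		/* The padding can now be written at the tail of 'trailer'. */
		pskb_put(skb, trailer, padlen);
		return 0;
	}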
5276 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
5278 struct sock *sk = skb->sk; in sock_rmem_free()
5280 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
5283 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
5288 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
5295 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
5297 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
5301 skb_orphan(skb); in sock_queue_err_skb()
5302 skb->sk = sk; in sock_queue_err_skb()
5303 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
5304 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
5305 skb_set_err_queue(skb); in sock_queue_err_skb()
5308 skb_dst_force(skb); in sock_queue_err_skb()
5310 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
5317 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
5319 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
5320 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
5326 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
5334 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
5335 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
5342 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
5348 return skb; in sock_dequeue_err_skb()
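A hedged sketch of how an error notification reaches MSG_ERRQUEUE readers, loosely following the existing ICMP error paths; queue_local_err() is a hypothetical name.

	#include <linux/errqueue.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <net/sock.h>

	static int queue_local_err(struct sock *sk, int errno_val)
	{
		struct sock_exterr_skb *serr;
		struct sk_buff *skb;

		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		serr = SKB_EXT_ERR(skb);
		memset(serr, 0, sizeof(*serr));
		serr->ee.ee_errno = errno_val;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;

		/* Charges sk_rmem_alloc and wakes the socket; non-zero on failure. */
		if (sock_queue_err_skb(sk, skb)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		return 0;
	}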
5353 * skb_clone_sk - create clone of skb, and take reference to socket
5354 * @skb: the skb to clone
5365 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
5367 struct sock *sk = skb->sk; in skb_clone_sk()
5373 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
5386 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
5394 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
5396 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
5402 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
5404 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
5409 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
5412 kfree_skb(skb); in __skb_complete_tx_timestamp()
5429 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
5432 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
5441 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
5442 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
5448 kfree_skb(skb); in skb_complete_tx_timestamp()
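A hedged sketch of a driver completion path: 'clone' is assumed to have been taken earlier with skb_clone_sk() when the frame was queued for hardware timestamping, and 'ns' is the raw hardware timestamp in nanoseconds.

	#include <linux/ktime.h>
	#include <linux/skbuff.h>

	static void my_tx_tstamp_done(struct sk_buff *clone, u64 ns)
	{
		struct skb_shared_hwtstamps hwts = { };

		hwts.hwtstamp = ns_to_ktime(ns);
		/* Delivers the timestamp to the owning socket's error queue
		 * and consumes the clone (freed on failure as well). */
		skb_complete_tx_timestamp(clone, &hwts);
	}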
5457 struct sk_buff *skb; in __skb_tstamp_tx() local
5477 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, in __skb_tstamp_tx()
5482 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
5484 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
5486 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { in __skb_tstamp_tx()
5487 kfree_skb(skb); in __skb_tstamp_tx()
5491 if (!skb) in __skb_tstamp_tx()
5495 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5497 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5501 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
5503 __net_timestamp(skb); in __skb_tstamp_tx()
5505 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
5518 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
5520 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5524 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5525 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5527 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
5536 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
5540 kfree_skb(skb); in skb_complete_wifi_ack()
5547 * @skb: the skb to set
5548 * @start: the number of bytes after skb->data to start checksumming.
5552 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5554 * This function checks and sets those values and skb->ip_summed: if this
5557 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
5560 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
5562 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
5564 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
5567 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5568 skb->csum_start = csum_start; in skb_partial_csum_set()
5569 skb->csum_offset = off; in skb_partial_csum_set()
5570 skb->transport_header = csum_start; in skb_partial_csum_set()
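A hedged sketch following the virtio-net style of consuming untrusted checksum metadata; set_partial_csum() and its parameters are assumptions.

	#include <linux/skbuff.h>

	static int set_partial_csum(struct sk_buff *skb,
				    u16 csum_start, u16 csum_offset)
	{
		/* On success this sets CHECKSUM_PARTIAL, skb->csum_start/offset
		 * and positions the transport header; false means the offsets
		 * were bogus and must not be trusted. */
		if (!skb_partial_csum_set(skb, csum_start, csum_offset))
			return -EINVAL;
		return 0;
	}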
5575 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
5578 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
5584 if (max > skb->len) in skb_maybe_pull_tail()
5585 max = skb->len; in skb_maybe_pull_tail()
5587 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5590 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
5598 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
5606 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
5608 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5612 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5615 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
5617 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5621 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5632 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
5641 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
5647 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
5650 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
5657 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5662 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5663 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5664 skb->len - off, in skb_checksum_setup_ipv4()
5665 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5677 #define OPT_HDR(type, skb, off) \ argument
5678 (type *)(skb_network_header(skb) + (off))
5680 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
5695 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
5699 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5701 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5709 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5716 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5724 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5731 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5739 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5746 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
5766 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
5771 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5772 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5773 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5782 * @skb: the skb to set up
5785 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
5789 switch (skb->protocol) { in skb_checksum_setup()
5791 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
5795 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
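A hedged sketch of a paravirtual receive path, where checksum state advertised by the peer has to be re-established before the stack sees the packet; fixup_rx_csum() and csum_blank are assumptions.

	#include <linux/skbuff.h>

	static int fixup_rx_csum(struct sk_buff *skb, bool csum_blank)
	{
		/* 'recalculate' additionally recomputes the TCP/UDP
		 * pseudo-header checksum; a negative errno is returned for
		 * unsupported or malformed protocols. */
		return skb_checksum_setup(skb, csum_blank);
	}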
5808 * skb_checksum_maybe_trim - maybe trims the given skb
5809 * @skb: the skb to check
5812 * Checks whether the given skb has data beyond the given transport length.
5813 * If so, returns a cloned skb trimmed to this transport length.
5814 * Otherwise returns the provided skb. Returns NULL in error cases
5815 * (e.g. transport_len exceeds skb length or out-of-memory).
5817 * Caller needs to set the skb transport header and free any returned skb if it
5818 * differs from the provided skb.
5820 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
5824 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
5827 if (skb->len < len) in skb_checksum_maybe_trim()
5829 else if (skb->len == len) in skb_checksum_maybe_trim()
5830 return skb; in skb_checksum_maybe_trim()
5832 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
5846 * skb_checksum_trimmed - validate checksum of an skb
5847 * @skb: the skb to check
5851 * Applies the given checksum function skb_chkf to the provided skb.
5852 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5854 * If the skb has data beyond the given transport length, then a
5855 * trimmed & cloned skb is checked and returned.
5857 * Caller needs to set the skb transport header and free any returned skb if it
5858 * differs from the provided skb.
5860 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
5862 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
5865 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
5868 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
5885 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
5893 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
5896 skb->dev->name); in __skb_warn_lro_forwarding()
5900 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
5903 skb_release_head_state(skb); in kfree_skb_partial()
5904 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skb_partial()
5906 __kfree_skb(skb); in kfree_skb_partial()
5912 * skb_try_coalesce - try to merge skb to prior one
5930 * pages within the same SKB. In theory we could take full in skb_try_coalesce()
5992 /* if the skb is not cloned this does nothing in skb_try_coalesce()
6010 * skb_scrub_packet - scrub an skb
6012 * @skb: buffer to clean
6018 * skb_scrub_packet can also be used to clean a skb before injecting it in
6020 * skb that could impact namespace isolation.
6022 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
6024 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
6025 skb->skb_iif = 0; in skb_scrub_packet()
6026 skb->ignore_df = 0; in skb_scrub_packet()
6027 skb_dst_drop(skb); in skb_scrub_packet()
6028 skb_ext_reset(skb); in skb_scrub_packet()
6029 nf_reset_ct(skb); in skb_scrub_packet()
6030 nf_reset_trace(skb); in skb_scrub_packet()
6033 skb->offload_fwd_mark = 0; in skb_scrub_packet()
6034 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
6036 ipvs_reset(skb); in skb_scrub_packet()
6041 skb->mark = 0; in skb_scrub_packet()
6042 skb_clear_tstamp(skb); in skb_scrub_packet()
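A hedged sketch of the typical tunnel/forwarding call site, where xnet is derived from whether the ingress and egress devices share a network namespace; prepare_for_xmit() is a hypothetical name.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <net/net_namespace.h>

	static void prepare_for_xmit(struct sk_buff *skb,
				     struct net_device *in_dev,
				     struct net_device *out_dev)
	{
		bool xnet = !net_eq(dev_net(in_dev), dev_net(out_dev));

		/* Drops dst, conntrack and extensions; when xnet is true it
		 * also clears skb->mark and the timestamp, as above. */
		skb_scrub_packet(skb, xnet);
	}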
6046 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
6051 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
6052 kfree_skb(skb); in skb_reorder_vlan_header()
6056 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
6058 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
6062 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
6064 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
6068 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
6069 return skb; in skb_reorder_vlan_header()
6072 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
6077 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
6079 return skb; in skb_vlan_untag()
6082 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
6083 if (unlikely(!skb)) in skb_vlan_untag()
6086 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
6089 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
6091 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
6093 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
6094 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
6096 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
6097 if (unlikely(!skb)) in skb_vlan_untag()
6100 skb_reset_network_header(skb); in skb_vlan_untag()
6101 if (!skb_transport_header_was_set(skb)) in skb_vlan_untag()
6102 skb_reset_transport_header(skb); in skb_vlan_untag()
6103 skb_reset_mac_len(skb); in skb_vlan_untag()
6105 return skb; in skb_vlan_untag()
6108 kfree_skb(skb); in skb_vlan_untag()
6113 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) in skb_ensure_writable() argument
6115 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
6118 if (!skb_frags_readable(skb)) in skb_ensure_writable()
6121 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
6124 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
6128 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) in skb_ensure_writable_head_tail() argument
6138 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) in skb_ensure_writable_head_tail()
6139 needed_tailroom += ETH_ZLEN - skb->len; in skb_ensure_writable_head_tail()
6141 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); in skb_ensure_writable_head_tail()
6142 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); in skb_ensure_writable_head_tail()
6144 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) in skb_ensure_writable_head_tail()
6148 return pskb_expand_head(skb, needed_headroom, needed_tailroom, in skb_ensure_writable_head_tail()
6154 * expects an skb without skb_vlan_tag_present() set but with a VLAN tag in the payload
6156 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
6158 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
6162 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", in __skb_vlan_pop()
6167 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
6171 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
6173 vlan_remove_tag(skb, vlan_tci); in __skb_vlan_pop()
6175 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
6177 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
6178 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
6180 skb_reset_mac_len(skb); in __skb_vlan_pop()
6187 * Expects skb->data at mac header.
6189 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
6195 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
6196 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
6198 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6201 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6206 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6209 vlan_proto = skb->protocol; in skb_vlan_pop()
6210 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6214 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
6220 * Expects skb->data at mac header.
6222 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
6224 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
6225 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
6229 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", in skb_vlan_push()
6234 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
6235 skb_vlan_tag_get(skb)); in skb_vlan_push()
6239 skb->protocol = skb->vlan_proto; in skb_vlan_push()
6240 skb->network_header -= VLAN_HLEN; in skb_vlan_push()
6242 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
6244 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
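A hedged sketch in the style of a tc/OVS VLAN action, replacing the outermost tag; retag_vlan() and new_vid are assumptions, and both helpers expect skb->data at the mac header.

	#include <linux/if_vlan.h>
	#include <linux/skbuff.h>

	static int retag_vlan(struct sk_buff *skb, u16 new_vid)
	{
		int err;

		err = skb_vlan_pop(skb);	/* hwaccel tag or in-payload tag */
		if (err)
			return err;
		return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
	}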
6252 * @skb: Socket buffer to modify
6254 * Drop the Ethernet header of @skb.
6256 * Expects that skb->data points to the mac header and that no VLAN tags are
6261 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
6263 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
6264 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
6267 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
6268 skb_reset_mac_header(skb); in skb_eth_pop()
6269 skb_reset_mac_len(skb); in skb_eth_pop()
6278 * @skb: Socket buffer to modify
6282 * Prepend @skb with a new Ethernet header.
6284 * Expects that skb->data points to the mac header, which must be empty.
6288 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
6294 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
6297 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
6301 skb_push(skb, sizeof(*eth)); in skb_eth_push()
6302 skb_reset_mac_header(skb); in skb_eth_push()
6303 skb_reset_mac_len(skb); in skb_eth_push()
6305 eth = eth_hdr(skb); in skb_eth_push()
6308 eth->h_proto = skb->protocol; in skb_eth_push()
6310 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
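A hedged sketch of an L2 decap/encap action rewriting the Ethernet header; reencap_eth() and the address parameters are assumptions. After skb_eth_pop(), skb->data sits at the (now empty) mac header, which is what skb_eth_push() expects.

	#include <linux/skbuff.h>

	static int reencap_eth(struct sk_buff *skb,
			       const unsigned char *new_dst,
			       const unsigned char *new_src)
	{
		int err;

		err = skb_eth_pop(skb);
		if (err)
			return err;
		/* Fills h_dest/h_source and sets h_proto from skb->protocol. */
		return skb_eth_push(skb, new_dst, new_src);
	}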
6316 /* Update the ethertype of hdr and the skb csum value if required. */
6317 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
6320 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
6323 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
6333 * @skb: buffer
6340 * Expects skb->data at mac header.
6344 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
6354 if (skb->encapsulation) in skb_mpls_push()
6357 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
6361 if (!skb->inner_protocol) { in skb_mpls_push()
6362 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
6363 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
6366 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
6367 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6369 skb_reset_mac_header(skb); in skb_mpls_push()
6370 skb_set_network_header(skb, mac_len); in skb_mpls_push()
6371 skb_reset_mac_len(skb); in skb_mpls_push()
6373 lse = mpls_hdr(skb); in skb_mpls_push()
6375 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
6378 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
6379 skb->protocol = mpls_proto; in skb_mpls_push()
6388 * @skb: buffer
6393 * Expects skb->data at mac header.
6397 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
6402 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6405 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
6409 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
6410 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
6413 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
6414 skb_reset_mac_header(skb); in skb_mpls_pop()
6415 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
6421 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6422 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
6424 skb->protocol = next_proto; in skb_mpls_pop()
6433 * @skb: buffer
6436 * Expects skb->data at mac header.
6440 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
6444 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6447 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6451 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6452 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6454 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6457 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6466 * @skb: buffer
6468 * Expects skb->data at mac header.
6472 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
6477 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6480 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
6483 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6491 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
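A hedged sketch of an MPLS push action on an Ethernet frame, in the style of the openvswitch/tc users of these helpers; push_mpls_label() is a hypothetical name and 'lse' is assumed to be a pre-built label stack entry in network byte order.

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	static int push_mpls_label(struct sk_buff *skb, __be32 lse)
	{
		/* mac_len = skb->mac_len and ethernet = true for an L2 frame;
		 * the helper updates skb->protocol and the mac header itself. */
		return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
				     skb->mac_len, true);
	}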
6496 * alloc_skb_with_frags - allocate skb with page frags
6504 * This can be used to allocate a paged skb, given a maximal order for frags.
6513 struct sk_buff *skb; in alloc_skb_with_frags() local
6522 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
6523 if (!skb) in alloc_skb_with_frags()
6548 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); in alloc_skb_with_frags()
6550 skb->truesize += (PAGE_SIZE << order); in alloc_skb_with_frags()
6553 return skb; in alloc_skb_with_frags()
6556 kfree_skb(skb); in alloc_skb_with_frags()
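A hedged sketch (the parameter order is as remembered from the sock_alloc_send_pskb() caller, so treat it as an assumption): build a paged skb with a linear header and 'len' bytes spread over page fragments.

	#include <linux/err.h>
	#include <linux/skbuff.h>

	static struct sk_buff *make_paged_skb(unsigned long hdr_len,
					      unsigned long len, gfp_t gfp)
	{
		struct sk_buff *skb;
		int err;

		/* 0 restricts the frags to order-0 pages; a larger maximal
		 * order lets the allocator try compound pages first. */
		skb = alloc_skb_with_frags(hdr_len, len, 0, &err, gfp);
		if (!skb)
			return ERR_PTR(err);
		return skb;
	}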
6561 /* carve out the first off bytes from skb when off < headlen */
6562 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
6566 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_header()
6570 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
6579 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
6580 skb->len -= off; in pskb_carve_inside_header()
6583 skb_shinfo(skb), in pskb_carve_inside_header()
6585 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6586 if (skb_cloned(skb)) { in pskb_carve_inside_header()
6588 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
6592 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6593 skb_frag_ref(skb, i); in pskb_carve_inside_header()
6594 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
6595 skb_clone_fraglist(skb); in pskb_carve_inside_header()
6596 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_header()
6601 skb_free_head(skb); in pskb_carve_inside_header()
6604 skb->head = data; in pskb_carve_inside_header()
6605 skb->data = data; in pskb_carve_inside_header()
6606 skb->head_frag = 0; in pskb_carve_inside_header()
6607 skb_set_end_offset(skb, size); in pskb_carve_inside_header()
6608 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6609 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6610 skb->cloned = 0; in pskb_carve_inside_header()
6611 skb->hdr_len = 0; in pskb_carve_inside_header()
6612 skb->nohdr = 0; in pskb_carve_inside_header()
6613 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6618 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6620 /* carve out the first eat bytes from skb's frag_list. May recurse into
6623 static int pskb_carve_frag_list(struct sk_buff *skb, in pskb_carve_frag_list() argument
6674 /* carve off first len bytes from skb. Split line (off) is in the
6675 * non-linear part of skb
6677 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6681 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6683 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6686 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6695 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6696 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6702 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6705 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6719 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6725 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6726 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6729 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6731 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6732 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6736 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_nonlinear()
6738 skb->head = data; in pskb_carve_inside_nonlinear()
6739 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6740 skb->data = data; in pskb_carve_inside_nonlinear()
6741 skb_set_end_offset(skb, size); in pskb_carve_inside_nonlinear()
6742 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6743 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6744 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6745 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6746 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6747 skb->len -= off; in pskb_carve_inside_nonlinear()
6748 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6749 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6753 /* remove len bytes from the beginning of the skb */
6754 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6756 int headlen = skb_headlen(skb); in pskb_carve()
6759 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6761 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
6764 /* Extract to_copy bytes starting at off from skb, and return this in
6765 * a new skb
6767 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
6770 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
6786 * @skb: buffer
6788 * Can be used to save memory before skb is added to a busy queue.
6789 * If packet has bytes in frags and enough tail room in skb->head,
6793 * We do not reallocate skb->head, thus this cannot fail. in skb_condense()
6794 * Caller must re-evaluate skb->truesize if needed.
6796 void skb_condense(struct sk_buff *skb) in skb_condense() argument
6798 if (skb->data_len) { in skb_condense()
6799 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6800 skb_cloned(skb) || !skb_frags_readable(skb)) in skb_condense()
6804 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6806 /* At this point, skb->truesize might be over estimated, in skb_condense()
6807 * because skb had a fragment, and fragments do not tell in skb_condense()
6809 * When we pulled its content into skb->head, fragment in skb_condense()
6811 * adjust skb->truesize, not knowing the frag truesize. in skb_condense()
6813 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
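A hedged sketch of the intended call pattern (cf. the TCP backlog): condense before parking the skb on a long-lived queue, then re-read truesize as the comment above requires. enqueue_condensed() and backlog_bytes are assumptions, and the queue lock is assumed to be held by the caller.

	#include <linux/skbuff.h>

	static void enqueue_condensed(struct sk_buff_head *q, struct sk_buff *skb,
				      unsigned int *backlog_bytes)
	{
		skb_condense(skb);
		*backlog_bytes += skb->truesize;	/* re-evaluated after condense */
		__skb_queue_tail(q, skb);
	}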
6824 * __skb_ext_alloc - allocate a new skb extensions storage
6829 * skb via __skb_ext_set().
6881 * __skb_ext_set - attach the specified extension storage to this skb
6882 * @skb: buffer
6890 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
6895 skb_ext_put(skb); in __skb_ext_set()
6899 skb->extensions = ext; in __skb_ext_set()
6900 skb->active_extensions = 1 << id; in __skb_ext_set()
6906 * @skb: buffer
6913 * If the skb was cloned, COW applies and the returned memory can be
6918 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
6923 if (skb->active_extensions) { in skb_ext_add()
6924 old = skb->extensions; in skb_ext_add()
6926 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6946 skb->slow_gro = 1; in skb_ext_add()
6947 skb->extensions = new; in skb_ext_add()
6948 skb->active_extensions |= 1 << id; in skb_ext_add()
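A hedged sketch (assumes CONFIG_XFRM) mirroring how secpath_set() attaches the SKB_EXT_SEC_PATH extension; attach_secpath() is a hypothetical wrapper.

	#include <linux/skbuff.h>
	#include <net/xfrm.h>

	static struct sec_path *attach_secpath(struct sk_buff *skb)
	{
		/* Returns the existing extension or a freshly allocated one,
		 * COWing the extension area if the skb was cloned; NULL on
		 * allocation failure. */
		return skb_ext_add(skb, SKB_EXT_SEC_PATH);
	}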
6971 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
6973 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6975 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6976 if (skb->active_extensions == 0) { in __skb_ext_del()
6977 skb->extensions = NULL; in __skb_ext_del()
7016 static void kfree_skb_napi_cache(struct sk_buff *skb) in kfree_skb_napi_cache() argument
7018 /* if SKB is a clone, don't handle this case */ in kfree_skb_napi_cache()
7019 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in kfree_skb_napi_cache()
7020 __kfree_skb(skb); in kfree_skb_napi_cache()
7025 __napi_kfree_skb(skb, SKB_CONSUMED); in kfree_skb_napi_cache()
7030 * skb_attempt_defer_free - queue skb for remote freeing
7031 * @skb: buffer
7033 * Put @skb in a per-cpu list, using the cpu which
7034 * allocated the skb/pages to reduce false sharing
7037 void skb_attempt_defer_free(struct sk_buff *skb) in skb_attempt_defer_free() argument
7039 int cpu = skb->alloc_cpu; in skb_attempt_defer_free()
7047 nodefer: kfree_skb_napi_cache(skb); in skb_attempt_defer_free()
7051 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); in skb_attempt_defer_free()
7052 DEBUG_NET_WARN_ON_ONCE(skb->destructor); in skb_attempt_defer_free()
7065 skb->next = sd->defer_list; in skb_attempt_defer_free()
7067 WRITE_ONCE(sd->defer_list, skb); in skb_attempt_defer_free()
7077 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, in skb_splice_csum_page() argument
7086 skb->csum = csum_block_add(skb->csum, csum, skb->len); in skb_splice_csum_page()
7091 * @skb: The buffer to add pages to
7104 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, in skb_splice_from_iter() argument
7117 space = frag_limit - skb_shinfo(skb)->nr_frags; in skb_splice_from_iter()
7139 ret = skb_append_pagefrags(skb, page, off, part, in skb_splice_from_iter()
7146 if (skb->ip_summed == CHECKSUM_NONE) in skb_splice_from_iter()
7147 skb_splice_csum_page(skb, page, off, part); in skb_splice_from_iter()
7160 skb_len_add(skb, spliced); in skb_splice_from_iter()
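A hedged sketch of a MSG_SPLICE_PAGES-style sender: splice pages referenced by the iterator directly into the skb's frags. splice_into_skb() and 'want' are assumptions, and the trailing maxsize/gfp parameters follow the prototype as remembered from the header, not from the lines above.

	#include <linux/skbuff.h>
	#include <linux/uio.h>

	static ssize_t splice_into_skb(struct sk_buff *skb,
				       struct iov_iter *from, size_t want)
	{
		ssize_t copied;

		copied = skb_splice_from_iter(skb, from, want, GFP_KERNEL);
		if (copied < 0)
			return copied;	/* e.g. no frag slots left */
		/* skb->len, data_len and truesize were updated via skb_len_add(). */
		return copied;
	}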