Lines Matching +full:mctp +full:- +full:handling

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 * Corey Minyard <wf-rch!minyard@acm.org>
54 #include <net/dropreason-core.h>
105 * - qdisc return codes
106 * - driver transmit return codes
107 * - errno values
111 * the driver transmit return codes though - when qdiscs are used, the actual
118 /* qdisc ->enqueue() return codes. */
128 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
148 * - successful transmission (rc == NETDEV_TX_OK) in dev_xmit_complete()
149 * - error while transmitting (rc < 0) in dev_xmit_complete()
150 * - error while queueing to a different device (rc & NET_XMIT_MASK) in dev_xmit_complete()
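
A minimal sketch (not part of this header) of how a caller of dev_queue_xmit() typically folds a qdisc verdict back into an errno; the wrapper name is hypothetical:

static int example_output(struct sk_buff *skb)
{
	/* Positive return values are qdisc verdicts (NET_XMIT_*),
	 * negative values are errnos from the driver path.
	 */
	int rc = dev_queue_xmit(skb);

	if (rc > 0)	/* NET_XMIT_CN is not an error for the sender */
		rc = net_xmit_errno(rc);
	return rc;
}
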
159 * Compute the worst-case header length according to the protocols
220 /* per-cpu stats, allocated on demand.
261 #define netdev_hw_addr_list_count(l) ((l)->count)
264 list_for_each_entry(ha, &(l)->list, list)
266 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
267 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
269 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
272 if ((_ha)->sync_cnt)
274 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
275 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
277 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
280 if ((_ha)->sync_cnt)
289 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
291 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
295 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
297 * dev->hard_header_len ? (dev->hard_header_len +
298 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
304 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
305 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
307 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
308 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
349 * Structure for per-NAPI config
365 * to the per-CPU poll_list, and whoever clears that bit
388 /* all fields past this point are write-protected by netdev_lock */
393 /* control-path-only fields follow */
405 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
409 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing */
437 * enum rx_handler_result - Possible return values for rx_handlers.
441 * case skb->dev was changed by rx_handler.
449 * to register a second rx_handler will return -EBUSY.
462 * If the rx_handler changed skb->dev, to divert the skb to another
468 * are registered on exact device (ptype->dev == skb->dev).
470 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
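
A sketch of an rx_handler that follows the rules above; the upper-device lookup is illustrative (real users such as bonding or macvlan stash their context in rx_handler_data when calling netdev_rx_handler_register()):

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

	if (!upper)
		return RX_HANDLER_PASS;		/* deliver on the original device */

	skb->dev = upper;			/* divert to the upper device ... */
	return RX_HANDLER_ANOTHER;		/* ... and let the core redo delivery */
}
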
491 return test_bit(NAPI_STATE_DISABLE, &n->state); in napi_disable_pending()
496 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); in napi_prefer_busy_poll()
500 * napi_is_scheduled - test if NAPI is scheduled
503 * This check is "best-effort". With no locking implemented,
519 return test_bit(NAPI_STATE_SCHED, &n->state); in napi_is_scheduled()
525 * napi_schedule - schedule NAPI poll
545 * napi_schedule_irqoff - schedule NAPI poll
557 * napi_complete_done - NAPI processing complete
582 * napi_synchronize - wait until NAPI is not running
592 while (test_bit(NAPI_STATE_SCHED, &n->state)) in napi_synchronize()
599 * napi_if_scheduled_mark_missed - if napi is running, set the
610 val = READ_ONCE(n->state); in napi_if_scheduled_mark_missed()
619 } while (!try_cmpxchg(&n->state, &val, new)); in napi_if_scheduled_mark_missed()
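
Putting the scheduling helpers together, a typical driver poll routine looks roughly like this; the per-device helpers are hypothetical:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = example_clean_rx(priv, budget);		/* hypothetical */

	/* Re-enable device interrupts only if the budget was not exhausted
	 * and napi_complete_done() agreed to take us off the poll list.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_enable_irqs(priv);			/* hypothetical */

	return work_done;
}
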
652 * read-mostly part
676 * write-mostly part
691 * slow- / control-path part
735 return q->numa_node; in netdev_queue_numa_node_read()
744 q->numa_node = node; in netdev_queue_numa_node_write()
753 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */
773 - sizeof(struct xps_map)) / sizeof(u16))
779 * in nr_ids. This helps avoid accessing out-of-bounds memory.
783 * not crossing its upper bound, as the original dev->num_tc can be updated in
839 return a->id_len == b->id_len && in netdev_phys_item_id_same()
840 memcmp(a->id, b->id, a->id_len) == 0; in netdev_phys_item_id_same()
1050 * corner cases, but the stack really does a non-trivial amount
1087 * Old-style ioctl entry point. This is used internally by the
1111 * for dev->watchdog ticks.
1118 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1121 * (which should normally be dev->stats) and return a pointer to
1124 * 3. Update dev->stats asynchronously and atomically, and define
1145 * SR-IOV management functions.
1179 * so the underlying device can perform whatever clean-ups are needed to
1206 * FC-GS Fabric Device Management Information (FDMI) specification.
1237 * Adjusts the requested feature flags according to device-specific
1244 * Must return >0 or -errno if it changed dev->features itself.
1291 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1295 * network cables) or protocol-dependent mechanisms (eg
1322 * Called when a user wants to set a max-rate limitation of specific
1346 * no frames were transmitted and the core caller will free all frames.
1638 * enum netdev_priv_flags - &struct net_device priv_flags
1654 * release skb->dst
1656 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1744 * struct net_device - The DEVICE structure.
1747 * data with strictly "high-level" data, and it has to know about
1773 * @ptype_all: Device-specific packet handlers for all protocols
1774 * @ptype_specific: Device-specific, protocol-specific packet handlers
1778 * @hw_features: User-changeable features
1780 * @wanted_features: User-requested features
1813 * discovery handling. Necessary for e.g. 6LoWPAN.
1822 * @priv_len: Size of the ->priv flexible array
1876 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1879 * @mctp_ptr: MCTP specific data
1897 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1937 * @ml_priv: Mid-layer private
1938 * @ml_priv_type: Mid-layer private type
1942 * means the driver is handling statistics allocation/
1957 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1959 * @stat_ops: Optional ops for queue-aware statistics
1983 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1999 * @net_notifier_list: List of per-net netdev notifier block
2012 * dev->addr_list_lock.
2038 * @napi_config: An array of napi_config structures containing per-NAPI
2045 * dev_list, one per address-family.
2058 /* TX read-mostly hotpath */
2073 /* Note : dev->mtu is often read without holding a lock.
2092 /* TXRX read-mostly hotpath */
2106 /* RX read-mostly hotpath */
2155 /* Read-mostly cache-line for fast-path access */
2243 /* Protocol-specific pointers */
2285 * and shinfo->gso_segs is a 16bit field.
2313 /* These may be needed for future network-power-down code. */
2342 /* mid-layer private */
2371 * and shinfo->gso_segs is a 16bit field.
2414 /** @cfg: net_device queue-related configuration */
2459 * @lock: netdev-scope lock, protects a small selection of fields.
2500 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
2501 ((dev)->devlink_port = (port)); \
2506 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) in netif_elide_gro()
2516 return dev->prio_tc_map[prio & TC_BITMASK]; in netdev_get_prio_tc_map()
2522 if (tc >= dev->num_tc) in netdev_set_prio_tc_map()
2523 return -EINVAL; in netdev_set_prio_tc_map()
2525 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; in netdev_set_prio_tc_map()
2537 return dev->num_tc; in netdev_get_num_tc()
2564 return max_t(int, -dev->num_tc, 0); in netdev_get_sb_channel()
2571 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); in netdev_get_tx_queue()
2572 return &dev->_tx[index]; in netdev_get_tx_queue()
2589 for (i = 0; i < dev->num_tx_queues; i++) in netdev_for_each_tx_queue()
2590 f(dev, &dev->_tx[i], arg); in netdev_for_each_tx_queue()
2600 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2601 lockdep_set_class(&(dev)->addr_list_lock, \
2603 for (i = 0; i < (dev)->num_tx_queues; i++) \
2604 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2619 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; in netdev_get_fwd_headroom()
2624 if (dev->netdev_ops->ndo_set_rx_headroom) in netdev_set_rx_headroom()
2625 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); in netdev_set_rx_headroom()
2631 netdev_set_rx_headroom(dev, -1); in netdev_reset_rx_headroom()
2637 if (dev->ml_priv_type != type) in netdev_get_ml_priv()
2640 return dev->ml_priv; in netdev_get_ml_priv()
2647 WARN(dev->ml_priv_type && dev->ml_priv_type != type, in netdev_set_ml_priv()
2649 dev->ml_priv_type, type); in netdev_set_ml_priv()
2650 WARN(!dev->ml_priv_type && dev->ml_priv, in netdev_set_ml_priv()
2653 dev->ml_priv = ml_priv; in netdev_set_ml_priv()
2654 dev->ml_priv_type = type; in netdev_set_ml_priv()
2663 return read_pnet(&dev->nd_net); in dev_net()
2669 return read_pnet_rcu(&dev->nd_net); in dev_net_rcu()
2675 write_pnet(&dev->nd_net, net); in dev_net_set()
2679 * netdev_priv - access network device private data
2686 return (void *)dev->priv; in netdev_priv()
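
The private area is sized at allocation time and then reached through netdev_priv(); a minimal sketch (the struct layout is illustrative, alloc_etherdev() comes from <linux/etherdevice.h>):

struct example_priv {
	struct napi_struct napi;
	void __iomem *regs;
};

static struct net_device *example_alloc(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!dev)
		return NULL;
	priv = netdev_priv(dev);	/* points into the same allocation as dev */
	priv->regs = NULL;
	return dev;
}
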
2692 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2695 * fine-grained identification of different network device types. For
2698 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2706 mutex_lock(&dev->lock); in netdev_lock()
2711 mutex_unlock(&dev->lock); in netdev_unlock()
2716 lockdep_assert_held(&dev->lock); in netdev_assert_locked()
2721 if (dev->reg_state == NETREG_REGISTERED || in netdev_assert_locked_or_invisible()
2722 dev->reg_state == NETREG_UNREGISTERING) in netdev_assert_locked_or_invisible()
2728 napi->irq = irq; in netif_napi_set_irq_locked()
2733 netdev_lock(napi->dev); in netif_napi_set_irq()
2735 netdev_unlock(napi->dev); in netif_napi_set_irq()
2758 * netif_napi_add() - initialize a NAPI context
2764 * *any* of the other NAPI-related functions.
2786 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); in netif_napi_add_tx_weight()
2794 napi->index = index; in netif_napi_add_config_locked()
2795 napi->config = &dev->napi_config[index]; in netif_napi_add_config_locked()
2800 * netif_napi_add_config - initialize a NAPI context with persistent config
2816 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2835 * __netif_napi_del - remove a NAPI context
2844 netdev_lock(napi->dev); in __netif_napi_del()
2846 netdev_unlock(napi->dev); in __netif_napi_del()
2856 * netif_napi_del - remove a NAPI context
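
Registration and teardown typically bracket the driver's lifetime; a sketch assuming the current three-argument netif_napi_add() and the example_poll() routine sketched earlier:

	/* probe/open path */
	netif_napi_add(dev, &priv->napi, example_poll);
	napi_enable(&priv->napi);

	/* close/remove path, in reverse order */
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
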
2901 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2930 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_rx_add()
2932 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_rx_add()
2933 u64_stats_add(&tstats->rx_bytes, len); in dev_sw_netstats_rx_add()
2934 u64_stats_inc(&tstats->rx_packets); in dev_sw_netstats_rx_add()
2935 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_rx_add()
2942 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_tx_add()
2944 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_tx_add()
2945 u64_stats_add(&tstats->tx_bytes, len); in dev_sw_netstats_tx_add()
2946 u64_stats_add(&tstats->tx_packets, packets); in dev_sw_netstats_tx_add()
2947 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_tx_add()
2952 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); in dev_lstats_add()
2954 u64_stats_update_begin(&lstats->syncp); in dev_lstats_add()
2955 u64_stats_add(&lstats->bytes, len); in dev_lstats_add()
2956 u64_stats_inc(&lstats->packets); in dev_lstats_add()
2957 u64_stats_update_end(&lstats->syncp); in dev_lstats_add()
2963 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); in dev_dstats_rx_add()
2965 u64_stats_update_begin(&dstats->syncp); in dev_dstats_rx_add()
2966 u64_stats_inc(&dstats->rx_packets); in dev_dstats_rx_add()
2967 u64_stats_add(&dstats->rx_bytes, len); in dev_dstats_rx_add()
2968 u64_stats_update_end(&dstats->syncp); in dev_dstats_rx_add()
2973 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); in dev_dstats_rx_dropped()
2975 u64_stats_update_begin(&dstats->syncp); in dev_dstats_rx_dropped()
2976 u64_stats_inc(&dstats->rx_drops); in dev_dstats_rx_dropped()
2977 u64_stats_update_end(&dstats->syncp); in dev_dstats_rx_dropped()
2983 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); in dev_dstats_tx_add()
2985 u64_stats_update_begin(&dstats->syncp); in dev_dstats_tx_add()
2986 u64_stats_inc(&dstats->tx_packets); in dev_dstats_tx_add()
2987 u64_stats_add(&dstats->tx_bytes, len); in dev_dstats_tx_add()
2988 u64_stats_update_end(&dstats->syncp); in dev_dstats_tx_add()
2993 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); in dev_dstats_tx_dropped()
2995 u64_stats_update_begin(&dstats->syncp); in dev_dstats_tx_dropped()
2996 u64_stats_inc(&dstats->tx_drops); in dev_dstats_tx_dropped()
2997 u64_stats_update_end(&dstats->syncp); in dev_dstats_tx_dropped()
3008 u64_stats_init(&stat->syncp); \
3025 u64_stats_init(&stat->syncp); \
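
These helpers are meant to be called from the datapath once the device has allocated the matching per-CPU counters (dev->tstats, dev->lstats or dev->dstats); a sketch of typical call sites in a virtual device:

	/* RX path of a tstats-based device */
	dev_sw_netstats_rx_add(dev, skb->len);

	/* TX path of a loopback-style device using lstats */
	dev_lstats_add(dev, skb->len);

	/* drop accounting on a dstats-based device */
	dev_dstats_rx_dropped(dev);
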
3072 - we can use this e.g. to kick TCP sessions
3200 info->dev = dev; in netdev_notifier_info_init()
3201 info->extack = NULL; in netdev_notifier_info_init()
3207 return info->dev; in netdev_notifier_info_to_dev()
3213 return info->extack; in netdev_notifier_info_to_extack()
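
A sketch of a netdevice notifier consuming this info structure; the callback name is illustrative, the events and helpers are real:

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "came up\n");
		break;
	case NETDEV_GOING_DOWN:
		/* e.g. kick TCP sessions bound to this device */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};
/* registered with register_netdevice_notifier(&example_nb) */
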
3221 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
3223 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
3225 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
3227 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
3229 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
3231 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
3234 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
3241 for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \
3250 lh = dev->dev_list.next; in next_net_device()
3251 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device()
3260 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); in next_net_device_rcu()
3261 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device_rcu()
3266 return list_empty(&net->dev_base_head) ? NULL : in first_net_device()
3267 net_device_entry(net->dev_base_head.next); in first_net_device()
3272 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); in first_net_device_rcu()
3274 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in first_net_device_rcu()
3362 if (!dev->header_ops || !dev->header_ops->create) in dev_hard_header()
3365 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); in dev_hard_header()
3371 const struct net_device *dev = skb->dev; in dev_parse_header()
3373 if (!dev->header_ops || !dev->header_ops->parse) in dev_parse_header()
3375 return dev->header_ops->parse(skb, haddr); in dev_parse_header()
3380 const struct net_device *dev = skb->dev; in dev_parse_header_protocol()
3382 if (!dev->header_ops || !dev->header_ops->parse_protocol) in dev_parse_header_protocol()
3384 return dev->header_ops->parse_protocol(skb); in dev_parse_header_protocol()
3391 if (likely(len >= dev->hard_header_len)) in dev_validate_header()
3393 if (len < dev->min_header_len) in dev_validate_header()
3397 memset(ll_header + len, 0, dev->hard_header_len - len); in dev_validate_header()
3401 if (dev->header_ops && dev->header_ops->validate) in dev_validate_header()
3402 return dev->header_ops->validate(ll_header, len); in dev_validate_header()
3409 return dev->header_ops && dev->header_ops->create; in dev_has_header()
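
A sketch of how these wrappers are used when handing a raw buffer to a device, as packet sockets do; the drop label and dest_hw are illustrative:

	/* Build the link-layer header in front of the payload ... */
	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw, NULL, skb->len) < 0)
		goto drop;

	/* ... or, for a caller-supplied header, check that it is plausible
	 * before the device gets to see it.
	 */
	if (!dev_validate_header(dev, skb->data, skb->len))
		goto drop;
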
3413 * Incoming packets are placed on per-CPU queues
3478 return current->net_xmit.recursion; in dev_recursion_level()
3490 for (i = 0; i < dev->num_tx_queues; i++) in netif_tx_schedule_all()
3496 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_start_queue()
3500 * netif_start_queue - allow transmit
3514 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_start_all_queues()
3523 * netif_wake_queue - restart transmit
3538 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_wake_all_queues()
3547 WRITE_ONCE(dev_queue->trans_start, jiffies); in netif_tx_stop_queue()
3553 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_stop_queue()
3557 * netif_stop_queue - stop transmitting packets
3572 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_queue_stopped()
3576 * netif_queue_stopped - test if transmit queue is flow-blocked
3588 return dev_queue->state & QUEUE_STATE_ANY_XOFF; in netif_xmit_stopped()
3594 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; in netif_xmit_frozen_or_stopped()
3600 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; in netif_xmit_frozen_or_drv_stopped()
3604 * netdev_queue_set_dql_min_limit - set dql minimum limit
3617 dev_queue->dql.min_limit = min_limit; in netdev_queue_set_dql_min_limit()
3624 /* Drivers not migrated to BQL will return 0, too. */ in netdev_queue_dql_avail()
3625 return dql_avail(&txq->dql); in netdev_queue_dql_avail()
3632 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3641 prefetchw(&dev_queue->dql.num_queued); in netdev_txq_bql_enqueue_prefetchw()
3646 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3655 prefetchw(&dev_queue->dql.limit); in netdev_txq_bql_complete_prefetchw()
3660 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3673 dql_queued(&dev_queue->dql, bytes); in netdev_tx_sent_queue()
3675 if (likely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3679 WRITE_ONCE(dev_queue->trans_start, jiffies); in netdev_tx_sent_queue()
3684 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3694 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3695 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3711 dql_queued(&dev_queue->dql, bytes); in __netdev_tx_sent_queue()
3720 * netdev_sent_queue - report the number of bytes queued to hardware
3743 * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
3758 dql_completed(&dev_queue->dql, bytes); in netdev_tx_completed_queue()
3767 if (unlikely(dql_avail(&dev_queue->dql) < 0)) in netdev_tx_completed_queue()
3770 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) in netdev_tx_completed_queue()
3776 * netdev_completed_queue - report bytes and packets completed by device
3794 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); in netdev_tx_reset_queue()
3795 dql_reset(&q->dql); in netdev_tx_reset_queue()
3800 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
3811 * netdev_reset_queue - reset the packets and bytes count of a network device
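
The intended BQL pairing, sketched for a single queue; the packet/byte bookkeeping variables are illustrative:

	/* in ndo_start_xmit(), after posting the descriptor */
	netdev_tx_sent_queue(txq, skb->len);

	/* in the TX completion handler, after reclaiming descriptors */
	netdev_tx_completed_queue(txq, pkts_done, bytes_done);

	/* when the ring is torn down or reconfigured */
	netdev_tx_reset_queue(txq);
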
3823 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3832 if (unlikely(queue_index >= dev->real_num_tx_queues)) { in netdev_cap_txqueue()
3834 dev->name, queue_index, in netdev_cap_txqueue()
3835 dev->real_num_tx_queues); in netdev_cap_txqueue()
3843 * netif_running - test if up
3850 return test_bit(__LINK_STATE_START, &dev->state); in netif_running()
3861 * netif_start_subqueue - allow sending packets on subqueue
3875 * netif_stop_subqueue - stop sending packets on subqueue
3888 * __netif_subqueue_stopped - test status of subqueue
3903 * netif_subqueue_stopped - test status of subqueue
3916 * netif_wake_subqueue - allow sending packets on subqueue
3936 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3952 * netif_attr_test_online - Test for online CPU/Rx queue
3972 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3983 /* -1 is a legal arg here. */ in netif_attrmask_next()
3984 if (n != -1) in netif_attrmask_next()
3994 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
4007 /* -1 is a legal arg here. */ in netif_attrmask_next_and()
4008 if (n != -1) in netif_attrmask_next_and()
4037 * netif_is_multiqueue - test if device has multiple transmit queues
4044 return dev->num_tx_queues > 1; in netif_is_multiqueue()
4055 dev->real_num_rx_queues = rxqs; in netif_set_real_num_rx_queues()
4124 kfree_skb(napi->skb); in napi_free_frags()
4125 napi->skb = NULL; in napi_free_frags()
4203 if (!(dev->flags & IFF_UP)) in __is_skb_forwardable()
4209 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; in __is_skb_forwardable()
4210 if (skb->len <= len) in __is_skb_forwardable()
4247 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); in DEV_CORE_STATS_INC()
4248 skb->priority = 0; in DEV_CORE_STATS_INC()
4259 this_cpu_dec(*dev->pcpu_refcnt); in __dev_put()
4261 refcount_dec(&dev->dev_refcnt); in __dev_put()
4270 this_cpu_inc(*dev->pcpu_refcnt); in __dev_hold()
4272 refcount_inc(&dev->dev_refcnt); in __dev_hold()
4282 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); in __netdev_tracker_alloc()
4293 refcount_dec(&dev->refcnt_tracker.no_tracker); in netdev_tracker_alloc()
4302 ref_tracker_free(&dev->refcnt_tracker, tracker); in netdev_tracker_free()
4325 * dev_hold - get reference to device
4337 * dev_put - release reference to device
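
With reference tracking enabled, the tracked variants pair a tracker cookie with each reference; a sketch assuming netdev_hold()/netdev_put(), the tracked counterparts declared alongside these helpers:

	netdevice_tracker tracker;

	netdev_hold(dev, &tracker, GFP_KERNEL);	/* take and track a reference */
	/* ... use dev ... */
	netdev_put(dev, &tracker);		/* drop the same tracked reference */
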
4376 * linkwatch_sync_dev - sync linkwatch for the given device
4385 * netif_carrier_ok - test if carrier present
4392 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); in netif_carrier_ok()
4404 * netif_dormant_on - mark device as dormant.
4411 * in a "pending" state, waiting for some external event. For "on-
4417 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_on()
4422 * netif_dormant_off - set device as not dormant.
4429 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_off()
4434 * netif_dormant - test if device is dormant
4441 return test_bit(__LINK_STATE_DORMANT, &dev->state); in netif_dormant()
4446 * netif_testing_on - mark device as under test.
4457 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_on()
4462 * netif_testing_off - set device as not under test.
4469 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_off()
4474 * netif_testing - test if device is under test
4481 return test_bit(__LINK_STATE_TESTING, &dev->state); in netif_testing()
4486 * netif_oper_up - test if device is operational
4493 unsigned int operstate = READ_ONCE(dev->operstate); in netif_oper_up()
4500 * netif_device_present - is device available or removed
4507 return test_bit(__LINK_STATE_PRESENT, &dev->state); in netif_device_present()
4562 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4563 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4564 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4565 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4566 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4567 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4568 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4569 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4570 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4571 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4572 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4573 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4574 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4575 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4576 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4586 return (1U << debug_value) - 1; in netif_msg_init()
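
A driver typically wires these up once at probe time and then gates its messages; a sketch where the module parameter and priv layout are illustrative:

static int debug = -1;		/* -1 means: use the driver's default bitmap */
module_param(debug, int, 0644);

	/* in probe */
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE | NETIF_MSG_LINK);

	/* later, e.g. in the link change handler */
	if (netif_msg_link(priv))
		netdev_info(dev, "link is up\n");
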
4591 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
4593 WRITE_ONCE(txq->xmit_lock_owner, cpu); in __netif_tx_lock()
4598 __acquire(&txq->_xmit_lock); in __netif_tx_acquire()
4604 __release(&txq->_xmit_lock); in __netif_tx_release()
4609 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
4611 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); in __netif_tx_lock_bh()
4616 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
4620 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); in __netif_tx_trylock()
4628 WRITE_ONCE(txq->xmit_lock_owner, -1); in __netif_tx_unlock()
4629 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
4635 WRITE_ONCE(txq->xmit_lock_owner, -1); in __netif_tx_unlock_bh()
4636 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
4640 * txq->trans_start can be read locklessly from dev_watchdog()
4644 if (txq->xmit_lock_owner != -1) in txq_trans_update()
4645 WRITE_ONCE(txq->trans_start, jiffies); in txq_trans_update()
4652 if (READ_ONCE(txq->trans_start) != now) in txq_trans_cond_update()
4653 WRITE_ONCE(txq->trans_start, now); in txq_trans_cond_update()
4656 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4665 * netif_tx_lock - grab network device transmit lock
4687 if (!(dev)->lltx) { \
4695 (!(dev)->lltx ? \
4700 if (!(dev)->lltx) { \
4714 spin_lock(&dev->tx_global_lock); in netif_tx_disable()
4715 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_disable()
4722 spin_unlock(&dev->tx_global_lock); in netif_tx_disable()
4731 nest_level = dev->nested_level; in netif_addr_lock()
4733 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock()
4741 nest_level = dev->nested_level; in netif_addr_lock_bh()
4744 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock_bh()
4749 spin_unlock(&dev->addr_list_lock); in netif_addr_unlock()
4754 spin_unlock_bh(&dev->addr_list_lock); in netif_addr_unlock_bh()
4762 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4771 /* Support for loadable net-drivers */
4788 /* General hardware address lists handling functions */
4817 /* Functions used for device addresses handling */
4829 __dev_addr_set(dev, addr, dev->addr_len); in dev_addr_set()
4837 /* Functions used for unicast addresses handling */
4848 * __dev_uc_sync - Synchronize device's unicast list
4862 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); in __dev_uc_sync()
4866 * __dev_uc_unsync - Remove synchronized addresses from device
4876 __hw_addr_unsync_dev(&dev->uc, dev, unsync); in __dev_uc_unsync()
4879 /* Functions used for multicast addresses handling */
4892 * __dev_mc_sync - Synchronize device's multicast list
4906 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); in __dev_mc_sync()
4910 * __dev_mc_unsync - Remove synchronized addresses from device
4920 __hw_addr_unsync_dev(&dev->mc, dev, unsync); in __dev_mc_unsync()
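
Both sync helpers are designed to be called from a driver's ndo_set_rx_mode() with a pair of per-address callbacks; a sketch where the filter-programming callbacks are illustrative:

static int example_addr_sync(struct net_device *dev, const unsigned char *addr)
{
	return 0;	/* program @addr into the hardware filter here */
}

static int example_addr_unsync(struct net_device *dev, const unsigned char *addr)
{
	return 0;	/* remove @addr from the hardware filter here */
}

static void example_set_rx_mode(struct net_device *dev)
{
	__dev_uc_sync(dev, example_addr_sync, example_addr_unsync);
	__dev_mc_sync(dev, example_addr_sync, example_addr_unsync);
}
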
4963 for (iter = &(dev)->adj_list.upper, \
4984 for (iter = (dev)->adj_list.lower.next, \
4990 for (iter = &(dev)->adj_list.lower, \
4999 for (iter = (dev)->adj_list.lower.next, \
5116 const struct net_device_ops *ops = dev->netdev_ops; in netdev_get_tstamp()
5118 if (ops->ndo_get_tstamp) in netdev_get_tstamp()
5119 return ops->ndo_get_tstamp(dev, hwtstamps, cycles); in netdev_get_tstamp()
5121 return hwtstamps->hwtstamp; in netdev_get_tstamp()
5137 current->net_xmit.more = more; in netdev_xmit_set_more()
5142 return current->net_xmit.more; in netdev_xmit_more()
5151 return ops->ndo_start_xmit(skb, dev); in __netdev_start_xmit()
5157 const struct net_device_ops *ops = dev->netdev_ops; in netdev_start_xmit()
5192 return (dev->features & ~dev->hw_features) | dev->wanted_features; in netdev_get_wanted_features()
5250 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && in skb_gso_ok()
5258 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && in netif_needs_gso()
5259 (skb->ip_summed != CHECKSUM_UNNECESSARY))); in netif_needs_gso()
5271 return skb->protocol == htons(ETH_P_IPV6) ? in netif_get_gro_max_size()
5272 READ_ONCE(dev->gro_max_size) : in netif_get_gro_max_size()
5273 READ_ONCE(dev->gro_ipv4_max_size); in netif_get_gro_max_size()
5280 return skb->protocol == htons(ETH_P_IPV6) ? in netif_get_gso_max_size()
5281 READ_ONCE(dev->gso_max_size) : in netif_get_gso_max_size()
5282 READ_ONCE(dev->gso_ipv4_max_size); in netif_get_gso_max_size()
5287 return dev->priv_flags & IFF_MACSEC; in netif_is_macsec()
5292 return dev->priv_flags & IFF_MACVLAN; in netif_is_macvlan()
5297 return dev->priv_flags & IFF_MACVLAN_PORT; in netif_is_macvlan_port()
5302 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; in netif_is_bond_master()
5307 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; in netif_is_bond_slave()
5312 return dev->priv_flags & IFF_SUPP_NOFCS; in netif_supports_nofcs()
5317 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; in netif_has_l3_rx_handler()
5322 return dev->priv_flags & IFF_L3MDEV_MASTER; in netif_is_l3_master()
5327 return dev->priv_flags & IFF_L3MDEV_SLAVE; in netif_is_l3_slave()
5334 return dev->ifindex; in dev_sdif()
5341 return dev->priv_flags & IFF_EBRIDGE; in netif_is_bridge_master()
5346 return dev->priv_flags & IFF_BRIDGE_PORT; in netif_is_bridge_port()
5351 return dev->priv_flags & IFF_OPENVSWITCH; in netif_is_ovs_master()
5356 return dev->priv_flags & IFF_OVS_DATAPATH; in netif_is_ovs_port()
5371 return dev->priv_flags & IFF_TEAM; in netif_is_team_master()
5376 return dev->priv_flags & IFF_TEAM_PORT; in netif_is_team_port()
5391 return dev->priv_flags & IFF_RXFH_CONFIGURED; in netif_is_rxfh_configured()
5396 return dev->priv_flags & IFF_FAILOVER; in netif_is_failover()
5401 return dev->priv_flags & IFF_FAILOVER_SLAVE; in netif_is_failover_slave()
5407 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); in netif_keep_dst()
5425 if (!dev->name[0] || strchr(dev->name, '%')) in netdev_name()
5427 return dev->name; in netdev_name()
5432 u8 reg_state = READ_ONCE(dev->reg_state); in netdev_reg_state()
5443 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state); in netdev_reg_state()
5448 MODULE_ALIAS("netdev-" device)
5483 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
5489 /* Note: Avoid these macros in the fast path; prefer per-CPU or per-queue counters. */
5490 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5492 atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
5493 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
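
Usage stays on the slow path, e.g. in a tunnel's error handling or its ndo_get_stats64() (a sketch; stats is the rtnl_link_stats64 argument there):

	DEV_STATS_INC(dev, tx_dropped);
	stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
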