
1 // SPDX-License-Identifier: GPL-2.0-only
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
29 #include <linux/dma-mapping.h>
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
71 static int debug = -1;
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 static int phyaddr = -1;
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
153 ret = clk_prepare_enable(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
156 ret = clk_prepare_enable(priv->plat->pclk); in stmmac_bus_clks_config()
158 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
161 if (priv->plat->clks_config) { in stmmac_bus_clks_config()
162 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
164 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
165 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
170 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
171 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
172 if (priv->plat->clks_config) in stmmac_bus_clks_config()
173 priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
181 * stmmac_verify_args - verify the driver parameters.
201 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in __stmmac_disable_all_queues()
202 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in __stmmac_disable_all_queues()
207 struct stmmac_channel *ch = &priv->channel[queue]; in __stmmac_disable_all_queues()
210 test_bit(queue, priv->af_xdp_zc_qps)) { in __stmmac_disable_all_queues()
211 napi_disable(&ch->rxtx_napi); in __stmmac_disable_all_queues()
216 napi_disable(&ch->rx_napi); in __stmmac_disable_all_queues()
218 napi_disable(&ch->tx_napi); in __stmmac_disable_all_queues()
223 * stmmac_disable_all_queues - Disable all queues
228 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_disable_all_queues()
234 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_disable_all_queues()
235 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
245 * stmmac_enable_all_queues - Enable all queues
250 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_enable_all_queues()
251 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_all_queues()
256 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_all_queues()
259 test_bit(queue, priv->af_xdp_zc_qps)) { in stmmac_enable_all_queues()
260 napi_enable(&ch->rxtx_napi); in stmmac_enable_all_queues()
265 napi_enable(&ch->rx_napi); in stmmac_enable_all_queues()
267 napi_enable(&ch->tx_napi); in stmmac_enable_all_queues()
273 if (!test_bit(STMMAC_DOWN, &priv->state) && in stmmac_service_event_schedule()
274 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) in stmmac_service_event_schedule()
275 queue_work(priv->wq, &priv->service_task); in stmmac_service_event_schedule()
280 netif_carrier_off(priv->dev); in stmmac_global_err()
281 set_bit(STMMAC_RESET_REQUESTED, &priv->state); in stmmac_global_err()
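The two helpers above form a standard one-shot deferral idiom: the SCHED bit guarantees at most one queued instance of the service task, and the global-error path merely records a reset request and kicks the task. A minimal sketch of the same idiom, with illustrative names (not the driver's):

#include <linux/bitops.h>
#include <linux/workqueue.h>

/* Illustrative state bits, standing in for STMMAC_DOWN etc. */
#define F_DOWN	0
#define F_SCHED	1
#define F_RESET	2

struct svc {
	unsigned long state;
	struct workqueue_struct *wq;
	struct work_struct task;
};

/* Queue the service task at most once, never while going down. */
static void svc_schedule(struct svc *s)
{
	if (!test_bit(F_DOWN, &s->state) &&
	    !test_and_set_bit(F_SCHED, &s->state))
		queue_work(s->wq, &s->task);
}

/* Record the reset request, then let the service task do the work. */
static void svc_global_err(struct svc *s)
{
	set_bit(F_RESET, &s->state);
	svc_schedule(s);
}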
286 * stmmac_clk_csr_set - dynamically set the MDC clock
293 * changed at run-time and it is fixed (as reported in the driver
301 clk_rate = clk_get_rate(priv->plat->stmmac_clk); in stmmac_clk_csr_set()
310 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { in stmmac_clk_csr_set()
312 priv->clk_csr = STMMAC_CSR_20_35M; in stmmac_clk_csr_set()
314 priv->clk_csr = STMMAC_CSR_35_60M; in stmmac_clk_csr_set()
316 priv->clk_csr = STMMAC_CSR_60_100M; in stmmac_clk_csr_set()
318 priv->clk_csr = STMMAC_CSR_100_150M; in stmmac_clk_csr_set()
320 priv->clk_csr = STMMAC_CSR_150_250M; in stmmac_clk_csr_set()
322 priv->clk_csr = STMMAC_CSR_250_300M; in stmmac_clk_csr_set()
324 priv->clk_csr = STMMAC_CSR_300_500M; in stmmac_clk_csr_set()
326 priv->clk_csr = STMMAC_CSR_500_800M; in stmmac_clk_csr_set()
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { in stmmac_clk_csr_set()
331 priv->clk_csr = 0x03; in stmmac_clk_csr_set()
333 priv->clk_csr = 0x02; in stmmac_clk_csr_set()
335 priv->clk_csr = 0x01; in stmmac_clk_csr_set()
337 priv->clk_csr = 0; in stmmac_clk_csr_set()
340 if (priv->plat->has_xgmac) { in stmmac_clk_csr_set()
342 priv->clk_csr = 0x5; in stmmac_clk_csr_set()
344 priv->clk_csr = 0x4; in stmmac_clk_csr_set()
346 priv->clk_csr = 0x3; in stmmac_clk_csr_set()
348 priv->clk_csr = 0x2; in stmmac_clk_csr_set()
350 priv->clk_csr = 0x1; in stmmac_clk_csr_set()
352 priv->clk_csr = 0x0; in stmmac_clk_csr_set()
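The ladder above picks the MDC clock-range field from the measured CSR clock. A compact userspace sketch of the same selection; the MHz boundaries are an assumption read off the STMMAC_CSR_* constant names, and the enum values here are illustrative rather than the register encoding:

#include <stdio.h>

enum csr_range {
	CSR_20_35M, CSR_35_60M, CSR_60_100M,
	CSR_100_150M, CSR_150_250M, CSR_250_300M,
};

/* Map a CSR clock rate (Hz) to its divider range, mirroring the
 * if/else ladder above (upper ranges omitted for brevity). */
static enum csr_range csr_range_for(unsigned long hz)
{
	if (hz < 35000000)
		return CSR_20_35M;
	if (hz < 60000000)
		return CSR_35_60M;
	if (hz < 100000000)
		return CSR_60_100M;
	if (hz < 150000000)
		return CSR_100_150M;
	if (hz < 250000000)
		return CSR_150_250M;
	return CSR_250_300M;
}

int main(void)
{
	printf("%d\n", csr_range_for(125000000)); /* prints CSR_100_150M */
	return 0;
}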
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_avail()
367 if (tx_q->dirty_tx > tx_q->cur_tx) in stmmac_tx_avail()
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1; in stmmac_tx_avail()
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; in stmmac_tx_avail()
376 * stmmac_rx_dirty - Get RX queue dirty
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_dirty()
385 if (rx_q->dirty_rx <= rx_q->cur_rx) in stmmac_rx_dirty()
386 dirty = rx_q->cur_rx - rx_q->dirty_rx; in stmmac_rx_dirty()
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; in stmmac_rx_dirty()
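Both helpers above are the usual circular-ring bookkeeping: cur and dirty are free-running indices modulo the ring size, and one slot stays unused so that cur == dirty means "empty". A self-contained sketch of the same arithmetic; the ring size and index values are illustrative:

#include <stdio.h>

#define RING_SIZE 512	/* stands in for dma_conf.dma_tx_size */

/* Free TX descriptors, mirroring stmmac_tx_avail(): one slot is
 * always reserved to distinguish full from empty. */
static unsigned int tx_avail(unsigned int cur, unsigned int dirty)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return RING_SIZE - cur + dirty - 1;
}

/* RX descriptors owned by software and awaiting refill,
 * mirroring stmmac_rx_dirty(). */
static unsigned int rx_dirty(unsigned int cur, unsigned int dirty)
{
	if (dirty <= cur)
		return cur - dirty;
	return RING_SIZE - dirty + cur;
}

int main(void)
{
	/* Producer at 10, consumer at 500: the ring has wrapped. */
	printf("avail=%u\n", tx_avail(10, 500));	/* 489 */
	printf("dirty=%u\n", rx_dirty(10, 500));	/* 22  */
	return 0;
}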
395 stmmac_set_eee_lpi_timer(priv, priv->hw, 0); in stmmac_disable_hw_lpi_timer()
400 stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer); in stmmac_enable_hw_lpi_timer()
405 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_eee_tx_busy()
410 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_eee_tx_busy()
412 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_eee_tx_busy()
421 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_restart_sw_lpi_timer()
425 * stmmac_try_to_start_sw_lpi - check and enter LPI mode
438 if (!priv->tx_path_in_lpi_mode) in stmmac_try_to_start_sw_lpi()
439 stmmac_set_eee_mode(priv, priv->hw, in stmmac_try_to_start_sw_lpi()
440 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); in stmmac_try_to_start_sw_lpi()
444 * stmmac_stop_sw_lpi - stop transmitting LPI
446 * Description: When using software-controlled LPI, stop transmitting LPI state.
450 stmmac_reset_eee_mode(priv, priv->hw); in stmmac_stop_sw_lpi()
451 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_stop_sw_lpi()
452 priv->tx_path_in_lpi_mode = false; in stmmac_stop_sw_lpi()
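Together, the routines above implement a deferred-idle pattern: TX activity stops LPI and (re)arms the timer, and only a quiet interval lets the expiry handler enter LPI. A reduced sketch of that pattern with the kernel timer API; the context struct and handlers are illustrative, not the driver's:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct lpi_ctx {
	struct timer_list timer;
	unsigned long timeout;	/* in jiffies, like STMMAC_LPI_T() */
	bool in_lpi;
};

/* TX activity: leave LPI and push the idle deadline out. */
static void lpi_touch(struct lpi_ctx *c)
{
	c->in_lpi = false;	/* the driver would reset EEE mode here */
	mod_timer(&c->timer, jiffies + c->timeout);
}

/* Timer expiry: TX stayed quiet for the whole window, enter LPI. */
static void lpi_expired(struct timer_list *t)
{
	struct lpi_ctx *c = from_timer(c, t, timer);

	c->in_lpi = true;	/* the driver would set EEE mode here */
}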
456 * stmmac_eee_ctrl_timer - EEE TX SW timer.
470 * stmmac_eee_init - init EEE
480 priv->eee_active = active; in stmmac_eee_init()
483 if (!priv->dma_cap.eee) { in stmmac_eee_init()
484 priv->eee_enabled = false; in stmmac_eee_init()
488 mutex_lock(&priv->lock); in stmmac_eee_init()
491 if (!priv->eee_active) { in stmmac_eee_init()
492 if (priv->eee_enabled) { in stmmac_eee_init()
493 netdev_dbg(priv->dev, "disable EEE\n"); in stmmac_eee_init()
494 priv->eee_sw_timer_en = false; in stmmac_eee_init()
496 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
497 stmmac_set_eee_timer(priv, priv->hw, 0, in stmmac_eee_init()
499 if (priv->hw->xpcs) in stmmac_eee_init()
500 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
501 priv->plat->mult_fact_100ns, in stmmac_eee_init()
504 priv->eee_enabled = false; in stmmac_eee_init()
505 mutex_unlock(&priv->lock); in stmmac_eee_init()
509 if (priv->eee_active && !priv->eee_enabled) { in stmmac_eee_init()
510 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, in stmmac_eee_init()
512 if (priv->hw->xpcs) in stmmac_eee_init()
513 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
514 priv->plat->mult_fact_100ns, in stmmac_eee_init()
518 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { in stmmac_eee_init()
520 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
521 priv->tx_path_in_lpi_mode = false; in stmmac_eee_init()
522 priv->eee_sw_timer_en = false; in stmmac_eee_init()
526 priv->eee_sw_timer_en = true; in stmmac_eee_init()
531 priv->eee_enabled = true; in stmmac_eee_init()
533 mutex_unlock(&priv->lock); in stmmac_eee_init()
534 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); in stmmac_eee_init()
537 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
552 if (!priv->hwts_tx_en) in stmmac_get_tx_hwtstamp()
556 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) in stmmac_get_tx_hwtstamp()
561 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); in stmmac_get_tx_hwtstamp()
563 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_get_tx_hwtstamp()
568 ns -= priv->plat->cdc_error_adj; in stmmac_get_tx_hwtstamp()
573 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); in stmmac_get_tx_hwtstamp()
579 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
595 if (!priv->hwts_rx_en) in stmmac_get_rx_hwtstamp()
598 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_get_rx_hwtstamp()
602 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { in stmmac_get_rx_hwtstamp()
603 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_get_rx_hwtstamp()
605 ns -= priv->plat->cdc_error_adj; in stmmac_get_rx_hwtstamp()
607 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); in stmmac_get_rx_hwtstamp()
610 shhwtstamp->hwtstamp = ns_to_ktime(ns); in stmmac_get_rx_hwtstamp()
612 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); in stmmac_get_rx_hwtstamp()
617 * stmmac_hwtstamp_set - control hardware timestamping.
625 * 0 on success and an appropriate negative integer on failure. in stmmac_hwtstamp_set()
640 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { in stmmac_hwtstamp_set()
641 netdev_alert(priv->dev, "No support for HW time stamping\n"); in stmmac_hwtstamp_set()
642 priv->hwts_tx_en = 0; in stmmac_hwtstamp_set()
643 priv->hwts_rx_en = 0; in stmmac_hwtstamp_set()
645 return -EOPNOTSUPP; in stmmac_hwtstamp_set()
648 if (copy_from_user(&config, ifr->ifr_data, in stmmac_hwtstamp_set()
650 return -EFAULT; in stmmac_hwtstamp_set()
652 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", in stmmac_hwtstamp_set()
657 return -ERANGE; in stmmac_hwtstamp_set()
659 if (priv->adv_ts) { in stmmac_hwtstamp_set()
740 if (priv->synopsys_id < DWMAC_CORE_4_10) in stmmac_hwtstamp_set()
780 return -ERANGE; in stmmac_hwtstamp_set()
793 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); in stmmac_hwtstamp_set()
794 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; in stmmac_hwtstamp_set()
796 priv->systime_flags = STMMAC_HWTS_ACTIVE; in stmmac_hwtstamp_set()
798 if (priv->hwts_tx_en || priv->hwts_rx_en) { in stmmac_hwtstamp_set()
799 priv->systime_flags |= tstamp_all | ptp_v2 | in stmmac_hwtstamp_set()
805 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); in stmmac_hwtstamp_set()
807 memcpy(&priv->tstamp_config, &config, sizeof(config)); in stmmac_hwtstamp_set()
809 return copy_to_user(ifr->ifr_data, &config, in stmmac_hwtstamp_set()
810 sizeof(config)) ? -EFAULT : 0; in stmmac_hwtstamp_set()
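The handler above follows the standard SIOCSHWTSTAMP contract: copy a struct hwtstamp_config in from userspace, validate and program the hardware, then copy the (possibly widened) config back. A minimal userspace caller, assuming an interface named "eth0":

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* the driver may have widened rx_filter, e.g. to ALL */
		printf("rx_filter in effect: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}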
814 * stmmac_hwtstamp_get - read hardware timestamping.
825 struct hwtstamp_config *config = &priv->tstamp_config; in stmmac_hwtstamp_get()
827 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_hwtstamp_get()
828 return -EOPNOTSUPP; in stmmac_hwtstamp_get()
830 return copy_to_user(ifr->ifr_data, config, in stmmac_hwtstamp_get()
831 sizeof(*config)) ? -EFAULT : 0; in stmmac_hwtstamp_get()
835 * stmmac_init_tstamp_counter - init hardware timestamping counter
846 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_tstamp_counter()
851 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_init_tstamp_counter()
852 return -EOPNOTSUPP; in stmmac_init_tstamp_counter()
854 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); in stmmac_init_tstamp_counter()
855 priv->systime_flags = systime_flags; in stmmac_init_tstamp_counter()
858 stmmac_config_sub_second_increment(priv, priv->ptpaddr, in stmmac_init_tstamp_counter()
859 priv->plat->clk_ptp_rate, in stmmac_init_tstamp_counter()
864 priv->sub_second_inc = sec_inc; in stmmac_init_tstamp_counter()
872 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); in stmmac_init_tstamp_counter()
873 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); in stmmac_init_tstamp_counter()
879 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); in stmmac_init_tstamp_counter()
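The addend programmed above is the usual fixed-point frequency synthesis for these MACs: every PTP clock cycle adds default_addend to a 32-bit accumulator, and each accumulator rollover advances system time by one sub-second increment, so addend = 2^32 * f_rollover / clk_ptp_rate. The derivation of temp from sec_inc is elided in the listing; the figures below are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_ptp_rate = 250000000;	/* 250 MHz PTP clock */
	uint64_t sec_inc      = 20;		/* 20 ns per rollover */
	uint64_t f_rollover   = 1000000000ULL / sec_inc; /* 50 MHz */
	uint32_t addend       = (uint32_t)((f_rollover << 32) / clk_ptp_rate);

	/* 2^32 * 50e6 / 250e6 = 2^32 / 5 = 858993459 */
	printf("default_addend = %u\n", addend);
	return 0;
}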
886 * stmmac_init_ptp - init PTP
894 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_ptp()
897 if (priv->plat->ptp_clk_freq_config) in stmmac_init_ptp()
898 priv->plat->ptp_clk_freq_config(priv); in stmmac_init_ptp()
904 priv->adv_ts = 0; in stmmac_init_ptp()
906 if (xmac && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
907 priv->adv_ts = 1; in stmmac_init_ptp()
909 else if (priv->extend_desc && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
910 priv->adv_ts = 1; in stmmac_init_ptp()
912 if (priv->dma_cap.time_stamp) in stmmac_init_ptp()
913 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); in stmmac_init_ptp()
915 if (priv->adv_ts) in stmmac_init_ptp()
916 netdev_info(priv->dev, in stmmac_init_ptp()
917 "IEEE 1588-2008 Advanced Timestamp supported\n"); in stmmac_init_ptp()
919 priv->hwts_tx_en = 0; in stmmac_init_ptp()
920 priv->hwts_rx_en = 0; in stmmac_init_ptp()
922 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_init_ptp()
930 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_release_ptp()
935 * stmmac_mac_flow_ctrl - Configure flow control in all queues
942 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_mac_flow_ctrl()
944 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, in stmmac_mac_flow_ctrl()
945 priv->pause, tx_cnt); in stmmac_mac_flow_ctrl()
951 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_get_caps()
953 /* Refresh the MAC-specific capabilities */ in stmmac_mac_get_caps()
956 config->mac_capabilities = priv->hw->link.caps; in stmmac_mac_get_caps()
958 if (priv->plat->max_speed) in stmmac_mac_get_caps()
959 phylink_limit_mac_speed(config, priv->plat->max_speed); in stmmac_mac_get_caps()
961 return config->mac_capabilities; in stmmac_mac_get_caps()
967 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_select_pcs()
970 if (priv->plat->select_pcs) { in stmmac_mac_select_pcs()
971 pcs = priv->plat->select_pcs(priv, interface); in stmmac_mac_select_pcs()
988 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_down()
990 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_mac_link_down()
991 if (priv->dma_cap.eee) in stmmac_mac_link_down()
992 stmmac_set_eee_pls(priv, priv->hw, false); in stmmac_mac_link_down()
1004 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_up()
1007 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_mac_link_up()
1008 priv->plat->serdes_powerup) in stmmac_mac_link_up()
1009 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); in stmmac_mac_link_up()
1011 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1012 ctrl = old_ctrl & ~priv->hw->link.speed_mask; in stmmac_mac_link_up()
1017 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1020 ctrl |= priv->hw->link.xgmii.speed5000; in stmmac_mac_link_up()
1023 ctrl |= priv->hw->link.xgmii.speed2500; in stmmac_mac_link_up()
1031 ctrl |= priv->hw->link.xlgmii.speed100000; in stmmac_mac_link_up()
1034 ctrl |= priv->hw->link.xlgmii.speed50000; in stmmac_mac_link_up()
1037 ctrl |= priv->hw->link.xlgmii.speed40000; in stmmac_mac_link_up()
1040 ctrl |= priv->hw->link.xlgmii.speed25000; in stmmac_mac_link_up()
1043 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1046 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1049 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1057 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1060 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1063 ctrl |= priv->hw->link.speed100; in stmmac_mac_link_up()
1066 ctrl |= priv->hw->link.speed10; in stmmac_mac_link_up()
1073 priv->speed = speed; in stmmac_mac_link_up()
1075 if (priv->plat->fix_mac_speed) in stmmac_mac_link_up()
1076 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); in stmmac_mac_link_up()
1079 ctrl &= ~priv->hw->link.duplex; in stmmac_mac_link_up()
1081 ctrl |= priv->hw->link.duplex; in stmmac_mac_link_up()
1085 priv->flow_ctrl = FLOW_AUTO; in stmmac_mac_link_up()
1087 priv->flow_ctrl = FLOW_RX; in stmmac_mac_link_up()
1089 priv->flow_ctrl = FLOW_TX; in stmmac_mac_link_up()
1091 priv->flow_ctrl = FLOW_OFF; in stmmac_mac_link_up()
1096 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1098 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_mac_link_up()
1099 if (priv->dma_cap.eee) in stmmac_mac_link_up()
1100 stmmac_set_eee_pls(priv, priv->hw, true); in stmmac_mac_link_up()
1105 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_mac_link_up()
1111 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_disable_tx_lpi()
1119 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_enable_tx_lpi()
1121 priv->tx_lpi_timer = timer; in stmmac_mac_enable_tx_lpi()
1138 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1146 int interface = priv->plat->mac_interface; in stmmac_check_pcs_mode()
1148 if (priv->dma_cap.pcs) { in stmmac_check_pcs_mode()
1153 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); in stmmac_check_pcs_mode()
1154 priv->hw->pcs = STMMAC_PCS_RGMII; in stmmac_check_pcs_mode()
1156 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); in stmmac_check_pcs_mode()
1157 priv->hw->pcs = STMMAC_PCS_SGMII; in stmmac_check_pcs_mode()
1163 * stmmac_init_phy - PHY initialization
1177 if (!phylink_expects_phy(priv->phylink)) in stmmac_init_phy()
1180 fwnode = priv->plat->port_node; in stmmac_init_phy()
1182 fwnode = dev_fwnode(priv->device); in stmmac_init_phy()
1189 /* Some DT bindings do not set up the PHY handle. Let's try to in stmmac_init_phy()
1193 int addr = priv->plat->phy_addr; in stmmac_init_phy()
1197 netdev_err(priv->dev, "no phy found\n"); in stmmac_init_phy()
1198 return -ENODEV; in stmmac_init_phy()
1201 phydev = mdiobus_get_phy(priv->mii, addr); in stmmac_init_phy()
1203 netdev_err(priv->dev, "no phy at addr %d\n", addr); in stmmac_init_phy()
1204 return -ENODEV; in stmmac_init_phy()
1207 ret = phylink_connect_phy(priv->phylink, phydev); in stmmac_init_phy()
1210 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); in stmmac_init_phy()
1222 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) { in stmmac_init_phy()
1223 eee.tx_lpi_timer = priv->tx_lpi_timer; in stmmac_init_phy()
1224 phylink_ethtool_set_eee(priv->phylink, &eee); in stmmac_init_phy()
1228 if (!priv->plat->pmt) { in stmmac_init_phy()
1231 phylink_ethtool_get_wol(priv->phylink, &wol); in stmmac_init_phy()
1232 device_set_wakeup_capable(priv->device, !!wol.supported); in stmmac_init_phy()
1233 device_set_wakeup_enable(priv->device, !!wol.wolopts); in stmmac_init_phy()
1242 int mode = priv->plat->phy_interface; in stmmac_phy_setup()
1247 priv->phylink_config.dev = &priv->dev->dev; in stmmac_phy_setup()
1248 priv->phylink_config.type = PHYLINK_NETDEV; in stmmac_phy_setup()
1249 priv->phylink_config.mac_managed_pm = true; in stmmac_phy_setup()
1252 priv->phylink_config.mac_requires_rxc = true; in stmmac_phy_setup()
1254 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) in stmmac_phy_setup()
1255 priv->phylink_config.eee_rx_clk_stop_enable = true; in stmmac_phy_setup()
1257 mdio_bus_data = priv->plat->mdio_bus_data; in stmmac_phy_setup()
1259 priv->phylink_config.default_an_inband = in stmmac_phy_setup()
1260 mdio_bus_data->default_an_inband; in stmmac_phy_setup()
1265 __set_bit(mode, priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1268 if (priv->hw->xpcs) in stmmac_phy_setup()
1269 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs); in stmmac_phy_setup()
1271 pcs = priv->hw->phylink_pcs; in stmmac_phy_setup()
1274 phy_interface_or(priv->phylink_config.supported_interfaces, in stmmac_phy_setup()
1275 priv->phylink_config.supported_interfaces, in stmmac_phy_setup()
1276 pcs->supported_interfaces); in stmmac_phy_setup()
1278 if (priv->dma_cap.eee) { in stmmac_phy_setup()
1280 memcpy(priv->phylink_config.lpi_interfaces, in stmmac_phy_setup()
1281 priv->phylink_config.supported_interfaces, in stmmac_phy_setup()
1282 sizeof(priv->phylink_config.lpi_interfaces)); in stmmac_phy_setup()
1285 priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) | in stmmac_phy_setup()
1287 priv->phylink_config.lpi_timer_default = eee_timer * 1000; in stmmac_phy_setup()
1288 priv->phylink_config.eee_enabled_default = true; in stmmac_phy_setup()
1291 fwnode = priv->plat->port_node; in stmmac_phy_setup()
1293 fwnode = dev_fwnode(priv->device); in stmmac_phy_setup()
1295 phylink = phylink_create(&priv->phylink_config, fwnode, in stmmac_phy_setup()
1300 priv->phylink = phylink; in stmmac_phy_setup()
1307 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_display_rx_rings()
1314 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_display_rx_rings()
1318 if (priv->extend_desc) { in stmmac_display_rx_rings()
1319 head_rx = (void *)rx_q->dma_erx; in stmmac_display_rx_rings()
1322 head_rx = (void *)rx_q->dma_rx; in stmmac_display_rx_rings()
1327 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, in stmmac_display_rx_rings()
1328 rx_q->dma_rx_phy, desc_size); in stmmac_display_rx_rings()
1335 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_display_tx_rings()
1342 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_display_tx_rings()
1346 if (priv->extend_desc) { in stmmac_display_tx_rings()
1347 head_tx = (void *)tx_q->dma_etx; in stmmac_display_tx_rings()
1349 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in stmmac_display_tx_rings()
1350 head_tx = (void *)tx_q->dma_entx; in stmmac_display_tx_rings()
1353 head_tx = (void *)tx_q->dma_tx; in stmmac_display_tx_rings()
1357 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, in stmmac_display_tx_rings()
1358 tx_q->dma_tx_phy, desc_size); in stmmac_display_tx_rings()
1399 * stmmac_clear_rx_descriptors - clear RX descriptors
1410 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_clear_rx_descriptors()
1414 for (i = 0; i < dma_conf->dma_rx_size; i++) in stmmac_clear_rx_descriptors()
1415 if (priv->extend_desc) in stmmac_clear_rx_descriptors()
1416 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, in stmmac_clear_rx_descriptors()
1417 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1418 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1419 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1421 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], in stmmac_clear_rx_descriptors()
1422 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1423 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1424 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1428 * stmmac_clear_tx_descriptors - clear tx descriptors
1439 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_clear_tx_descriptors()
1443 for (i = 0; i < dma_conf->dma_tx_size; i++) { in stmmac_clear_tx_descriptors()
1444 int last = (i == (dma_conf->dma_tx_size - 1)); in stmmac_clear_tx_descriptors()
1447 if (priv->extend_desc) in stmmac_clear_tx_descriptors()
1448 p = &tx_q->dma_etx[i].basic; in stmmac_clear_tx_descriptors()
1449 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_clear_tx_descriptors()
1450 p = &tx_q->dma_entx[i].basic; in stmmac_clear_tx_descriptors()
1452 p = &tx_q->dma_tx[i]; in stmmac_clear_tx_descriptors()
1454 stmmac_init_tx_desc(priv, p, priv->mode, last); in stmmac_clear_tx_descriptors()
1459 * stmmac_clear_descriptors - clear descriptors
1468 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; in stmmac_clear_descriptors()
1469 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_clear_descriptors()
1482 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1497 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_init_rx_buffers()
1498 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_init_rx_buffers()
1501 if (priv->dma_cap.host_dma_width <= 32) in stmmac_init_rx_buffers()
1504 if (!buf->page) { in stmmac_init_rx_buffers()
1505 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1506 if (!buf->page) in stmmac_init_rx_buffers()
1507 return -ENOMEM; in stmmac_init_rx_buffers()
1508 buf->page_offset = stmmac_rx_offset(priv); in stmmac_init_rx_buffers()
1511 if (priv->sph && !buf->sec_page) { in stmmac_init_rx_buffers()
1512 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1513 if (!buf->sec_page) in stmmac_init_rx_buffers()
1514 return -ENOMEM; in stmmac_init_rx_buffers()
1516 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_init_rx_buffers()
1517 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_init_rx_buffers()
1519 buf->sec_page = NULL; in stmmac_init_rx_buffers()
1520 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_init_rx_buffers()
1523 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_init_rx_buffers()
1525 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_init_rx_buffers()
1526 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) in stmmac_init_rx_buffers()
1533 * stmmac_free_rx_buffer - free RX dma buffers
1542 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_free_rx_buffer()
1544 if (buf->page) in stmmac_free_rx_buffer()
1545 page_pool_put_full_page(rx_q->page_pool, buf->page, false); in stmmac_free_rx_buffer()
1546 buf->page = NULL; in stmmac_free_rx_buffer()
1548 if (buf->sec_page) in stmmac_free_rx_buffer()
1549 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); in stmmac_free_rx_buffer()
1550 buf->sec_page = NULL; in stmmac_free_rx_buffer()
1554 * stmmac_free_tx_buffer - free TX dma buffers
1564 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_free_tx_buffer()
1566 if (tx_q->tx_skbuff_dma[i].buf && in stmmac_free_tx_buffer()
1567 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { in stmmac_free_tx_buffer()
1568 if (tx_q->tx_skbuff_dma[i].map_as_page) in stmmac_free_tx_buffer()
1569 dma_unmap_page(priv->device, in stmmac_free_tx_buffer()
1570 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1571 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1574 dma_unmap_single(priv->device, in stmmac_free_tx_buffer()
1575 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1576 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1580 if (tx_q->xdpf[i] && in stmmac_free_tx_buffer()
1581 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_free_tx_buffer()
1582 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { in stmmac_free_tx_buffer()
1583 xdp_return_frame(tx_q->xdpf[i]); in stmmac_free_tx_buffer()
1584 tx_q->xdpf[i] = NULL; in stmmac_free_tx_buffer()
1587 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_free_tx_buffer()
1588 tx_q->xsk_frames_done++; in stmmac_free_tx_buffer()
1590 if (tx_q->tx_skbuff[i] && in stmmac_free_tx_buffer()
1591 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_free_tx_buffer()
1592 dev_kfree_skb_any(tx_q->tx_skbuff[i]); in stmmac_free_tx_buffer()
1593 tx_q->tx_skbuff[i] = NULL; in stmmac_free_tx_buffer()
1596 tx_q->tx_skbuff_dma[i].buf = 0; in stmmac_free_tx_buffer()
1597 tx_q->tx_skbuff_dma[i].map_as_page = false; in stmmac_free_tx_buffer()
1601 * dma_free_rx_skbufs - free RX dma buffers
1610 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_skbufs()
1613 for (i = 0; i < dma_conf->dma_rx_size; i++) in dma_free_rx_skbufs()
1621 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers()
1624 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers()
1628 if (priv->extend_desc) in stmmac_alloc_rx_buffers()
1629 p = &((rx_q->dma_erx + i)->basic); in stmmac_alloc_rx_buffers()
1631 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers()
1638 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers()
1645 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1654 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_xskbufs()
1657 for (i = 0; i < dma_conf->dma_rx_size; i++) { in dma_free_rx_xskbufs()
1658 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in dma_free_rx_xskbufs()
1660 if (!buf->xdp) in dma_free_rx_xskbufs()
1663 xsk_buff_free(buf->xdp); in dma_free_rx_xskbufs()
1664 buf->xdp = NULL; in dma_free_rx_xskbufs()
1672 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers_zc()
1681 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers_zc()
1686 if (priv->extend_desc) in stmmac_alloc_rx_buffers_zc()
1687 p = (struct dma_desc *)(rx_q->dma_erx + i); in stmmac_alloc_rx_buffers_zc()
1689 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers_zc()
1691 buf = &rx_q->buf_pool[i]; in stmmac_alloc_rx_buffers_zc()
1693 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1694 if (!buf->xdp) in stmmac_alloc_rx_buffers_zc()
1695 return -ENOMEM; in stmmac_alloc_rx_buffers_zc()
1697 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_alloc_rx_buffers_zc()
1699 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers_zc()
1707 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) in stmmac_get_xsk_pool()
1710 return xsk_get_pool_from_qid(priv->dev, queue); in stmmac_get_xsk_pool()
1714 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1727 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __init_dma_rx_desc_rings()
1730 netif_dbg(priv, probe, priv->dev, in __init_dma_rx_desc_rings()
1732 (u32)rx_q->dma_rx_phy); in __init_dma_rx_desc_rings()
1736 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1738 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1740 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1741 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1744 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1745 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1746 rx_q->queue_index); in __init_dma_rx_desc_rings()
1747 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1749 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1751 rx_q->page_pool)); in __init_dma_rx_desc_rings()
1752 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1753 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1754 rx_q->queue_index); in __init_dma_rx_desc_rings()
1757 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1759 * xdpsock TX-only. in __init_dma_rx_desc_rings()
1765 return -ENOMEM; in __init_dma_rx_desc_rings()
1769 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_rx_desc_rings()
1770 if (priv->extend_desc) in __init_dma_rx_desc_rings()
1771 stmmac_mode_init(priv, rx_q->dma_erx, in __init_dma_rx_desc_rings()
1772 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1773 dma_conf->dma_rx_size, 1); in __init_dma_rx_desc_rings()
1775 stmmac_mode_init(priv, rx_q->dma_rx, in __init_dma_rx_desc_rings()
1776 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1777 dma_conf->dma_rx_size, 0); in __init_dma_rx_desc_rings()
1788 u32 rx_count = priv->plat->rx_queues_to_use; in init_dma_rx_desc_rings()
1793 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1806 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in init_dma_rx_desc_rings()
1808 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1813 rx_q->buf_alloc_num = 0; in init_dma_rx_desc_rings()
1814 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
1816 queue--; in init_dma_rx_desc_rings()
1823 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1835 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __init_dma_tx_desc_rings()
1838 netif_dbg(priv, probe, priv->dev, in __init_dma_tx_desc_rings()
1840 (u32)tx_q->dma_tx_phy); in __init_dma_tx_desc_rings()
1843 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_tx_desc_rings()
1844 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1845 stmmac_mode_init(priv, tx_q->dma_etx, in __init_dma_tx_desc_rings()
1846 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1847 dma_conf->dma_tx_size, 1); in __init_dma_tx_desc_rings()
1848 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) in __init_dma_tx_desc_rings()
1849 stmmac_mode_init(priv, tx_q->dma_tx, in __init_dma_tx_desc_rings()
1850 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1851 dma_conf->dma_tx_size, 0); in __init_dma_tx_desc_rings()
1854 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1856 for (i = 0; i < dma_conf->dma_tx_size; i++) { in __init_dma_tx_desc_rings()
1859 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1860 p = &((tx_q->dma_etx + i)->basic); in __init_dma_tx_desc_rings()
1861 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __init_dma_tx_desc_rings()
1862 p = &((tx_q->dma_entx + i)->basic); in __init_dma_tx_desc_rings()
1864 p = tx_q->dma_tx + i; in __init_dma_tx_desc_rings()
1868 tx_q->tx_skbuff_dma[i].buf = 0; in __init_dma_tx_desc_rings()
1869 tx_q->tx_skbuff_dma[i].map_as_page = false; in __init_dma_tx_desc_rings()
1870 tx_q->tx_skbuff_dma[i].len = 0; in __init_dma_tx_desc_rings()
1871 tx_q->tx_skbuff_dma[i].last_segment = false; in __init_dma_tx_desc_rings()
1872 tx_q->tx_skbuff[i] = NULL; in __init_dma_tx_desc_rings()
1885 tx_queue_cnt = priv->plat->tx_queues_to_use; in init_dma_tx_desc_rings()
1894 * init_dma_desc_rings - init the RX/TX descriptor rings
1924 * dma_free_tx_skbufs - free TX dma buffers
1933 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in dma_free_tx_skbufs()
1936 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1938 for (i = 0; i < dma_conf->dma_tx_size; i++) in dma_free_tx_skbufs()
1941 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { in dma_free_tx_skbufs()
1942 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in dma_free_tx_skbufs()
1943 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1944 tx_q->xsk_pool = NULL; in dma_free_tx_skbufs()
1949 * stmmac_free_tx_skbufs - free TX skb buffers
1954 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_free_tx_skbufs()
1958 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); in stmmac_free_tx_skbufs()
1962 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1971 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __free_dma_rx_desc_resources()
1974 if (rx_q->xsk_pool) in __free_dma_rx_desc_resources()
1979 rx_q->buf_alloc_num = 0; in __free_dma_rx_desc_resources()
1980 rx_q->xsk_pool = NULL; in __free_dma_rx_desc_resources()
1983 if (!priv->extend_desc) in __free_dma_rx_desc_resources()
1984 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1986 rx_q->dma_rx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1988 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1990 rx_q->dma_erx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1992 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) in __free_dma_rx_desc_resources()
1993 xdp_rxq_info_unreg(&rx_q->xdp_rxq); in __free_dma_rx_desc_resources()
1995 kfree(rx_q->buf_pool); in __free_dma_rx_desc_resources()
1996 if (rx_q->page_pool) in __free_dma_rx_desc_resources()
1997 page_pool_destroy(rx_q->page_pool); in __free_dma_rx_desc_resources()
2003 u32 rx_count = priv->plat->rx_queues_to_use; in free_dma_rx_desc_resources()
2012 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2021 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __free_dma_tx_desc_resources()
2028 if (priv->extend_desc) { in __free_dma_tx_desc_resources()
2030 addr = tx_q->dma_etx; in __free_dma_tx_desc_resources()
2031 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in __free_dma_tx_desc_resources()
2033 addr = tx_q->dma_entx; in __free_dma_tx_desc_resources()
2036 addr = tx_q->dma_tx; in __free_dma_tx_desc_resources()
2039 size *= dma_conf->dma_tx_size; in __free_dma_tx_desc_resources()
2041 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); in __free_dma_tx_desc_resources()
2043 kfree(tx_q->tx_skbuff_dma); in __free_dma_tx_desc_resources()
2044 kfree(tx_q->tx_skbuff); in __free_dma_tx_desc_resources()
2050 u32 tx_count = priv->plat->tx_queues_to_use; in free_dma_tx_desc_resources()
2059 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2065 * reception, for example, it pre-allocates the RX socket buffer in order to
2066 * allow zero-copy mechanism.
2072 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __alloc_dma_rx_desc_resources()
2073 struct stmmac_channel *ch = &priv->channel[queue]; in __alloc_dma_rx_desc_resources()
2080 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz + in __alloc_dma_rx_desc_resources()
2084 rx_q->queue_index = queue; in __alloc_dma_rx_desc_resources()
2085 rx_q->priv_data = priv; in __alloc_dma_rx_desc_resources()
2086 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE; in __alloc_dma_rx_desc_resources()
2089 pp_params.pool_size = dma_conf->dma_rx_size; in __alloc_dma_rx_desc_resources()
2091 pp_params.nid = dev_to_node(priv->device); in __alloc_dma_rx_desc_resources()
2092 pp_params.dev = priv->device; in __alloc_dma_rx_desc_resources()
2095 pp_params.max_len = dma_conf->dma_buf_sz; in __alloc_dma_rx_desc_resources()
2097 if (priv->sph) { in __alloc_dma_rx_desc_resources()
2102 rx_q->page_pool = page_pool_create(&pp_params); in __alloc_dma_rx_desc_resources()
2103 if (IS_ERR(rx_q->page_pool)) { in __alloc_dma_rx_desc_resources()
2104 ret = PTR_ERR(rx_q->page_pool); in __alloc_dma_rx_desc_resources()
2105 rx_q->page_pool = NULL; in __alloc_dma_rx_desc_resources()
2109 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, in __alloc_dma_rx_desc_resources()
2110 sizeof(*rx_q->buf_pool), in __alloc_dma_rx_desc_resources()
2112 if (!rx_q->buf_pool) in __alloc_dma_rx_desc_resources()
2113 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2115 if (priv->extend_desc) { in __alloc_dma_rx_desc_resources()
2116 rx_q->dma_erx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2117 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2119 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2121 if (!rx_q->dma_erx) in __alloc_dma_rx_desc_resources()
2122 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2125 rx_q->dma_rx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2126 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2128 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2130 if (!rx_q->dma_rx) in __alloc_dma_rx_desc_resources()
2131 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2135 test_bit(queue, priv->af_xdp_zc_qps)) in __alloc_dma_rx_desc_resources()
2136 napi_id = ch->rxtx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2138 napi_id = ch->rx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2140 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, in __alloc_dma_rx_desc_resources()
2141 rx_q->queue_index, in __alloc_dma_rx_desc_resources()
2144 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); in __alloc_dma_rx_desc_resources()
2145 return -EINVAL; in __alloc_dma_rx_desc_resources()
2154 u32 rx_count = priv->plat->rx_queues_to_use; in alloc_dma_rx_desc_resources()
2174 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2180 * reception, for example, it pre-allocates the RX socket buffer in order to
2181 * allow zero-copy mechanism.
2187 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __alloc_dma_tx_desc_resources()
2191 tx_q->queue_index = queue; in __alloc_dma_tx_desc_resources()
2192 tx_q->priv_data = priv; in __alloc_dma_tx_desc_resources()
2194 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2195 sizeof(*tx_q->tx_skbuff_dma), in __alloc_dma_tx_desc_resources()
2197 if (!tx_q->tx_skbuff_dma) in __alloc_dma_tx_desc_resources()
2198 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2200 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2203 if (!tx_q->tx_skbuff) in __alloc_dma_tx_desc_resources()
2204 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2206 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2208 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2213 size *= dma_conf->dma_tx_size; in __alloc_dma_tx_desc_resources()
2215 addr = dma_alloc_coherent(priv->device, size, in __alloc_dma_tx_desc_resources()
2216 &tx_q->dma_tx_phy, GFP_KERNEL); in __alloc_dma_tx_desc_resources()
2218 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2220 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2221 tx_q->dma_etx = addr; in __alloc_dma_tx_desc_resources()
2222 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2223 tx_q->dma_entx = addr; in __alloc_dma_tx_desc_resources()
2225 tx_q->dma_tx = addr; in __alloc_dma_tx_desc_resources()
2233 u32 tx_count = priv->plat->tx_queues_to_use; in alloc_dma_tx_desc_resources()
2252 * alloc_dma_desc_resources - alloc TX/RX resources.
2257 * reception, for example, it pre-allocates the RX socket buffer in order to
2258 * allow zero-copy mechanism.
2275 * free_dma_desc_resources - free dma desc resources
2292 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2298 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_enable_rx_queues()
2303 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; in stmmac_mac_enable_rx_queues()
2304 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); in stmmac_mac_enable_rx_queues()
2309 * stmmac_start_rx_dma - start RX DMA channel
2317 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); in stmmac_start_rx_dma()
2318 stmmac_start_rx(priv, priv->ioaddr, chan); in stmmac_start_rx_dma()
2322 * stmmac_start_tx_dma - start TX DMA channel
2330 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); in stmmac_start_tx_dma()
2331 stmmac_start_tx(priv, priv->ioaddr, chan); in stmmac_start_tx_dma()
2335 * stmmac_stop_rx_dma - stop RX DMA channel
2343 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); in stmmac_stop_rx_dma()
2344 stmmac_stop_rx(priv, priv->ioaddr, chan); in stmmac_stop_rx_dma()
2348 * stmmac_stop_tx_dma - stop TX DMA channel
2356 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); in stmmac_stop_tx_dma()
2357 stmmac_stop_tx(priv, priv->ioaddr, chan); in stmmac_stop_tx_dma()
2362 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_enable_all_dma_irq()
2363 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_enable_all_dma_irq()
2368 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_enable_all_dma_irq()
2371 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2372 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_enable_all_dma_irq()
2373 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2378 * stmmac_start_all_dma - start all RX and TX DMA channels
2385 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_start_all_dma()
2386 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_start_all_dma()
2397 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2404 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_stop_all_dma()
2405 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_stop_all_dma()
2416 * stmmac_dma_operation_mode - HW DMA operation mode
2419 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2423 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_dma_operation_mode()
2424 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_dma_operation_mode()
2425 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_dma_operation_mode()
2426 int txfifosz = priv->plat->tx_fifo_size; in stmmac_dma_operation_mode()
2433 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_dma_operation_mode()
2435 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_dma_operation_mode()
2438 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { in stmmac_dma_operation_mode()
2443 if (priv->plat->force_thresh_dma_mode) { in stmmac_dma_operation_mode()
2446 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { in stmmac_dma_operation_mode()
2456 priv->xstats.threshold = SF_DMA_MODE; in stmmac_dma_operation_mode()
2464 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_dma_operation_mode()
2467 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2469 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, in stmmac_dma_operation_mode()
2472 if (rx_q->xsk_pool) { in stmmac_dma_operation_mode()
2473 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_dma_operation_mode()
2474 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2478 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2479 priv->dma_conf.dma_buf_sz, in stmmac_dma_operation_mode()
2485 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2487 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, in stmmac_dma_operation_mode()
2496 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc); in stmmac_xsk_request_timestamp()
2497 *meta_req->set_ic = true; in stmmac_xsk_request_timestamp()
2503 struct stmmac_priv *priv = tx_compl->priv; in stmmac_xsk_fill_timestamp()
2504 struct dma_desc *desc = tx_compl->desc; in stmmac_xsk_fill_timestamp()
2508 if (!priv->hwts_tx_en) in stmmac_xsk_fill_timestamp()
2513 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_xsk_fill_timestamp()
2515 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_xsk_fill_timestamp()
2520 ns -= priv->plat->cdc_error_adj; in stmmac_xsk_fill_timestamp()
2534 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_zc()
2535 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_zc()
2536 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_zc()
2537 struct xsk_buff_pool *pool = tx_q->xsk_pool; in stmmac_xdp_xmit_zc()
2538 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2544 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_zc()
2549 while (budget-- > 0) { in stmmac_xdp_xmit_zc()
2559 !netif_carrier_ok(priv->dev)) { in stmmac_xdp_xmit_zc()
2567 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_zc()
2568 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_zc()
2569 xdp_desc.len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_zc()
2570 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_zc()
2574 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_zc()
2575 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_zc()
2576 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_zc()
2577 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_zc()
2579 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_zc()
2585 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; in stmmac_xdp_xmit_zc()
2591 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_xdp_xmit_zc()
2592 tx_q->xdpf[entry] = NULL; in stmmac_xdp_xmit_zc()
2594 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_zc()
2595 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; in stmmac_xdp_xmit_zc()
2596 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_zc()
2597 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_zc()
2601 tx_q->tx_count_frames++; in stmmac_xdp_xmit_zc()
2603 if (!priv->tx_coal_frames[queue]) in stmmac_xdp_xmit_zc()
2605 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_zc()
2616 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_zc()
2622 true, priv->mode, true, true, in stmmac_xdp_xmit_zc()
2625 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_zc()
2628 &tx_q->tx_skbuff_dma[entry].xsk_meta); in stmmac_xdp_xmit_zc()
2630 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_zc()
2631 entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2633 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2634 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); in stmmac_xdp_xmit_zc()
2635 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2652 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { in stmmac_bump_dma_threshold()
2655 if (priv->plat->force_thresh_dma_mode) in stmmac_bump_dma_threshold()
2661 priv->xstats.threshold = tc; in stmmac_bump_dma_threshold()
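The bump logic above is a simple recovery policy: while in threshold mode, each TX underflow raises the FIFO threshold until the ceiling is reached (the increment itself is elided in the listing). A sketch of that escalation, assuming a fixed step:

/* Illustrative escalation; the step and ceiling are assumptions. */
#define TC_STEP		64
#define TC_CEILING	256

static int bump_threshold(int tc)
{
	if (tc <= TC_CEILING)
		tc += TC_STEP;	/* caller retunes the DMA with the new value */
	return tc;
}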
2666 * stmmac_tx_clean - to manage the transmission completion
2678 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_clean()
2679 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tx_clean()
2684 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2686 tx_q->xsk_frames_done = 0; in stmmac_tx_clean()
2688 entry = tx_q->dirty_tx; in stmmac_tx_clean()
2691 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { in stmmac_tx_clean()
2697 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_tx_clean()
2698 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2699 xdpf = tx_q->xdpf[entry]; in stmmac_tx_clean()
2701 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2703 skb = tx_q->tx_skbuff[entry]; in stmmac_tx_clean()
2709 if (priv->extend_desc) in stmmac_tx_clean()
2710 p = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_tx_clean()
2711 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tx_clean()
2712 p = &tx_q->dma_entx[entry].basic; in stmmac_tx_clean()
2714 p = tx_q->dma_tx + entry; in stmmac_tx_clean()
2716 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); in stmmac_tx_clean()
2740 } else if (tx_q->xsk_pool && in stmmac_tx_clean()
2741 xp_tx_metadata_enabled(tx_q->xsk_pool)) { in stmmac_tx_clean()
2747 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta, in stmmac_tx_clean()
2753 if (likely(tx_q->tx_skbuff_dma[entry].buf && in stmmac_tx_clean()
2754 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { in stmmac_tx_clean()
2755 if (tx_q->tx_skbuff_dma[entry].map_as_page) in stmmac_tx_clean()
2756 dma_unmap_page(priv->device, in stmmac_tx_clean()
2757 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2758 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2761 dma_unmap_single(priv->device, in stmmac_tx_clean()
2762 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2763 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2765 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_tx_clean()
2766 tx_q->tx_skbuff_dma[entry].len = 0; in stmmac_tx_clean()
2767 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_tx_clean()
2772 tx_q->tx_skbuff_dma[entry].last_segment = false; in stmmac_tx_clean()
2773 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_tx_clean()
2776 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { in stmmac_tx_clean()
2778 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2782 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2784 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2787 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_tx_clean()
2788 tx_q->xsk_frames_done++; in stmmac_tx_clean()
2790 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2793 bytes_compl += skb->len; in stmmac_tx_clean()
2795 tx_q->tx_skbuff[entry] = NULL; in stmmac_tx_clean()
2799 stmmac_release_tx_desc(priv, p, priv->mode); in stmmac_tx_clean()
2801 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_tx_clean()
2803 tx_q->dirty_tx = entry; in stmmac_tx_clean()
2805 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), in stmmac_tx_clean()
2808 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, in stmmac_tx_clean()
2812 netif_dbg(priv, tx_done, priv->dev, in stmmac_tx_clean()
2814 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2817 if (tx_q->xsk_pool) { in stmmac_tx_clean()
2820 if (tx_q->xsk_frames_done) in stmmac_tx_clean()
2821 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in stmmac_tx_clean()
2823 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) in stmmac_tx_clean()
2824 xsk_set_tx_need_wakeup(tx_q->xsk_pool); in stmmac_tx_clean()
2828 * available), return "budget - 1" to reenable TX IRQ. in stmmac_tx_clean()
2834 xmits = budget - 1; in stmmac_tx_clean()
2839 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode) in stmmac_tx_clean()
2843 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_tx_clean()
2846 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_tx_clean()
2847 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); in stmmac_tx_clean()
2848 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); in stmmac_tx_clean()
2849 u64_stats_inc(&txq_stats->napi.tx_clean); in stmmac_tx_clean()
2850 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_tx_clean()
2852 priv->xstats.tx_errors += tx_errors; in stmmac_tx_clean()
2854 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
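The "budget - 1" trick noted above follows the NAPI contract: returning less than the budget tells the core this channel is done, after which napi_complete_done() permits interrupts to be re-enabled, while consuming the full budget keeps the channel in polled mode. A schematic poll callback showing the convention, with hypothetical helpers:

#include <linux/netdevice.h>

/* Schematic only: demo_clean_tx()/demo_enable_irq() are hypothetical. */
static int demo_poll(struct napi_struct *napi, int budget)
{
	int done = demo_clean_tx(napi, budget);

	if (done < budget && napi_complete_done(napi, done))
		demo_enable_irq(napi);	/* safe: no more pending work */

	return done;	/* returning == budget keeps NAPI scheduled */
}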
2861 * stmmac_tx_err - to manage the tx error
2869 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_tx_err()
2871 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2874 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2875 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2877 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_tx_err()
2878 tx_q->dma_tx_phy, chan); in stmmac_tx_err()
2881 priv->xstats.tx_errors++; in stmmac_tx_err()
2882 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2886 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2892 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2898 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2899 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2900 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_dma_operation_mode()
2901 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_dma_operation_mode()
2902 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_set_dma_operation_mode()
2903 int txfifosz = priv->plat->tx_fifo_size; in stmmac_set_dma_operation_mode()
2906 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_set_dma_operation_mode()
2908 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_set_dma_operation_mode()
2914 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); in stmmac_set_dma_operation_mode()
2915 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); in stmmac_set_dma_operation_mode()
2922 ret = stmmac_safety_feat_irq_status(priv, priv->dev, in stmmac_safety_feat_interrupt()
2923 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); in stmmac_safety_feat_interrupt()
2924 if (ret && (ret != -EINVAL)) { in stmmac_safety_feat_interrupt()
2934 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, in stmmac_napi_check()
2935 &priv->xstats, chan, dir); in stmmac_napi_check()
2936 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_napi_check()
2937 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_napi_check()
2938 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_napi_check()
2943 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; in stmmac_napi_check()
2944 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_napi_check()
2946 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { in stmmac_napi_check()
2948 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2949 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_check()
2950 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2955 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { in stmmac_napi_check()
2957 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2958 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_check()
2959 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
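/*
 * Illustrative sketch, not driver code: the DMA status word combines
 * the RX and TX causes for one channel. Each set bit independently
 * masks that direction's interrupt and schedules the matching NAPI
 * context, which re-enables the source when its budget allows.
 * Standalone C with hypothetical bit assignments.
 */
#include <stdio.h>

#define DEMO_HANDLE_RX	(1u << 0)
#define DEMO_HANDLE_TX	(1u << 1)

static void demo_dispatch(unsigned int status)
{
	if (status & DEMO_HANDLE_RX)
		puts("mask RX irq, schedule RX napi");
	if (status & DEMO_HANDLE_TX)
		puts("mask TX irq, schedule TX napi");
}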
2968 * stmmac_dma_interrupt - DMA ISR
2976 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_dma_interrupt()
2977 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_dma_interrupt()
3011 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); in stmmac_mmc_setup()
3013 if (priv->dma_cap.rmon) { in stmmac_mmc_setup()
3014 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); in stmmac_mmc_setup()
3015 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); in stmmac_mmc_setup()
3017 netdev_info(priv->dev, "No MAC Management Counters available\n"); in stmmac_mmc_setup()
3021 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3031 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; in stmmac_get_hw_features()
3035 * stmmac_check_ether_addr - check if the MAC addr is valid
3045 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in stmmac_check_ether_addr()
3046 stmmac_get_umac_addr(priv, priv->hw, addr, 0); in stmmac_check_ether_addr()
3048 eth_hw_addr_set(priv->dev, addr); in stmmac_check_ether_addr()
3050 eth_hw_addr_random(priv->dev); in stmmac_check_ether_addr()
3051 dev_info(priv->device, "device MAC address %pM\n", in stmmac_check_ether_addr()
3052 priv->dev->dev_addr); in stmmac_check_ether_addr()
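/*
 * Illustrative sketch, not driver code: the fallback order above is the
 * address already on the netdev, then the one latched in the MAC's
 * address register, then a random locally-administered one. "Valid"
 * follows the usual Ethernet rule, re-implemented standalone here.
 */
#include <stdbool.h>

static bool demo_is_valid_ether_addr(const unsigned char addr[6])
{
	bool all_zero = true;

	for (int i = 0; i < 6; i++)
		if (addr[i])
			all_zero = false;

	/* Bit 0 of the first octet marks multicast, which can never be
	 * a station address; the all-zero address is invalid as well. */
	return !(addr[0] & 1) && !all_zero;
}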
3057 * stmmac_init_dma_engine - DMA init.
3066 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_init_dma_engine()
3067 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_init_dma_engine()
3074 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { in stmmac_init_dma_engine()
3075 dev_err(priv->device, "Invalid DMA configuration\n"); in stmmac_init_dma_engine()
3076 return -EINVAL; in stmmac_init_dma_engine()
3079 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) in stmmac_init_dma_engine()
3080 priv->plat->dma_cfg->atds = 1; in stmmac_init_dma_engine()
3082 ret = stmmac_reset(priv, priv->ioaddr); in stmmac_init_dma_engine()
3084 dev_err(priv->device, "Failed to reset the dma\n"); in stmmac_init_dma_engine()
3089 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); in stmmac_init_dma_engine()
3091 if (priv->plat->axi) in stmmac_init_dma_engine()
3092 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); in stmmac_init_dma_engine()
3096 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_init_dma_engine()
3097 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_init_dma_engine()
3102 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_init_dma_engine()
3104 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3105 rx_q->dma_rx_phy, chan); in stmmac_init_dma_engine()
3107 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_init_dma_engine()
3108 (rx_q->buf_alloc_num * in stmmac_init_dma_engine()
3110 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3111 rx_q->rx_tail_addr, chan); in stmmac_init_dma_engine()
3116 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_dma_engine()
3118 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3119 tx_q->dma_tx_phy, chan); in stmmac_init_dma_engine()
3121 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_init_dma_engine()
3122 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3123 tx_q->tx_tail_addr, chan); in stmmac_init_dma_engine()
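/*
 * Illustrative sketch, not driver code: the tail pointers programmed
 * above are plain bus addresses, ring base plus an entry offset, which
 * tell the DMA engine how far it may advance. The RX tail sits past the
 * buffers already made available; the TX tail starts at the base since
 * nothing is queued yet. Hypothetical address type below.
 */
typedef unsigned long long demo_dma_addr_t;

static demo_dma_addr_t demo_tail_addr(demo_dma_addr_t ring_base,
				      unsigned int nr_ready,
				      unsigned int desc_size)
{
	return ring_base + (demo_dma_addr_t)nr_ready * desc_size;
}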
3131 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_timer_arm()
3132 u32 tx_coal_timer = priv->tx_coal_timer[queue]; in stmmac_tx_timer_arm()
3139 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer_arm()
3140 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer_arm()
3147 hrtimer_start(&tx_q->txtimer, in stmmac_tx_timer_arm()
3151 hrtimer_try_to_cancel(&tx_q->txtimer); in stmmac_tx_timer_arm()
3155 * stmmac_tx_timer - mitigation sw timer for tx.
3163 struct stmmac_priv *priv = tx_q->priv_data; in stmmac_tx_timer()
3167 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer()
3168 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer()
3173 spin_lock_irqsave(&ch->lock, flags); in stmmac_tx_timer()
3174 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); in stmmac_tx_timer()
3175 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_tx_timer()
3183 * stmmac_init_coalesce - initialize the interrupt mitigation options.
3192 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_init_coalesce()
3193 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_init_coalesce()
3197 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_coalesce()
3199 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; in stmmac_init_coalesce()
3200 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; in stmmac_init_coalesce()
3202 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_init_coalesce()
3203 tx_q->txtimer.function = stmmac_tx_timer; in stmmac_init_coalesce()
3207 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; in stmmac_init_coalesce()
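/*
 * Illustrative sketch, not driver code: frame-count coalescing sets the
 * "interrupt on completion" bit roughly once every N descriptors, with
 * the per-queue hrtimer armed above acting as a safety net so a quiet
 * queue is still cleaned. Standalone C; the counter is assumed to
 * already include the current batch, as in the TX paths below.
 */
#include <stdbool.h>

static bool demo_want_irq(unsigned int count_frames,
			  unsigned int coal_frames, unsigned int batch)
{
	if (!coal_frames)
		return false;
	/* True when this batch made the counter cross a multiple of
	 * the coalescing threshold. */
	return (count_frames % coal_frames) < batch;
}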
3212 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_rings_length()
3213 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_rings_length()
3218 stmmac_set_tx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3219 (priv->dma_conf.dma_tx_size - 1), chan); in stmmac_set_rings_length()
3223 stmmac_set_rx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3224 (priv->dma_conf.dma_rx_size - 1), chan); in stmmac_set_rings_length()
3228 * stmmac_set_tx_queue_weight - Set TX queue weight
3234 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_set_tx_queue_weight()
3239 weight = priv->plat->tx_queues_cfg[queue].weight; in stmmac_set_tx_queue_weight()
3240 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); in stmmac_set_tx_queue_weight()
3245 * stmmac_configure_cbs - Configure CBS in TX queue
3251 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_configure_cbs()
3257 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; in stmmac_configure_cbs()
3261 stmmac_config_cbs(priv, priv->hw, in stmmac_configure_cbs()
3262 priv->plat->tx_queues_cfg[queue].send_slope, in stmmac_configure_cbs()
3263 priv->plat->tx_queues_cfg[queue].idle_slope, in stmmac_configure_cbs()
3264 priv->plat->tx_queues_cfg[queue].high_credit, in stmmac_configure_cbs()
3265 priv->plat->tx_queues_cfg[queue].low_credit, in stmmac_configure_cbs()
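/*
 * Illustrative sketch, not driver code: credit-based shaping (IEEE
 * 802.1Qav) derives both slopes from the bandwidth fraction reserved
 * for the queue. The conversion into the hardware's register units is
 * core-specific and omitted; this is plain bits-per-second arithmetic.
 */
static void demo_cbs_slopes(long long port_rate_bps, int reserved_percent,
			    long long *idle_slope, long long *send_slope)
{
	/* idleSlope: rate at which credit accrues while the queue is
	 * held back. sendSlope: the (negative) rate at which credit
	 * drains during transmission, idleSlope - portTransmitRate. */
	*idle_slope = port_rate_bps * reserved_percent / 100;
	*send_slope = *idle_slope - port_rate_bps;
}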
3271 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3277 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_rx_queue_dma_chan_map()
3282 chan = priv->plat->rx_queues_cfg[queue].chan; in stmmac_rx_queue_dma_chan_map()
3283 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); in stmmac_rx_queue_dma_chan_map()
3288 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3294 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_prio()
3299 if (!priv->plat->rx_queues_cfg[queue].use_prio) in stmmac_mac_config_rx_queues_prio()
3302 prio = priv->plat->rx_queues_cfg[queue].prio; in stmmac_mac_config_rx_queues_prio()
3303 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_rx_queues_prio()
3308 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3314 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mac_config_tx_queues_prio()
3319 if (!priv->plat->tx_queues_cfg[queue].use_prio) in stmmac_mac_config_tx_queues_prio()
3322 prio = priv->plat->tx_queues_cfg[queue].prio; in stmmac_mac_config_tx_queues_prio()
3323 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_tx_queues_prio()
3328 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3334 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_routing()
3340 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) in stmmac_mac_config_rx_queues_routing()
3343 packet = priv->plat->rx_queues_cfg[queue].pkt_route; in stmmac_mac_config_rx_queues_routing()
3344 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); in stmmac_mac_config_rx_queues_routing()
3350 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { in stmmac_mac_config_rss()
3351 priv->rss.enable = false; in stmmac_mac_config_rss()
3355 if (priv->dev->features & NETIF_F_RXHASH) in stmmac_mac_config_rss()
3356 priv->rss.enable = true; in stmmac_mac_config_rss()
3358 priv->rss.enable = false; in stmmac_mac_config_rss()
3360 stmmac_rss_configure(priv, priv->hw, &priv->rss, in stmmac_mac_config_rss()
3361 priv->plat->rx_queues_to_use); in stmmac_mac_config_rss()
3365 * stmmac_mtl_configuration - Configure MTL
3371 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mtl_configuration()
3372 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mtl_configuration()
3379 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3380 priv->plat->rx_sched_algorithm); in stmmac_mtl_configuration()
3384 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3385 priv->plat->tx_sched_algorithm); in stmmac_mtl_configuration()
3416 if (priv->dma_cap.asp) { in stmmac_safety_feat_configuration()
3417 netdev_info(priv->dev, "Enabling Safety Features\n"); in stmmac_safety_feat_configuration()
3418 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, in stmmac_safety_feat_configuration()
3419 priv->plat->safety_feat_cfg); in stmmac_safety_feat_configuration()
3421 netdev_info(priv->dev, "No Safety Features support found\n"); in stmmac_safety_feat_configuration()
3426 * stmmac_hw_setup - set up the MAC in a usable state.
3435 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3441 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_hw_setup()
3442 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_hw_setup()
3448 if (priv->hw->phylink_pcs) in stmmac_hw_setup()
3449 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); in stmmac_hw_setup()
3454 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", in stmmac_hw_setup()
3460 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); in stmmac_hw_setup()
3463 if (priv->hw->pcs) { in stmmac_hw_setup()
3464 int speed = priv->plat->mac_port_sel_speed; in stmmac_hw_setup()
3468 priv->hw->ps = speed; in stmmac_hw_setup()
3470 dev_warn(priv->device, "invalid port speed\n"); in stmmac_hw_setup()
3471 priv->hw->ps = 0; in stmmac_hw_setup()
3476 stmmac_core_init(priv, priv->hw, dev); in stmmac_hw_setup()
3484 ret = stmmac_rx_ipc(priv, priv->hw); in stmmac_hw_setup()
3486 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); in stmmac_hw_setup()
3487 priv->plat->rx_coe = STMMAC_RX_COE_NONE; in stmmac_hw_setup()
3488 priv->hw->rx_csum = 0; in stmmac_hw_setup()
3492 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_hw_setup()
3500 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_hw_setup()
3502 netdev_warn(priv->dev, in stmmac_hw_setup()
3508 if (ret == -EOPNOTSUPP) in stmmac_hw_setup()
3509 netdev_info(priv->dev, "PTP not supported by HW\n"); in stmmac_hw_setup()
3511 netdev_warn(priv->dev, "PTP init failed\n"); in stmmac_hw_setup()
3515 if (priv->use_riwt) { in stmmac_hw_setup()
3519 if (!priv->rx_riwt[queue]) in stmmac_hw_setup()
3520 priv->rx_riwt[queue] = DEF_DMA_RIWT; in stmmac_hw_setup()
3522 stmmac_rx_watchdog(priv, priv->ioaddr, in stmmac_hw_setup()
3523 priv->rx_riwt[queue], queue); in stmmac_hw_setup()
3527 if (priv->hw->pcs) in stmmac_hw_setup()
3528 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); in stmmac_hw_setup()
3534 if (priv->tso) { in stmmac_hw_setup()
3536 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3538 /* TSO and TBS cannot co-exist */ in stmmac_hw_setup()
3539 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_hw_setup()
3542 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
3547 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_hw_setup()
3549 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_hw_setup()
3553 if (priv->dma_cap.vlins) in stmmac_hw_setup()
3554 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); in stmmac_hw_setup()
3558 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3559 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; in stmmac_hw_setup()
3561 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); in stmmac_hw_setup()
3565 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); in stmmac_hw_setup()
3566 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); in stmmac_hw_setup()
3571 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_hw_setup()
3580 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_hw_teardown()
3591 irq_idx = priv->plat->tx_queues_to_use; in stmmac_free_irq()
3594 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3595 if (priv->tx_irq[j] > 0) { in stmmac_free_irq()
3596 irq_set_affinity_hint(priv->tx_irq[j], NULL); in stmmac_free_irq()
3597 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); in stmmac_free_irq()
3600 irq_idx = priv->plat->rx_queues_to_use; in stmmac_free_irq()
3603 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3604 if (priv->rx_irq[j] > 0) { in stmmac_free_irq()
3605 irq_set_affinity_hint(priv->rx_irq[j], NULL); in stmmac_free_irq()
3606 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); in stmmac_free_irq()
3610 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) in stmmac_free_irq()
3611 free_irq(priv->sfty_ue_irq, dev); in stmmac_free_irq()
3614 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) in stmmac_free_irq()
3615 free_irq(priv->sfty_ce_irq, dev); in stmmac_free_irq()
3618 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) in stmmac_free_irq()
3619 free_irq(priv->lpi_irq, dev); in stmmac_free_irq()
3622 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) in stmmac_free_irq()
3623 free_irq(priv->wol_irq, dev); in stmmac_free_irq()
3626 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) in stmmac_free_irq()
3627 free_irq(priv->sfty_irq, dev); in stmmac_free_irq()
3630 free_irq(dev->irq, dev); in stmmac_free_irq()
3649 int_name = priv->int_name_mac; in stmmac_request_irq_multi_msi()
3650 sprintf(int_name, "%s:%s", dev->name, "mac"); in stmmac_request_irq_multi_msi()
3651 ret = request_irq(dev->irq, stmmac_mac_interrupt, in stmmac_request_irq_multi_msi()
3654 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3656 __func__, dev->irq, ret); in stmmac_request_irq_multi_msi()
3664 priv->wol_irq_disabled = true; in stmmac_request_irq_multi_msi()
3665 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3666 int_name = priv->int_name_wol; in stmmac_request_irq_multi_msi()
3667 sprintf(int_name, "%s:%s", dev->name, "wol"); in stmmac_request_irq_multi_msi()
3668 ret = request_irq(priv->wol_irq, in stmmac_request_irq_multi_msi()
3672 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3674 __func__, priv->wol_irq, ret); in stmmac_request_irq_multi_msi()
3683 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3684 int_name = priv->int_name_lpi; in stmmac_request_irq_multi_msi()
3685 sprintf(int_name, "%s:%s", dev->name, "lpi"); in stmmac_request_irq_multi_msi()
3686 ret = request_irq(priv->lpi_irq, in stmmac_request_irq_multi_msi()
3690 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3692 __func__, priv->lpi_irq, ret); in stmmac_request_irq_multi_msi()
3701 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3702 int_name = priv->int_name_sfty; in stmmac_request_irq_multi_msi()
3703 sprintf(int_name, "%s:%s", dev->name, "safety"); in stmmac_request_irq_multi_msi()
3704 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_multi_msi()
3707 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3709 __func__, priv->sfty_irq, ret); in stmmac_request_irq_multi_msi()
3718 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3719 int_name = priv->int_name_sfty_ce; in stmmac_request_irq_multi_msi()
3720 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); in stmmac_request_irq_multi_msi()
3721 ret = request_irq(priv->sfty_ce_irq, in stmmac_request_irq_multi_msi()
3725 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3727 __func__, priv->sfty_ce_irq, ret); in stmmac_request_irq_multi_msi()
3736 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3737 int_name = priv->int_name_sfty_ue; in stmmac_request_irq_multi_msi()
3738 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); in stmmac_request_irq_multi_msi()
3739 ret = request_irq(priv->sfty_ue_irq, in stmmac_request_irq_multi_msi()
3743 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3745 __func__, priv->sfty_ue_irq, ret); in stmmac_request_irq_multi_msi()
3752 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3755 if (priv->rx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3758 int_name = priv->int_name_rx_irq[i]; in stmmac_request_irq_multi_msi()
3759 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); in stmmac_request_irq_multi_msi()
3760 ret = request_irq(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3762 0, int_name, &priv->dma_conf.rx_queue[i]); in stmmac_request_irq_multi_msi()
3764 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3765 "%s: alloc rx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3766 __func__, i, priv->rx_irq[i], ret); in stmmac_request_irq_multi_msi()
3771 irq_set_affinity_hint(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3776 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3779 if (priv->tx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3782 int_name = priv->int_name_tx_irq[i]; in stmmac_request_irq_multi_msi()
3783 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); in stmmac_request_irq_multi_msi()
3784 ret = request_irq(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3786 0, int_name, &priv->dma_conf.tx_queue[i]); in stmmac_request_irq_multi_msi()
3788 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3789 "%s: alloc tx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3790 __func__, i, priv->tx_irq[i], ret); in stmmac_request_irq_multi_msi()
3795 irq_set_affinity_hint(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3812 ret = request_irq(dev->irq, stmmac_interrupt, in stmmac_request_irq_single()
3813 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3815 netdev_err(priv->dev, in stmmac_request_irq_single()
3817 __func__, dev->irq, ret); in stmmac_request_irq_single()
3825 priv->wol_irq_disabled = true; in stmmac_request_irq_single()
3826 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_single()
3827 ret = request_irq(priv->wol_irq, stmmac_interrupt, in stmmac_request_irq_single()
3828 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3830 netdev_err(priv->dev, in stmmac_request_irq_single()
3832 __func__, priv->wol_irq, ret); in stmmac_request_irq_single()
3839 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_single()
3840 ret = request_irq(priv->lpi_irq, stmmac_interrupt, in stmmac_request_irq_single()
3841 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3843 netdev_err(priv->dev, in stmmac_request_irq_single()
3845 __func__, priv->lpi_irq, ret); in stmmac_request_irq_single()
3854 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_single()
3855 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_single()
3856 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3858 netdev_err(priv->dev, in stmmac_request_irq_single()
3860 __func__, priv->sfty_irq, ret); in stmmac_request_irq_single()
3879 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) in stmmac_request_irq()
3888 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3904 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", in stmmac_setup_dma_desc()
3906 return ERR_PTR(-ENOMEM); in stmmac_setup_dma_desc()
3916 dma_conf->dma_buf_sz = bfsize; in stmmac_setup_dma_desc()
3920 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; in stmmac_setup_dma_desc()
3921 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; in stmmac_setup_dma_desc()
3923 if (!dma_conf->dma_tx_size) in stmmac_setup_dma_desc()
3924 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; in stmmac_setup_dma_desc()
3925 if (!dma_conf->dma_rx_size) in stmmac_setup_dma_desc()
3926 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; in stmmac_setup_dma_desc()
3929 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { in stmmac_setup_dma_desc()
3930 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; in stmmac_setup_dma_desc()
3931 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; in stmmac_setup_dma_desc()
3933 /* Setup per-TXQ tbs flag before TX descriptor alloc */ in stmmac_setup_dma_desc()
3934 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; in stmmac_setup_dma_desc()
3939 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", in stmmac_setup_dma_desc()
3944 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); in stmmac_setup_dma_desc()
3946 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", in stmmac_setup_dma_desc()
3961 * __stmmac_open - open entry point of the driver
3967 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3974 int mode = priv->plat->phy_interface; in __stmmac_open()
3979 if (!priv->tx_lpi_timer) in __stmmac_open()
3980 priv->tx_lpi_timer = eee_timer * 1000; in __stmmac_open()
3982 ret = pm_runtime_resume_and_get(priv->device); in __stmmac_open()
3986 if ((!priv->hw->xpcs || in __stmmac_open()
3987 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { in __stmmac_open()
3990 netdev_err(priv->dev, in __stmmac_open()
3997 buf_sz = dma_conf->dma_buf_sz; in __stmmac_open()
3999 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) in __stmmac_open()
4000 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; in __stmmac_open()
4001 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); in __stmmac_open()
4005 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in __stmmac_open()
4006 priv->plat->serdes_powerup) { in __stmmac_open()
4007 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); in __stmmac_open()
4009 netdev_err(priv->dev, "%s: Serdes powerup failed\n", in __stmmac_open()
4017 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in __stmmac_open()
4023 phylink_start(priv->phylink); in __stmmac_open()
4025 phylink_speed_up(priv->phylink); in __stmmac_open()
4032 netif_tx_start_all_queues(priv->dev); in __stmmac_open()
4038 phylink_stop(priv->phylink); in __stmmac_open()
4040 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in __stmmac_open()
4041 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in __stmmac_open()
4045 phylink_disconnect_phy(priv->phylink); in __stmmac_open()
4047 pm_runtime_put(priv->device); in __stmmac_open()
4057 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); in stmmac_open()
4070 * stmmac_release - close entry point of the driver
4080 if (device_may_wakeup(priv->device)) in stmmac_release()
4081 phylink_speed_down(priv->phylink, false); in stmmac_release()
4083 phylink_stop(priv->phylink); in stmmac_release()
4084 phylink_disconnect_phy(priv->phylink); in stmmac_release()
4088 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_release()
4089 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_release()
4100 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_release()
4103 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_release()
4106 if (priv->plat->serdes_powerdown) in stmmac_release()
4107 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); in stmmac_release()
4112 timer_shutdown_sync(&priv->fpe_cfg.verify_timer); in stmmac_release()
4114 pm_runtime_put(priv->device); in stmmac_release()
4126 if (!priv->dma_cap.vlins) in stmmac_vlan_insert()
4130 if (skb->vlan_proto == htons(ETH_P_8021AD)) { in stmmac_vlan_insert()
4137 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_vlan_insert()
4138 p = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_vlan_insert()
4140 p = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_vlan_insert()
4146 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_vlan_insert()
4151 * stmmac_tso_allocator - Allocate TSO descriptors
4164 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_allocator()
4174 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_allocator()
4175 priv->dma_conf.dma_tx_size); in stmmac_tso_allocator()
4176 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_allocator()
4178 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_allocator()
4179 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_allocator()
4181 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_allocator()
4183 curr_addr = des + (total_len - tmp_len); in stmmac_tso_allocator()
4193 tmp_len -= TSO_MAX_BUFF_SIZE; in stmmac_tso_allocator()
4199 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_flush_tx_descriptors()
4202 if (likely(priv->extend_desc)) in stmmac_flush_tx_descriptors()
4204 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_flush_tx_descriptors()
4215 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); in stmmac_flush_tx_descriptors()
4216 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_flush_tx_descriptors()
4220 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4228 * --------
4229 * | DES0 |---> buffer1 = L2/L3/L4 header
4230 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4231 * | | width is 32-bit, but we never use it.
4232 * | | Also can be used as the most-significant 8-bits or 16-bits of
4233 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4234 * | | or 48-bit, and we always use it.
4235 * | DES2 |---> buffer1 len
4236 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4237 * --------
4238 * --------
4239 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4240 * | DES1 |---> same as the First Descriptor
4241 * | DES2 |---> buffer1 len
4243 * --------
4247 * --------
4248 * | DES0 |---> buffer1 = Split TCP Payload
4249 * | DES1 |---> same as the First Descriptor
4250 * | DES2 |---> buffer1 len
4252 * --------
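/*
 * Illustrative sketch, not driver code: stmmac_tso_allocator() above
 * carves the remaining payload into chunks of at most TSO_MAX_BUFF_SIZE
 * (16K - 1) bytes, one descriptor per chunk. Standalone C computing the
 * descriptor count for a given payload length.
 */
#define DEMO_TSO_MAX_BUFF	(16384 - 1)

static unsigned int demo_tso_desc_count(unsigned int payload_len)
{
	/* e.g. a 64KB GSO payload needs 5 descriptors:
	 * 4 * 16383 = 65532 bytes, plus one for the final 4 bytes. */
	return (payload_len + DEMO_TSO_MAX_BUFF - 1) / DEMO_TSO_MAX_BUFF;
}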
4272 * TSO engine will be un-tagged by mistake. in stmmac_tso_xmit()
4277 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
4282 nfrags = skb_shinfo(skb)->nr_frags; in stmmac_tso_xmit()
4285 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_xmit()
4286 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tso_xmit()
4287 first_tx = tx_q->cur_tx; in stmmac_tso_xmit()
4290 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in stmmac_tso_xmit()
4300 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { in stmmac_tso_xmit()
4302 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_tso_xmit()
4305 netdev_err(priv->dev, in stmmac_tso_xmit()
4312 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ in stmmac_tso_xmit()
4314 mss = skb_shinfo(skb)->gso_size; in stmmac_tso_xmit()
4317 if (mss != tx_q->mss) { in stmmac_tso_xmit()
4318 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4319 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4321 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4324 tx_q->mss = mss; in stmmac_tso_xmit()
4325 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_xmit()
4326 priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4327 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_xmit()
4333 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, in stmmac_tso_xmit()
4334 skb->data_len); in stmmac_tso_xmit()
4337 first_entry = tx_q->cur_tx; in stmmac_tso_xmit()
4338 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_tso_xmit()
4340 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4341 desc = &tx_q->dma_entx[first_entry].basic; in stmmac_tso_xmit()
4343 desc = &tx_q->dma_tx[first_entry]; in stmmac_tso_xmit()
4347 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), in stmmac_tso_xmit()
4349 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4357 * non-paged SKB data, the DMA buffer address should be saved to in stmmac_tso_xmit()
4358 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, in stmmac_tso_xmit()
4359 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee in stmmac_tso_xmit()
4363 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf in stmmac_tso_xmit()
4368 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4369 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); in stmmac_tso_xmit()
4370 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false; in stmmac_tso_xmit()
4371 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4375 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_tso_xmit()
4377 des = skb_frag_dma_map(priv->device, frag, 0, in stmmac_tso_xmit()
4380 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4384 (i == nfrags - 1), queue); in stmmac_tso_xmit()
4386 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4387 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); in stmmac_tso_xmit()
4388 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; in stmmac_tso_xmit()
4389 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4392 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; in stmmac_tso_xmit()
4395 tx_q->tx_skbuff[tx_q->cur_tx] = skb; in stmmac_tso_xmit()
4396 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4399 tx_packets = (tx_q->cur_tx + 1) - first_tx; in stmmac_tso_xmit()
4400 tx_q->tx_count_frames += tx_packets; in stmmac_tso_xmit()
4402 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_tso_xmit()
4404 else if (!priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4406 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4408 else if ((tx_q->tx_count_frames % in stmmac_tso_xmit()
4409 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_tso_xmit()
4415 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4416 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4418 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4420 tx_q->tx_count_frames = 0; in stmmac_tso_xmit()
4429 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4432 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_tso_xmit()
4434 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tso_xmit()
4437 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_tso_xmit()
4438 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_tso_xmit()
4439 u64_stats_inc(&txq_stats->q.tx_tso_frames); in stmmac_tso_xmit()
4440 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); in stmmac_tso_xmit()
4442 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_tso_xmit()
4443 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_tso_xmit()
4445 if (priv->sarc_type) in stmmac_tso_xmit()
4446 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_tso_xmit()
4450 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_tso_xmit()
4451 priv->hwts_tx_en)) { in stmmac_tso_xmit()
4453 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_tso_xmit()
4459 tx_q->tx_skbuff_dma[first_entry].last_segment, in stmmac_tso_xmit()
4460 hdr / 4, (skb->len - proto_hdr_len)); in stmmac_tso_xmit()
4475 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_tso_xmit()
4476 tx_q->cur_tx, first, nfrags); in stmmac_tso_xmit()
4478 print_pkt(skb->data, skb_headlen(skb)); in stmmac_tso_xmit()
4481 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_tso_xmit()
4489 dev_err(priv->device, "Tx dma map failed\n"); in stmmac_tso_xmit()
4491 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
4496 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4518 * stmmac_xmit - Tx entry point of the driver
4532 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_xmit()
4533 int gso = skb_shinfo(skb)->gso_type; in stmmac_xmit()
4542 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xmit()
4543 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xmit()
4544 first_tx = tx_q->cur_tx; in stmmac_xmit()
4546 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) in stmmac_xmit()
4550 if (skb_is_gso(skb) && priv->tso) { in stmmac_xmit()
4553 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) in stmmac_xmit()
4557 if (priv->est && priv->est->enable && in stmmac_xmit()
4558 priv->est->max_sdu[queue] && in stmmac_xmit()
4559 skb->len > priv->est->max_sdu[queue]) { in stmmac_xmit()
4560 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xmit()
4566 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_xmit()
4569 netdev_err(priv->dev, in stmmac_xmit()
4579 entry = tx_q->cur_tx; in stmmac_xmit()
4581 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_xmit()
4583 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); in stmmac_xmit()
4588 * Packets that won't trigger the COE e.g. most DSA-tagged packets will in stmmac_xmit()
4592 (priv->plat->tx_queues_cfg[queue].coe_unsupported || in stmmac_xmit()
4599 if (likely(priv->extend_desc)) in stmmac_xmit()
4600 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4601 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4602 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4604 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4611 enh_desc = priv->plat->enh_desc; in stmmac_xmit()
4614 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); in stmmac_xmit()
4618 if (unlikely(entry < 0) && (entry != -EINVAL)) in stmmac_xmit()
4623 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_xmit()
4625 bool last_segment = (i == (nfrags - 1)); in stmmac_xmit()
4627 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4628 WARN_ON(tx_q->tx_skbuff[entry]); in stmmac_xmit()
4630 if (likely(priv->extend_desc)) in stmmac_xmit()
4631 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4632 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4633 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4635 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4637 des = skb_frag_dma_map(priv->device, frag, 0, len, in stmmac_xmit()
4639 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4642 tx_q->tx_skbuff_dma[entry].buf = des; in stmmac_xmit()
4646 tx_q->tx_skbuff_dma[entry].map_as_page = true; in stmmac_xmit()
4647 tx_q->tx_skbuff_dma[entry].len = len; in stmmac_xmit()
4648 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; in stmmac_xmit()
4649 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4653 priv->mode, 1, last_segment, skb->len); in stmmac_xmit()
4657 tx_q->tx_skbuff[entry] = skb; in stmmac_xmit()
4658 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4661 * segment is reset and the timer re-started to clean the tx status. in stmmac_xmit()
4665 tx_packets = (entry + 1) - first_tx; in stmmac_xmit()
4666 tx_q->tx_count_frames += tx_packets; in stmmac_xmit()
4668 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_xmit()
4670 else if (!priv->tx_coal_frames[queue]) in stmmac_xmit()
4672 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_xmit()
4674 else if ((tx_q->tx_count_frames % in stmmac_xmit()
4675 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_xmit()
4681 if (likely(priv->extend_desc)) in stmmac_xmit()
4682 desc = &tx_q->dma_etx[entry].basic; in stmmac_xmit()
4683 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4684 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4686 desc = &tx_q->dma_tx[entry]; in stmmac_xmit()
4688 tx_q->tx_count_frames = 0; in stmmac_xmit()
4697 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4698 tx_q->cur_tx = entry; in stmmac_xmit()
4701 netdev_dbg(priv->dev, in stmmac_xmit()
4703 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_xmit()
4706 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); in stmmac_xmit()
4707 print_pkt(skb->data, skb->len); in stmmac_xmit()
4711 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_xmit()
4713 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_xmit()
4716 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xmit()
4717 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_xmit()
4719 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xmit()
4720 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xmit()
4722 if (priv->sarc_type) in stmmac_xmit()
4723 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_xmit()
4734 des = dma_map_single(priv->device, skb->data, in stmmac_xmit()
4736 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4739 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_xmit()
4740 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4741 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; in stmmac_xmit()
4745 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; in stmmac_xmit()
4746 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; in stmmac_xmit()
4748 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_xmit()
4749 priv->hwts_tx_en)) { in stmmac_xmit()
4751 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_xmit()
4757 csum_insertion, priv->mode, 0, last_segment, in stmmac_xmit()
4758 skb->len); in stmmac_xmit()
4761 if (tx_q->tbs & STMMAC_TBS_EN) { in stmmac_xmit()
4762 struct timespec64 ts = ns_to_timespec64(skb->tstamp); in stmmac_xmit()
4764 tbs_desc = &tx_q->dma_entx[first_entry]; in stmmac_xmit()
4770 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_xmit()
4772 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xmit()
4780 netdev_err(priv->dev, "Tx DMA map failed\n"); in stmmac_xmit()
4783 priv->xstats.tx_dropped++; in stmmac_xmit()
4790 __be16 vlan_proto = veth->h_vlan_proto; in stmmac_rx_vlan()
4794 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || in stmmac_rx_vlan()
4796 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { in stmmac_rx_vlan()
4798 vlanid = ntohs(veth->h_vlan_TCI); in stmmac_rx_vlan()
4799 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); in stmmac_rx_vlan()
4806 * stmmac_rx_refill - refill the used preallocated RX buffers
4810 * that is based on zero-copy.
4814 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill()
4816 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill()
4819 if (priv->dma_cap.host_dma_width <= 32) in stmmac_rx_refill()
4822 while (dirty-- > 0) { in stmmac_rx_refill()
4823 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill()
4827 if (priv->extend_desc) in stmmac_rx_refill()
4828 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill()
4830 p = rx_q->dma_rx + entry; in stmmac_rx_refill()
4832 if (!buf->page) { in stmmac_rx_refill()
4833 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4834 if (!buf->page) in stmmac_rx_refill()
4838 if (priv->sph && !buf->sec_page) { in stmmac_rx_refill()
4839 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4840 if (!buf->sec_page) in stmmac_rx_refill()
4843 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_rx_refill()
4846 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_rx_refill()
4848 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_rx_refill()
4849 if (priv->sph) in stmmac_rx_refill()
4850 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_rx_refill()
4852 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_rx_refill()
4855 rx_q->rx_count_frames++; in stmmac_rx_refill()
4856 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4857 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill()
4858 rx_q->rx_count_frames = 0; in stmmac_rx_refill()
4860 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4861 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill()
4862 if (!priv->use_riwt) in stmmac_rx_refill()
4868 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill()
4870 rx_q->dirty_rx = entry; in stmmac_rx_refill()
4871 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill()
4872 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill()
4873 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill()
4881 int coe = priv->hw->rx_csum; in stmmac_rx_buf1_len()
4884 if (priv->sph && len) in stmmac_rx_buf1_len()
4889 if (priv->sph && hlen) { in stmmac_rx_buf1_len()
4890 priv->xstats.rx_split_hdr_pkt_n++; in stmmac_rx_buf1_len()
4896 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf1_len()
4901 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); in stmmac_rx_buf1_len()
4908 int coe = priv->hw->rx_csum; in stmmac_rx_buf2_len()
4912 if (!priv->sph) in stmmac_rx_buf2_len()
4917 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf2_len()
4922 return plen - len; in stmmac_rx_buf2_len()
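/*
 * Illustrative sketch, not driver code: a simplified version of the
 * buf1/buf2 split above for the non-split-header case. A frame that
 * overflows the first per-descriptor buffer spills its remainder into
 * the closing descriptor's second buffer. The 2KB buffer size is an
 * assumed example value.
 */
#define DEMO_BUF_SZ	2048u

static void demo_split(unsigned int frame_len,
		       unsigned int *buf1, unsigned int *buf2)
{
	*buf1 = frame_len < DEMO_BUF_SZ ? frame_len : DEMO_BUF_SZ;
	/* e.g. frame_len = 3000: buf1 = 2048, buf2 = 952 */
	*buf2 = frame_len - *buf1;
}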
4928 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_xdpf()
4929 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_xdpf()
4930 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_xdpf()
4938 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_xdpf()
4939 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_xdpf()
4940 xdpf->len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_xdpf()
4941 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_xdpf()
4945 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_xdpf()
4946 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_xdpf()
4947 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_xdpf()
4948 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_xdpf()
4950 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_xdpf()
4953 dma_addr = dma_map_single(priv->device, xdpf->data, in stmmac_xdp_xmit_xdpf()
4954 xdpf->len, DMA_TO_DEVICE); in stmmac_xdp_xmit_xdpf()
4955 if (dma_mapping_error(priv->device, dma_addr)) in stmmac_xdp_xmit_xdpf()
4958 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; in stmmac_xdp_xmit_xdpf()
4960 struct page *page = virt_to_page(xdpf->data); in stmmac_xdp_xmit_xdpf()
4963 xdpf->headroom; in stmmac_xdp_xmit_xdpf()
4964 dma_sync_single_for_device(priv->device, dma_addr, in stmmac_xdp_xmit_xdpf()
4965 xdpf->len, DMA_BIDIRECTIONAL); in stmmac_xdp_xmit_xdpf()
4967 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; in stmmac_xdp_xmit_xdpf()
4970 tx_q->tx_skbuff_dma[entry].buf = dma_addr; in stmmac_xdp_xmit_xdpf()
4971 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_xdpf()
4972 tx_q->tx_skbuff_dma[entry].len = xdpf->len; in stmmac_xdp_xmit_xdpf()
4973 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_xdpf()
4974 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_xdpf()
4976 tx_q->xdpf[entry] = xdpf; in stmmac_xdp_xmit_xdpf()
4980 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, in stmmac_xdp_xmit_xdpf()
4981 true, priv->mode, true, true, in stmmac_xdp_xmit_xdpf()
4982 xdpf->len); in stmmac_xdp_xmit_xdpf()
4984 tx_q->tx_count_frames++; in stmmac_xdp_xmit_xdpf()
4986 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_xdpf()
4992 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_xdpf()
4994 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4995 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xdp_xmit_xdpf()
4996 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4999 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_xdpf()
5001 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_xdpf()
5002 tx_q->cur_tx = entry; in stmmac_xdp_xmit_xdpf()
5015 while (index >= priv->plat->tx_queues_to_use) in stmmac_xdp_get_tx_queue()
5016 index -= priv->plat->tx_queues_to_use; in stmmac_xdp_get_tx_queue()
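/*
 * Illustrative sketch, not driver code: the loop above folds an
 * arbitrary CPU/flow index onto the available TX queues by repeated
 * subtraction, i.e. a modulo computed without a divide. Standalone C.
 */
static unsigned int demo_map_queue(unsigned int index, unsigned int nr_queues)
{
	while (index >= nr_queues)
		index -= nr_queues;
	return index;	/* equivalent to index % nr_queues */
}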
5034 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_back()
5037 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_back()
5065 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) in __stmmac_xdp_run_prog()
5071 bpf_warn_invalid_xdp_action(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5074 trace_xdp_exception(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5090 prog = READ_ONCE(priv->xdp_prog); in stmmac_xdp_run_prog()
5098 return ERR_PTR(-res); in stmmac_xdp_run_prog()
5119 unsigned int metasize = xdp->data - xdp->data_meta; in stmmac_construct_skb_zc()
5120 unsigned int datasize = xdp->data_end - xdp->data; in stmmac_construct_skb_zc()
5123 skb = napi_alloc_skb(&ch->rxtx_napi, in stmmac_construct_skb_zc()
5124 xdp->data_end - xdp->data_hard_start); in stmmac_construct_skb_zc()
5128 skb_reserve(skb, xdp->data - xdp->data_hard_start); in stmmac_construct_skb_zc()
5129 memcpy(__skb_put(skb, datasize), xdp->data, datasize); in stmmac_construct_skb_zc()
5140 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_dispatch_skb_zc()
5141 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_dispatch_skb_zc()
5142 unsigned int len = xdp->data_end - xdp->data; in stmmac_dispatch_skb_zc()
5144 int coe = priv->hw->rx_csum; in stmmac_dispatch_skb_zc()
5150 priv->xstats.rx_dropped++; in stmmac_dispatch_skb_zc()
5155 if (priv->hw->hw_vlan_en) in stmmac_dispatch_skb_zc()
5157 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_dispatch_skb_zc()
5160 stmmac_rx_vlan(priv->dev, skb); in stmmac_dispatch_skb_zc()
5161 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_dispatch_skb_zc()
5166 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_dispatch_skb_zc()
5172 napi_gro_receive(&ch->rxtx_napi, skb); in stmmac_dispatch_skb_zc()
5174 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5175 u64_stats_inc(&rxq_stats->napi.rx_pkt_n); in stmmac_dispatch_skb_zc()
5176 u64_stats_add(&rxq_stats->napi.rx_bytes, len); in stmmac_dispatch_skb_zc()
5177 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5182 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill_zc()
5183 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill_zc()
5189 while (budget-- > 0 && entry != rx_q->cur_rx) { in stmmac_rx_refill_zc()
5190 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill_zc()
5194 if (!buf->xdp) { in stmmac_rx_refill_zc()
5195 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_rx_refill_zc()
5196 if (!buf->xdp) { in stmmac_rx_refill_zc()
5202 if (priv->extend_desc) in stmmac_rx_refill_zc()
5203 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill_zc()
5205 rx_desc = rx_q->dma_rx + entry; in stmmac_rx_refill_zc()
5207 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_rx_refill_zc()
5212 rx_q->rx_count_frames++; in stmmac_rx_refill_zc()
5213 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5214 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill_zc()
5215 rx_q->rx_count_frames = 0; in stmmac_rx_refill_zc()
5217 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5218 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill_zc()
5219 if (!priv->use_riwt) in stmmac_rx_refill_zc()
5225 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill_zc()
5229 rx_q->dirty_rx = entry; in stmmac_rx_refill_zc()
5230 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill_zc()
5231 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill_zc()
5232 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill_zc()
5250 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx_zc()
5251 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_zc()
5254 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5265 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx_zc()
5266 if (priv->extend_desc) { in stmmac_rx_zc()
5267 rx_head = (void *)rx_q->dma_erx; in stmmac_rx_zc()
5270 rx_head = (void *)rx_q->dma_rx; in stmmac_rx_zc()
5274 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx_zc()
5275 rx_q->dma_rx_phy, desc_size); in stmmac_rx_zc()
5285 if (!count && rx_q->state_saved) { in stmmac_rx_zc()
5286 error = rx_q->state.error; in stmmac_rx_zc()
5287 len = rx_q->state.len; in stmmac_rx_zc()
5289 rx_q->state_saved = false; in stmmac_rx_zc()
5300 buf = &rx_q->buf_pool[entry]; in stmmac_rx_zc()
5308 if (priv->extend_desc) in stmmac_rx_zc()
5309 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_zc()
5311 p = rx_q->dma_rx + entry; in stmmac_rx_zc()
5314 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx_zc()
5320 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx_zc()
5321 priv->dma_conf.dma_rx_size); in stmmac_rx_zc()
5322 next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5324 if (priv->extend_desc) in stmmac_rx_zc()
5325 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx_zc()
5327 np = rx_q->dma_rx + next_entry; in stmmac_rx_zc()
5332 if (!buf->xdp) in stmmac_rx_zc()
5335 if (priv->extend_desc) in stmmac_rx_zc()
5336 stmmac_rx_extended_status(priv, &priv->xstats, in stmmac_rx_zc()
5337 rx_q->dma_erx + entry); in stmmac_rx_zc()
5339 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5340 buf->xdp = NULL; in stmmac_rx_zc()
5343 if (!priv->hwts_rx_en) in stmmac_rx_zc()
5356 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5357 buf->xdp = NULL; in stmmac_rx_zc()
5363 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); in stmmac_rx_zc()
5364 ctx->priv = priv; in stmmac_rx_zc()
5365 ctx->desc = p; in stmmac_rx_zc()
5366 ctx->ndesc = np; in stmmac_rx_zc()
5374 buf1_len -= ETH_FCS_LEN; in stmmac_rx_zc()
5375 len -= ETH_FCS_LEN; in stmmac_rx_zc()
5379 buf->xdp->data_end = buf->xdp->data + buf1_len; in stmmac_rx_zc()
5380 xsk_buff_dma_sync_for_cpu(buf->xdp); in stmmac_rx_zc()
5382 prog = READ_ONCE(priv->xdp_prog); in stmmac_rx_zc()
5383 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); in stmmac_rx_zc()
5387 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); in stmmac_rx_zc()
5388 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5391 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5400 buf->xdp = NULL; in stmmac_rx_zc()
5406 rx_q->state_saved = true; in stmmac_rx_zc()
5407 rx_q->state.error = error; in stmmac_rx_zc()
5408 rx_q->state.len = len; in stmmac_rx_zc()
5413 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5414 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx_zc()
5415 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5417 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx_zc()
5418 priv->xstats.rx_errors += rx_errors; in stmmac_rx_zc()
5420 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { in stmmac_rx_zc()
5422 xsk_set_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5424 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5433 * stmmac_rx - manage the receive process
5443 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx()
5444 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx()
5445 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_rx()
5447 int status = 0, coe = priv->hw->rx_csum; in stmmac_rx()
5448 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx()
5456 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); in stmmac_rx()
5457 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; in stmmac_rx()
5458 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); in stmmac_rx()
5463 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx()
5464 if (priv->extend_desc) { in stmmac_rx()
5465 rx_head = (void *)rx_q->dma_erx; in stmmac_rx()
5468 rx_head = (void *)rx_q->dma_rx; in stmmac_rx()
5472 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx()
5473 rx_q->dma_rx_phy, desc_size); in stmmac_rx()
5483 if (!count && rx_q->state_saved) { in stmmac_rx()
5484 skb = rx_q->state.skb; in stmmac_rx()
5485 error = rx_q->state.error; in stmmac_rx()
5486 len = rx_q->state.len; in stmmac_rx()
5488 rx_q->state_saved = false; in stmmac_rx()
5501 buf = &rx_q->buf_pool[entry]; in stmmac_rx()
5503 if (priv->extend_desc) in stmmac_rx()
5504 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx()
5506 p = rx_q->dma_rx + entry; in stmmac_rx()
5509 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx()
5514 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx()
5515 priv->dma_conf.dma_rx_size); in stmmac_rx()
5516 next_entry = rx_q->cur_rx; in stmmac_rx()
5518 if (priv->extend_desc) in stmmac_rx()
5519 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx()
5521 np = rx_q->dma_rx + next_entry; in stmmac_rx()
5525 if (priv->extend_desc) in stmmac_rx()
5526 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); in stmmac_rx()
5528 page_pool_put_page(rx_q->page_pool, buf->page, 0, true); in stmmac_rx()
5529 buf->page = NULL; in stmmac_rx()
5531 if (!priv->hwts_rx_en) in stmmac_rx()
5554 buf2_len -= ETH_FCS_LEN; in stmmac_rx()
5555 len -= ETH_FCS_LEN; in stmmac_rx()
5557 buf1_len -= ETH_FCS_LEN; in stmmac_rx()
5558 len -= ETH_FCS_LEN; in stmmac_rx()
5565 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5567 net_prefetch(page_address(buf->page) + in stmmac_rx()
5568 buf->page_offset); in stmmac_rx()
5570 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); in stmmac_rx()
5571 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), in stmmac_rx()
5572 buf->page_offset, buf1_len, true); in stmmac_rx()
5574 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5575 buf->page_offset; in stmmac_rx()
5585 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5586 buf->page_offset; in stmmac_rx()
5591 unsigned int xdp_res = -PTR_ERR(skb); in stmmac_rx()
5594 page_pool_put_page(rx_q->page_pool, in stmmac_rx()
5597 buf->page = NULL; in stmmac_rx()
5613 buf->page = NULL; in stmmac_rx()
5625 buf1_len = ctx.xdp.data_end - ctx.xdp.data; in stmmac_rx()
5627 skb = napi_build_skb(page_address(buf->page), in stmmac_rx()
5628 rx_q->napi_skb_frag_size); in stmmac_rx()
5630 page_pool_recycle_direct(rx_q->page_pool, in stmmac_rx()
5631 buf->page); in stmmac_rx()
5638 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start; in stmmac_rx()
5642 buf->page = NULL; in stmmac_rx()
5644 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5646 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5647 buf->page, buf->page_offset, buf1_len, in stmmac_rx()
5648 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5649 buf->page = NULL; in stmmac_rx()
5653 dma_sync_single_for_cpu(priv->device, buf->sec_addr, in stmmac_rx()
5655 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5656 buf->sec_page, 0, buf2_len, in stmmac_rx()
5657 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5658 buf->sec_page = NULL; in stmmac_rx()
5671 if (priv->hw->hw_vlan_en) in stmmac_rx()
5673 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_rx()
5676 stmmac_rx_vlan(priv->dev, skb); in stmmac_rx()
5678 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_rx()
5683 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_rx()
5689 napi_gro_receive(&ch->rx_napi, skb); in stmmac_rx()
5698 rx_q->state_saved = true; in stmmac_rx()
5699 rx_q->state.skb = skb; in stmmac_rx()
5700 rx_q->state.error = error; in stmmac_rx()
5701 rx_q->state.len = len; in stmmac_rx()
5708 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx()
5709 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); in stmmac_rx()
5710 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); in stmmac_rx()
5711 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx()
5712 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx()
5714 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx()
5715 priv->xstats.rx_errors += rx_errors; in stmmac_rx()
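/*
 * The counter updates above follow the u64_stats seqcount pattern: the
 * NAPI-context writer publishes under update_begin/update_end, and
 * readers (see stmmac_get_stats64() further down) retry if a writer
 * raced with their snapshot. A minimal sketch of the two sides, with
 * hypothetical "foo_stats"/"pkts" names, not taken from this driver:
 */
#include <linux/u64_stats_sync.h>

struct foo_stats {
	struct u64_stats_sync syncp;
	u64_stats_t pkts;
};

static void foo_stats_inc(struct foo_stats *s)
{
	u64_stats_update_begin(&s->syncp);	/* writer: softirq side */
	u64_stats_inc(&s->pkts);
	u64_stats_update_end(&s->syncp);
}

static u64 foo_stats_read(struct foo_stats *s)
{
	unsigned int start;
	u64 val;

	do {					/* reader: retry on race */
		start = u64_stats_fetch_begin(&s->syncp);
		val = u64_stats_read(&s->pkts);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}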
5724 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rx()
5726 u32 chan = ch->index; in stmmac_napi_poll_rx()
5729 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rx()
5730 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5731 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rx()
5732 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5738 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rx()
5739 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_poll_rx()
5740 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rx()
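/*
 * The poll routines in this block (RX above, TX and combined RX/TX
 * below) share one completion idiom: the channel's DMA interrupt stays
 * masked for the whole poll, and is unmasked under ch->lock only once
 * napi_complete_done() confirms the instance was not rescheduled in
 * the meantime. A generic sketch with hypothetical "my_chan",
 * "my_clean_ring" and "my_unmask_irq" names:
 */
struct my_chan {
	struct napi_struct napi;
	spinlock_t lock;
	/* ... ring state ... */
};

static int my_clean_ring(struct my_chan *ch, int budget);
static void my_unmask_irq(struct my_chan *ch);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_chan *ch = container_of(napi, struct my_chan, napi);
	int done = my_clean_ring(ch, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		/* mirrors the stmmac_enable_dma_irq() calls here */
		spin_lock_irqsave(&ch->lock, flags);
		my_unmask_irq(ch);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return done;
}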
5750 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_tx()
5753 u32 chan = ch->index; in stmmac_napi_poll_tx()
5756 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_tx()
5757 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5758 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_tx()
5759 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5767 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_tx()
5768 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_poll_tx()
5769 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_tx()
5783 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rxtx()
5788 u32 chan = ch->index; in stmmac_napi_poll_rxtx()
5790 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rxtx()
5791 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5792 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5793 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5795 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_rxtx()
5796 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5797 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5798 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5817 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5821 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_napi_poll_rxtx()
5822 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5829 return min(rxtx_done, budget - 1); in stmmac_napi_poll_rxtx()
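/*
 * The clamp to budget - 1 above matters: once napi_complete_done() has
 * run, the poll routine must return strictly less than the full budget.
 * Returning budget would tell the NAPI core that work remains, leaving
 * the instance scheduled even though its interrupt was just re-enabled.
 */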
5849 * stmmac_set_rx_mode - entry point for multicast addressing
5861 stmmac_set_filter(priv, priv->hw, dev); in stmmac_set_rx_mode()
5865 * stmmac_change_mtu - entry point to change MTU size for the device.
5872 * 0 on success and a negative errno value on failure. in stmmac_change_mtu()
5878 int txfifosz = priv->plat->tx_fifo_size; in stmmac_change_mtu()
5884 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_change_mtu()
5886 txfifosz /= priv->plat->tx_queues_to_use; in stmmac_change_mtu()
5889 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); in stmmac_change_mtu()
5890 return -EINVAL; in stmmac_change_mtu()
5897 return -EINVAL; in stmmac_change_mtu()
5900 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); in stmmac_change_mtu()
5904 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", in stmmac_change_mtu()
5915 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); in stmmac_change_mtu()
5924 WRITE_ONCE(dev->mtu, mtu); in stmmac_change_mtu()
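/*
 * With the interface up, the elided body of stmmac_change_mtu() does
 * not resize rings in place; it builds a fresh DMA configuration for
 * the new size, tears the interface down, and reopens it on the new
 * configuration before committing the MTU. In outline (hypothetical
 * helper names standing in for the driver's internal calls, error
 * handling elided):
 *
 *	new_conf = my_alloc_dma_conf(priv, new_mtu);	// new rings
 *	my_release(dev);				// stop on old rings
 *	my_open_with_conf(dev, new_conf);		// start on new ones
 */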
5935 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) in stmmac_fix_features()
5938 if (!priv->plat->tx_coe) in stmmac_fix_features()
5946 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) in stmmac_fix_features()
5950 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_fix_features()
5952 priv->tso = true; in stmmac_fix_features()
5954 priv->tso = false; in stmmac_fix_features()
5967 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_set_features()
5969 priv->hw->rx_csum = 0; in stmmac_set_features()
5973 stmmac_rx_ipc(priv, priv->hw); in stmmac_set_features()
5975 if (priv->sph_cap) { in stmmac_set_features()
5976 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_set_features()
5979 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) in stmmac_set_features()
5980 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_set_features()
5984 priv->hw->hw_vlan_en = true; in stmmac_set_features()
5986 priv->hw->hw_vlan_en = false; in stmmac_set_features()
5988 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_set_features()
5995 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_common_interrupt()
5996 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_common_interrupt()
6001 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_common_interrupt()
6004 if (priv->irq_wake) in stmmac_common_interrupt()
6005 pm_wakeup_event(priv->device, 0); in stmmac_common_interrupt()
6007 if (priv->dma_cap.estsel) in stmmac_common_interrupt()
6008 stmmac_est_irq_status(priv, priv, priv->dev, in stmmac_common_interrupt()
6009 &priv->xstats, tx_cnt); in stmmac_common_interrupt()
6015 if ((priv->plat->has_gmac) || xmac) { in stmmac_common_interrupt()
6016 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); in stmmac_common_interrupt()
6021 priv->tx_path_in_lpi_mode = true; in stmmac_common_interrupt()
6023 priv->tx_path_in_lpi_mode = false; in stmmac_common_interrupt()
6027 stmmac_host_mtl_irq_status(priv, priv->hw, queue); in stmmac_common_interrupt()
6030 if (priv->hw->pcs && in stmmac_common_interrupt()
6031 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { in stmmac_common_interrupt()
6032 if (priv->xstats.pcs_link) in stmmac_common_interrupt()
6033 netif_carrier_on(priv->dev); in stmmac_common_interrupt()
6035 netif_carrier_off(priv->dev); in stmmac_common_interrupt()
6043 * stmmac_interrupt - main ISR
6050 * o Core interrupts to manage: remote wake-up, management counter, LPI
6059 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_interrupt()
6063 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) in stmmac_interrupt()
6081 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_mac_interrupt()
6096 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_safety_interrupt()
6109 int chan = tx_q->queue_index; in stmmac_msi_intr_tx()
6117 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_tx()
6136 int chan = rx_q->queue_index; in stmmac_msi_intr_rx()
6143 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_rx()
6152 * stmmac_ioctl - Entry point for the Ioctl
6163 int ret = -EOPNOTSUPP; in stmmac_ioctl()
6166 return -EINVAL; in stmmac_ioctl()
6172 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); in stmmac_ioctl()
6191 int ret = -EOPNOTSUPP; in stmmac_setup_tc_block_cb()
6193 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) in stmmac_setup_tc_block_cb()
6237 return -EOPNOTSUPP; in stmmac_setup_tc()
6244 int gso = skb_shinfo(skb)->gso_type; in stmmac_select_queue()
6256 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in stmmac_select_queue()
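/*
 * Effect of the gso check above: TSO/UDP-GSO flows appear to be pinned
 * to TX queue 0 by the elided branch, while all other traffic takes
 * the stack's default pick, folded modulo the live queue count so a
 * runtime queue-count change can never select a stale queue.
 */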
6264 ret = pm_runtime_resume_and_get(priv->device); in stmmac_set_mac_address()
6272 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); in stmmac_set_mac_address()
6275 pm_runtime_put(priv->device); in stmmac_set_mac_address()
6297 le32_to_cpu(p->des0), le32_to_cpu(p->des1), in sysfs_display_ring()
6298 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); in sysfs_display_ring()
6300 p = &(++ep)->basic; in sysfs_display_ring()
6308 struct net_device *dev = seq->private; in stmmac_rings_status_show()
6310 u32 rx_count = priv->plat->rx_queues_to_use; in stmmac_rings_status_show()
6311 u32 tx_count = priv->plat->tx_queues_to_use; in stmmac_rings_status_show()
6314 if ((dev->flags & IFF_UP) == 0) in stmmac_rings_status_show()
6318 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rings_status_show()
6322 if (priv->extend_desc) { in stmmac_rings_status_show()
6324 sysfs_display_ring((void *)rx_q->dma_erx, in stmmac_rings_status_show()
6325 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6328 sysfs_display_ring((void *)rx_q->dma_rx, in stmmac_rings_status_show()
6329 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6334 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_rings_status_show()
6338 if (priv->extend_desc) { in stmmac_rings_status_show()
6340 sysfs_display_ring((void *)tx_q->dma_etx, in stmmac_rings_status_show()
6341 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6342 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { in stmmac_rings_status_show()
6344 sysfs_display_ring((void *)tx_q->dma_tx, in stmmac_rings_status_show()
6345 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6371 struct net_device *dev = seq->private; in stmmac_dma_cap_show()
6374 if (!priv->hw_cap_support) { in stmmac_dma_cap_show()
6384 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); in stmmac_dma_cap_show()
6386 (priv->dma_cap.mbps_1000) ? "Y" : "N"); in stmmac_dma_cap_show()
6388 (priv->dma_cap.half_duplex) ? "Y" : "N"); in stmmac_dma_cap_show()
6389 if (priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6392 priv->dma_cap.multi_addr); in stmmac_dma_cap_show()
6395 (priv->dma_cap.hash_filter) ? "Y" : "N"); in stmmac_dma_cap_show()
6397 (priv->dma_cap.multi_addr) ? "Y" : "N"); in stmmac_dma_cap_show()
6400 (priv->dma_cap.pcs) ? "Y" : "N"); in stmmac_dma_cap_show()
6401 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", in stmmac_dma_cap_show()
6402 (priv->dma_cap.sma_mdio) ? "Y" : "N"); in stmmac_dma_cap_show()
6404 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); in stmmac_dma_cap_show()
6406 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); in stmmac_dma_cap_show()
6408 (priv->dma_cap.rmon) ? "Y" : "N"); in stmmac_dma_cap_show()
6409 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", in stmmac_dma_cap_show()
6410 (priv->dma_cap.time_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6411 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", in stmmac_dma_cap_show()
6412 (priv->dma_cap.atime_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6413 if (priv->plat->has_xgmac) in stmmac_dma_cap_show()
6415 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); in stmmac_dma_cap_show()
6416 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", in stmmac_dma_cap_show()
6417 (priv->dma_cap.eee) ? "Y" : "N"); in stmmac_dma_cap_show()
6418 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); in stmmac_dma_cap_show()
6420 (priv->dma_cap.tx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6421 if (priv->synopsys_id >= DWMAC_CORE_4_00 || in stmmac_dma_cap_show()
6422 priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6424 (priv->dma_cap.rx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6427 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); in stmmac_dma_cap_show()
6429 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); in stmmac_dma_cap_show()
6431 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); in stmmac_dma_cap_show()
6434 priv->dma_cap.number_rx_channel); in stmmac_dma_cap_show()
6436 priv->dma_cap.number_tx_channel); in stmmac_dma_cap_show()
6438 priv->dma_cap.number_rx_queues); in stmmac_dma_cap_show()
6440 priv->dma_cap.number_tx_queues); in stmmac_dma_cap_show()
6442 (priv->dma_cap.enh_desc) ? "Y" : "N"); in stmmac_dma_cap_show()
6443 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); in stmmac_dma_cap_show()
6444 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); in stmmac_dma_cap_show()
6445 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? in stmmac_dma_cap_show()
6446 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); in stmmac_dma_cap_show()
6447 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); in stmmac_dma_cap_show()
6449 priv->dma_cap.pps_out_num); in stmmac_dma_cap_show()
6451 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); in stmmac_dma_cap_show()
6453 priv->dma_cap.frpsel ? "Y" : "N"); in stmmac_dma_cap_show()
6455 priv->dma_cap.host_dma_width); in stmmac_dma_cap_show()
6457 priv->dma_cap.rssen ? "Y" : "N"); in stmmac_dma_cap_show()
6459 priv->dma_cap.vlhash ? "Y" : "N"); in stmmac_dma_cap_show()
6461 priv->dma_cap.sphen ? "Y" : "N"); in stmmac_dma_cap_show()
6463 priv->dma_cap.vlins ? "Y" : "N"); in stmmac_dma_cap_show()
6465 priv->dma_cap.dvlan ? "Y" : "N"); in stmmac_dma_cap_show()
6467 priv->dma_cap.l3l4fnum); in stmmac_dma_cap_show()
6469 priv->dma_cap.arpoffsel ? "Y" : "N"); in stmmac_dma_cap_show()
6471 priv->dma_cap.estsel ? "Y" : "N"); in stmmac_dma_cap_show()
6473 priv->dma_cap.fpesel ? "Y" : "N"); in stmmac_dma_cap_show()
6474 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", in stmmac_dma_cap_show()
6475 priv->dma_cap.tbssel ? "Y" : "N"); in stmmac_dma_cap_show()
6477 priv->dma_cap.tbs_ch_num); in stmmac_dma_cap_show()
6478 seq_printf(seq, "\tPer-Stream Filtering: %s\n", in stmmac_dma_cap_show()
6479 priv->dma_cap.sgfsel ? "Y" : "N"); in stmmac_dma_cap_show()
6481 BIT(priv->dma_cap.ttsfd) >> 1); in stmmac_dma_cap_show()
6483 priv->dma_cap.numtc); in stmmac_dma_cap_show()
6485 priv->dma_cap.dcben ? "Y" : "N"); in stmmac_dma_cap_show()
6487 priv->dma_cap.advthword ? "Y" : "N"); in stmmac_dma_cap_show()
6489 priv->dma_cap.ptoen ? "Y" : "N"); in stmmac_dma_cap_show()
6490 seq_printf(seq, "\tOne-Step Timestamping: %s\n", in stmmac_dma_cap_show()
6491 priv->dma_cap.osten ? "Y" : "N"); in stmmac_dma_cap_show()
6492 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", in stmmac_dma_cap_show()
6493 priv->dma_cap.pfcen ? "Y" : "N"); in stmmac_dma_cap_show()
6495 BIT(priv->dma_cap.frpes) << 6); in stmmac_dma_cap_show()
6497 BIT(priv->dma_cap.frpbs) << 6); in stmmac_dma_cap_show()
6499 priv->dma_cap.frppipe_num); in stmmac_dma_cap_show()
6501 priv->dma_cap.nrvf_num ? in stmmac_dma_cap_show()
6502 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); in stmmac_dma_cap_show()
6504 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); in stmmac_dma_cap_show()
6506 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); in stmmac_dma_cap_show()
6507 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", in stmmac_dma_cap_show()
6508 priv->dma_cap.cbtisel ? "Y" : "N"); in stmmac_dma_cap_show()
6510 priv->dma_cap.aux_snapshot_n); in stmmac_dma_cap_show()
6511 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", in stmmac_dma_cap_show()
6512 priv->dma_cap.pou_ost_en ? "Y" : "N"); in stmmac_dma_cap_show()
6514 priv->dma_cap.edma ? "Y" : "N"); in stmmac_dma_cap_show()
6516 priv->dma_cap.ediffc ? "Y" : "N"); in stmmac_dma_cap_show()
6518 priv->dma_cap.vxn ? "Y" : "N"); in stmmac_dma_cap_show()
6520 priv->dma_cap.dbgmem ? "Y" : "N"); in stmmac_dma_cap_show()
6522 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); in stmmac_dma_cap_show()
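/*
 * Several capability fields printed above are log2-encoded, hence the
 * BIT() shifts when expanding them. Worked examples (illustrative
 * values only): hash_tb_sz = 2 prints BIT(2) << 5 = 128 hash-table
 * entries; frpbs = 1 prints BIT(1) << 6 = 128 bytes of Flexible RX
 * Parser buffer; estdep = 3 prints BIT(3) << 5 = 256 EST depth entries.
 */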
6535 if (dev->netdev_ops != &stmmac_netdev_ops) in stmmac_device_event()
6540 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name); in stmmac_device_event()
6558 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); in stmmac_init_fs()
6561 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6565 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
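/*
 * Both files created above are read-only seq_files. Typical usage,
 * assuming debugfs is mounted at the usual location and the interface
 * is named eth0 (the per-device directory follows dev->name under the
 * driver's stmmac_fs_dir root):
 *
 *   # cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   # cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */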
6575 debugfs_remove_recursive(priv->dbgfs_dir); in stmmac_exit_fs()
6610 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { in stmmac_vlan_update()
6617 if (!priv->dma_cap.vlhash) { in stmmac_vlan_update()
6619 return -EOPNOTSUPP; in stmmac_vlan_update()
6625 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); in stmmac_vlan_update()
6634 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_add_vid()
6641 set_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6644 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6648 if (priv->hw->num_vlan) { in stmmac_vlan_rx_add_vid()
6649 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_add_vid()
6654 pm_runtime_put(priv->device); in stmmac_vlan_rx_add_vid()
6665 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_kill_vid()
6672 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_kill_vid()
6674 if (priv->hw->num_vlan) { in stmmac_vlan_rx_kill_vid()
6675 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_kill_vid()
6683 pm_runtime_put(priv->device); in stmmac_vlan_rx_kill_vid()
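/*
 * Both ndo hooks above are exercised by ordinary VLAN management from
 * userspace, e.g.:
 *
 *   # ip link add link eth0 name eth0.100 type vlan id 100
 *   # ip link del eth0.100
 *
 * Each add/kill updates the active_vlans bitmap, recomputes the hash
 * filter via stmmac_vlan_update(), and, when the MAC has perfect-match
 * registers (hw->num_vlan), mirrors the change into the HW RX filter.
 */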
6692 switch (bpf->command) { in stmmac_bpf()
6694 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); in stmmac_bpf()
6696 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, in stmmac_bpf()
6697 bpf->xsk.queue_id); in stmmac_bpf()
6699 return -EOPNOTSUPP; in stmmac_bpf()
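/*
 * .ndo_bpf dispatch: XDP_SETUP_PROG attaches or detaches an XDP
 * program, XDP_SETUP_XSK_POOL binds an AF_XDP buffer pool to one
 * queue. Both arrive from userspace via the standard netlink/bpf
 * plumbing, e.g. with iproute2 (object and section names are
 * hypothetical):
 *
 *   # ip link set dev eth0 xdpdrv obj prog.o sec xdp
 */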
6712 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) in stmmac_xdp_xmit()
6713 return -ENETDOWN; in stmmac_xdp_xmit()
6716 return -EINVAL; in stmmac_xdp_xmit()
6719 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit()
6722 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit()
6747 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_rx_queue()
6750 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_rx_queue()
6751 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_disable_rx_queue()
6752 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_rx_queue()
6755 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_rx_queue()
6760 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_enable_rx_queue()
6761 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_rx_queue()
6766 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6768 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); in stmmac_enable_rx_queue()
6772 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); in stmmac_enable_rx_queue()
6774 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6775 netdev_err(priv->dev, "Failed to init RX desc.\n"); in stmmac_enable_rx_queue()
6780 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6782 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_rx_queue()
6783 rx_q->dma_rx_phy, rx_q->queue_index); in stmmac_enable_rx_queue()
6785 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * in stmmac_enable_rx_queue()
6787 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6788 rx_q->rx_tail_addr, rx_q->queue_index); in stmmac_enable_rx_queue()
6790 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_enable_rx_queue()
6791 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_enable_rx_queue()
6792 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6794 rx_q->queue_index); in stmmac_enable_rx_queue()
6796 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6797 priv->dma_conf.dma_buf_sz, in stmmac_enable_rx_queue()
6798 rx_q->queue_index); in stmmac_enable_rx_queue()
6803 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_rx_queue()
6804 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_enable_rx_queue()
6805 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_rx_queue()
6810 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_tx_queue()
6813 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_tx_queue()
6814 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_disable_tx_queue()
6815 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_tx_queue()
6818 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_tx_queue()
6823 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_tx_queue()
6824 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_tx_queue()
6828 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6830 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); in stmmac_enable_tx_queue()
6834 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6836 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6837 netdev_err(priv->dev, "Failed to init TX desc.\n"); in stmmac_enable_tx_queue()
6842 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6844 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_tx_queue()
6845 tx_q->dma_tx_phy, tx_q->queue_index); in stmmac_enable_tx_queue()
6847 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_enable_tx_queue()
6848 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); in stmmac_enable_tx_queue()
6850 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_enable_tx_queue()
6851 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_tx_queue()
6852 tx_q->tx_tail_addr, tx_q->queue_index); in stmmac_enable_tx_queue()
6856 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_tx_queue()
6857 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_enable_tx_queue()
6858 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_tx_queue()
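/*
 * The four queue helpers above exist so a single RX/TX queue can be
 * quiesced and rebuilt in isolation -- e.g. when an AF_XDP pool is
 * attached to one queue -- instead of bouncing the whole interface:
 * mask that channel's DMA interrupt under ch->lock, free and
 * re-allocate only that queue's descriptor ring, reprogram the DMA
 * channel, then unmask again.
 */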
6872 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_release()
6873 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_release()
6882 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_release()
6885 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_xdp_release()
6897 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_xdp_open()
6898 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_xdp_open()
6907 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6914 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); in stmmac_xdp_open()
6925 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_xdp_open()
6926 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_xdp_open()
6930 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_xdp_open()
6934 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_xdp_open()
6936 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6937 rx_q->dma_rx_phy, chan); in stmmac_xdp_open()
6939 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_xdp_open()
6940 (rx_q->buf_alloc_num * in stmmac_xdp_open()
6942 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6943 rx_q->rx_tail_addr, chan); in stmmac_xdp_open()
6945 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_xdp_open()
6946 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_xdp_open()
6947 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6949 rx_q->queue_index); in stmmac_xdp_open()
6951 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6952 priv->dma_conf.dma_buf_sz, in stmmac_xdp_open()
6953 rx_q->queue_index); in stmmac_xdp_open()
6956 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_xdp_open()
6961 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_xdp_open()
6963 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6964 tx_q->dma_tx_phy, chan); in stmmac_xdp_open()
6966 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_xdp_open()
6967 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6968 tx_q->tx_tail_addr, chan); in stmmac_xdp_open()
6970 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_xdp_open()
6971 tx_q->txtimer.function = stmmac_tx_timer; in stmmac_xdp_open()
6975 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_xdp_open()
6993 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_open()
6994 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_open()
6998 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
7010 if (test_bit(STMMAC_DOWN, &priv->state) || in stmmac_xsk_wakeup()
7011 !netif_carrier_ok(priv->dev)) in stmmac_xsk_wakeup()
7012 return -ENETDOWN; in stmmac_xsk_wakeup()
7015 return -EINVAL; in stmmac_xsk_wakeup()
7017 if (queue >= priv->plat->rx_queues_to_use || in stmmac_xsk_wakeup()
7018 queue >= priv->plat->tx_queues_to_use) in stmmac_xsk_wakeup()
7019 return -EINVAL; in stmmac_xsk_wakeup()
7021 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_xsk_wakeup()
7022 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xsk_wakeup()
7023 ch = &priv->channel[queue]; in stmmac_xsk_wakeup()
7025 if (!rx_q->xsk_pool && !tx_q->xsk_pool) in stmmac_xsk_wakeup()
7026 return -EINVAL; in stmmac_xsk_wakeup()
7028 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { in stmmac_xsk_wakeup()
7029 /* EQoS does not have a per-DMA-channel SW interrupt, in stmmac_xsk_wakeup()
7030 * so we schedule the RX NAPI straight away. in stmmac_xsk_wakeup()
7032 if (likely(napi_schedule_prep(&ch->rxtx_napi))) in stmmac_xsk_wakeup()
7033 __napi_schedule(&ch->rxtx_napi); in stmmac_xsk_wakeup()
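/*
 * stmmac_xsk_wakeup() is the driver half of XDP_USE_NEED_WAKEUP: the
 * AF_XDP application issues a syscall only when the ring flags request
 * it, rather than once per packet. Userspace sketch using the libxdp
 * xsk helpers ("xsk" is a hypothetical bound-socket wrapper):
 *
 *	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
 *		sendto(xsk_socket__fd(xsk->socket), NULL, 0,
 *		       MSG_DONTWAIT, NULL, 0);
 */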
7042 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_get_stats64()
7043 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_get_stats64()
7048 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; in stmmac_get_stats64()
7053 start = u64_stats_fetch_begin(&txq_stats->q_syncp); in stmmac_get_stats64()
7054 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); in stmmac_get_stats64()
7055 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); in stmmac_get_stats64()
7057 start = u64_stats_fetch_begin(&txq_stats->napi_syncp); in stmmac_get_stats64()
7058 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); in stmmac_get_stats64()
7059 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); in stmmac_get_stats64()
7061 stats->tx_packets += tx_packets; in stmmac_get_stats64()
7062 stats->tx_bytes += tx_bytes; in stmmac_get_stats64()
7066 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; in stmmac_get_stats64()
7071 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); in stmmac_get_stats64()
7072 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); in stmmac_get_stats64()
7073 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); in stmmac_get_stats64()
7074 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); in stmmac_get_stats64()
7076 stats->rx_packets += rx_packets; in stmmac_get_stats64()
7077 stats->rx_bytes += rx_bytes; in stmmac_get_stats64()
7080 stats->rx_dropped = priv->xstats.rx_dropped; in stmmac_get_stats64()
7081 stats->rx_errors = priv->xstats.rx_errors; in stmmac_get_stats64()
7082 stats->tx_dropped = priv->xstats.tx_dropped; in stmmac_get_stats64()
7083 stats->tx_errors = priv->xstats.tx_errors; in stmmac_get_stats64()
7084 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; in stmmac_get_stats64()
7085 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; in stmmac_get_stats64()
7086 stats->rx_length_errors = priv->xstats.rx_length; in stmmac_get_stats64()
7087 stats->rx_crc_errors = priv->xstats.rx_crc_errors; in stmmac_get_stats64()
7088 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; in stmmac_get_stats64()
7089 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; in stmmac_get_stats64()
7115 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) in stmmac_reset_subtask()
7117 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_reset_subtask()
7120 netdev_err(priv->dev, "Reset adapter.\n"); in stmmac_reset_subtask()
7123 netif_trans_update(priv->dev); in stmmac_reset_subtask()
7124 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) in stmmac_reset_subtask()
7127 set_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7128 dev_close(priv->dev); in stmmac_reset_subtask()
7129 dev_open(priv->dev, NULL); in stmmac_reset_subtask()
7130 clear_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7131 clear_bit(STMMAC_RESETING, &priv->state); in stmmac_reset_subtask()
7141 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); in stmmac_service_task()
7145 * stmmac_hw_init - Init the MAC device
7156 /* dwmac-sun8i only works in chain mode */ in stmmac_hw_init()
7157 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) in stmmac_hw_init()
7159 priv->chain_mode = chain_mode; in stmmac_hw_init()
7167 priv->hw_cap_support = stmmac_get_hw_features(priv); in stmmac_hw_init()
7168 if (priv->hw_cap_support) { in stmmac_hw_init()
7169 dev_info(priv->device, "DMA HW capability register supported\n"); in stmmac_hw_init()
7176 priv->plat->enh_desc = priv->dma_cap.enh_desc; in stmmac_hw_init()
7177 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && in stmmac_hw_init()
7178 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); in stmmac_hw_init()
7179 priv->hw->pmt = priv->plat->pmt; in stmmac_hw_init()
7180 if (priv->dma_cap.hash_tb_sz) { in stmmac_hw_init()
7181 priv->hw->multicast_filter_bins = in stmmac_hw_init()
7182 (BIT(priv->dma_cap.hash_tb_sz) << 5); in stmmac_hw_init()
7183 priv->hw->mcast_bits_log2 = in stmmac_hw_init()
7184 ilog2(priv->hw->multicast_filter_bins); in stmmac_hw_init()
7188 if (priv->plat->force_thresh_dma_mode) in stmmac_hw_init()
7189 priv->plat->tx_coe = 0; in stmmac_hw_init()
7191 priv->plat->tx_coe = priv->dma_cap.tx_coe; in stmmac_hw_init()
7194 priv->plat->rx_coe = priv->dma_cap.rx_coe; in stmmac_hw_init()
7196 if (priv->dma_cap.rx_coe_type2) in stmmac_hw_init()
7197 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; in stmmac_hw_init()
7198 else if (priv->dma_cap.rx_coe_type1) in stmmac_hw_init()
7199 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; in stmmac_hw_init()
7202 dev_info(priv->device, "No HW DMA feature register supported\n"); in stmmac_hw_init()
7205 if (priv->plat->rx_coe) { in stmmac_hw_init()
7206 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_hw_init()
7207 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); in stmmac_hw_init()
7208 if (priv->synopsys_id < DWMAC_CORE_4_00) in stmmac_hw_init()
7209 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); in stmmac_hw_init()
7211 if (priv->plat->tx_coe) in stmmac_hw_init()
7212 dev_info(priv->device, "TX Checksum insertion supported\n"); in stmmac_hw_init()
7214 if (priv->plat->pmt) { in stmmac_hw_init()
7215 dev_info(priv->device, "Wake-Up On LAN supported\n"); in stmmac_hw_init()
7216 device_set_wakeup_capable(priv->device, 1); in stmmac_hw_init()
7219 if (priv->dma_cap.tsoen) in stmmac_hw_init()
7220 dev_info(priv->device, "TSO supported\n"); in stmmac_hw_init()
7222 if (priv->dma_cap.number_rx_queues && in stmmac_hw_init()
7223 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) { in stmmac_hw_init()
7224 dev_warn(priv->device, in stmmac_hw_init()
7226 priv->plat->rx_queues_to_use); in stmmac_hw_init()
7227 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues; in stmmac_hw_init()
7229 if (priv->dma_cap.number_tx_queues && in stmmac_hw_init()
7230 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) { in stmmac_hw_init()
7231 dev_warn(priv->device, in stmmac_hw_init()
7233 priv->plat->tx_queues_to_use); in stmmac_hw_init()
7234 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues; in stmmac_hw_init()
7237 if (priv->dma_cap.rx_fifo_size && in stmmac_hw_init()
7238 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) { in stmmac_hw_init()
7239 dev_warn(priv->device, in stmmac_hw_init()
7241 priv->plat->rx_fifo_size); in stmmac_hw_init()
7242 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size; in stmmac_hw_init()
7244 if (priv->dma_cap.tx_fifo_size && in stmmac_hw_init()
7245 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) { in stmmac_hw_init()
7246 dev_warn(priv->device, in stmmac_hw_init()
7248 priv->plat->tx_fifo_size); in stmmac_hw_init()
7249 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size; in stmmac_hw_init()
7252 priv->hw->vlan_fail_q_en = in stmmac_hw_init()
7253 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); in stmmac_hw_init()
7254 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; in stmmac_hw_init()
7257 if (priv->hwif_quirks) { in stmmac_hw_init()
7258 ret = priv->hwif_quirks(priv); in stmmac_hw_init()
7268 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || in stmmac_hw_init()
7269 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { in stmmac_hw_init()
7270 priv->use_riwt = 1; in stmmac_hw_init()
7271 dev_info(priv->device, in stmmac_hw_init()
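/*
 * Net effect of the clamping above: platform/DT requests can narrow
 * what the synthesized core advertises but never widen it. E.g. a
 * platform asking for 8 RX queues on a core with number_rx_queues = 4
 * gets a warning and runs with 4; FIFO sizes are capped the same way.
 * (Illustrative numbers.)
 */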
7283 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_add()
7286 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_add()
7288 ch->priv_data = priv; in stmmac_napi_add()
7289 ch->index = queue; in stmmac_napi_add()
7290 spin_lock_init(&ch->lock); in stmmac_napi_add()
7292 if (queue < priv->plat->rx_queues_to_use) { in stmmac_napi_add()
7293 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); in stmmac_napi_add()
7295 if (queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7296 netif_napi_add_tx(dev, &ch->tx_napi, in stmmac_napi_add()
7299 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_add()
7300 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7301 netif_napi_add(dev, &ch->rxtx_napi, in stmmac_napi_add()
7312 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_del()
7315 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_del()
7317 if (queue < priv->plat->rx_queues_to_use) in stmmac_napi_del()
7318 netif_napi_del(&ch->rx_napi); in stmmac_napi_del()
7319 if (queue < priv->plat->tx_queues_to_use) in stmmac_napi_del()
7320 netif_napi_del(&ch->tx_napi); in stmmac_napi_del()
7321 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_del()
7322 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_del()
7323 netif_napi_del(&ch->rxtx_napi); in stmmac_napi_del()
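/*
 * Layout created by stmmac_napi_add() above: up to three NAPI
 * instances per channel -- rx_napi and tx_napi for the normal split
 * path, plus rxtx_napi, polled only while that queue runs in AF_XDP
 * zero-copy mode (the af_xdp_zc_qps test in the queue enable/disable
 * helpers). netif_napi_add_tx() registers the TX instance as TX-only,
 * which among other things excludes it from RX busy polling.
 */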
7338 priv->plat->rx_queues_to_use = rx_cnt; in stmmac_reinit_queues()
7339 priv->plat->tx_queues_to_use = tx_cnt; in stmmac_reinit_queues()
7341 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_reinit_queues()
7342 priv->rss.table[i] = ethtool_rxfh_indir_default(i, in stmmac_reinit_queues()
7361 priv->dma_conf.dma_rx_size = rx_size; in stmmac_reinit_ringparam()
7362 priv->dma_conf.dma_tx_size = tx_size; in stmmac_reinit_ringparam()
7373 struct dma_desc *desc_contains_ts = ctx->desc; in stmmac_xdp_rx_timestamp()
7374 struct stmmac_priv *priv = ctx->priv; in stmmac_xdp_rx_timestamp()
7375 struct dma_desc *ndesc = ctx->ndesc; in stmmac_xdp_rx_timestamp()
7376 struct dma_desc *desc = ctx->desc; in stmmac_xdp_rx_timestamp()
7379 if (!priv->hwts_rx_en) in stmmac_xdp_rx_timestamp()
7380 return -ENODATA; in stmmac_xdp_rx_timestamp()
7383 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_xdp_rx_timestamp()
7387 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { in stmmac_xdp_rx_timestamp()
7388 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); in stmmac_xdp_rx_timestamp()
7389 ns -= priv->plat->cdc_error_adj; in stmmac_xdp_rx_timestamp()
7394 return -ENODATA; in stmmac_xdp_rx_timestamp()
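/*
 * This implements the RX-timestamp half of xdp_metadata_ops. An XDP
 * program consumes it through the bpf_xdp_metadata_rx_timestamp()
 * kfunc; a prog-side sketch (assumes vmlinux.h plus the usual libbpf
 * helper headers):
 */
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int rx_ts_example(struct xdp_md *ctx)
{
	__u64 ts;

	/* 0 on success, negative errno (e.g. -ENODATA) otherwise */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp: %llu", ts);

	return XDP_PASS;
}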
7423 return -ENOMEM; in stmmac_dvr_probe()
7428 priv->device = device; in stmmac_dvr_probe()
7429 priv->dev = ndev; in stmmac_dvr_probe()
7432 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); in stmmac_dvr_probe()
7434 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); in stmmac_dvr_probe()
7435 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); in stmmac_dvr_probe()
7438 priv->xstats.pcpu_stats = in stmmac_dvr_probe()
7440 if (!priv->xstats.pcpu_stats) in stmmac_dvr_probe()
7441 return -ENOMEM; in stmmac_dvr_probe()
7444 priv->pause = pause; in stmmac_dvr_probe()
7445 priv->plat = plat_dat; in stmmac_dvr_probe()
7446 priv->ioaddr = res->addr; in stmmac_dvr_probe()
7447 priv->dev->base_addr = (unsigned long)res->addr; in stmmac_dvr_probe()
7448 priv->plat->dma_cfg->multi_msi_en = in stmmac_dvr_probe()
7449 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); in stmmac_dvr_probe()
7451 priv->dev->irq = res->irq; in stmmac_dvr_probe()
7452 priv->wol_irq = res->wol_irq; in stmmac_dvr_probe()
7453 priv->lpi_irq = res->lpi_irq; in stmmac_dvr_probe()
7454 priv->sfty_irq = res->sfty_irq; in stmmac_dvr_probe()
7455 priv->sfty_ce_irq = res->sfty_ce_irq; in stmmac_dvr_probe()
7456 priv->sfty_ue_irq = res->sfty_ue_irq; in stmmac_dvr_probe()
7458 priv->rx_irq[i] = res->rx_irq[i]; in stmmac_dvr_probe()
7460 priv->tx_irq[i] = res->tx_irq[i]; in stmmac_dvr_probe()
7462 if (!is_zero_ether_addr(res->mac)) in stmmac_dvr_probe()
7463 eth_hw_addr_set(priv->dev, res->mac); in stmmac_dvr_probe()
7465 dev_set_drvdata(device, priv->dev); in stmmac_dvr_probe()
7470 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); in stmmac_dvr_probe()
7471 if (!priv->af_xdp_zc_qps) in stmmac_dvr_probe()
7472 return -ENOMEM; in stmmac_dvr_probe()
7475 priv->wq = create_singlethread_workqueue("stmmac_wq"); in stmmac_dvr_probe()
7476 if (!priv->wq) { in stmmac_dvr_probe()
7477 dev_err(priv->device, "failed to create workqueue\n"); in stmmac_dvr_probe()
7478 ret = -ENOMEM; in stmmac_dvr_probe()
7482 INIT_WORK(&priv->service_task, stmmac_service_task); in stmmac_dvr_probe()
7484 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); in stmmac_dvr_probe()
7490 priv->plat->phy_addr = phyaddr; in stmmac_dvr_probe()
7492 if (priv->plat->stmmac_rst) { in stmmac_dvr_probe()
7493 ret = reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7494 reset_control_deassert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7498 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7499 reset_control_reset(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7502 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_probe()
7503 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7504 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", in stmmac_dvr_probe()
7517 if (priv->synopsys_id < DWMAC_CORE_5_20) in stmmac_dvr_probe()
7518 priv->plat->dma_cfg->dche = false; in stmmac_dvr_probe()
7522 ndev->netdev_ops = &stmmac_netdev_ops; in stmmac_dvr_probe()
7524 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; in stmmac_dvr_probe()
7525 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops; in stmmac_dvr_probe()
7527 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in stmmac_dvr_probe()
7529 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in stmmac_dvr_probe()
7534 ndev->hw_features |= NETIF_F_HW_TC; in stmmac_dvr_probe()
7537 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_dvr_probe()
7538 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in stmmac_dvr_probe()
7539 if (priv->plat->has_gmac4) in stmmac_dvr_probe()
7540 ndev->hw_features |= NETIF_F_GSO_UDP_L4; in stmmac_dvr_probe()
7541 priv->tso = true; in stmmac_dvr_probe()
7542 dev_info(priv->device, "TSO feature enabled\n"); in stmmac_dvr_probe()
7545 if (priv->dma_cap.sphen && in stmmac_dvr_probe()
7546 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { in stmmac_dvr_probe()
7547 ndev->hw_features |= NETIF_F_GRO; in stmmac_dvr_probe()
7548 priv->sph_cap = true; in stmmac_dvr_probe()
7549 priv->sph = priv->sph_cap; in stmmac_dvr_probe()
7550 dev_info(priv->device, "SPH feature enabled\n"); in stmmac_dvr_probe()
7558 if (priv->plat->host_dma_width) in stmmac_dvr_probe()
7559 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; in stmmac_dvr_probe()
7561 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; in stmmac_dvr_probe()
7563 if (priv->dma_cap.host_dma_width) { in stmmac_dvr_probe()
7565 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); in stmmac_dvr_probe()
7567 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", in stmmac_dvr_probe()
7568 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); in stmmac_dvr_probe()
7575 priv->plat->dma_cfg->eame = true; in stmmac_dvr_probe()
7579 dev_err(priv->device, "Failed to set DMA Mask\n"); in stmmac_dvr_probe()
7583 priv->dma_cap.host_dma_width = 32; in stmmac_dvr_probe()
7587 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in stmmac_dvr_probe()
7588 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); in stmmac_dvr_probe()
7591 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; in stmmac_dvr_probe()
7592 if (priv->plat->has_gmac4) { in stmmac_dvr_probe()
7593 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in stmmac_dvr_probe()
7594 priv->hw->hw_vlan_en = true; in stmmac_dvr_probe()
7596 if (priv->dma_cap.vlhash) { in stmmac_dvr_probe()
7597 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in stmmac_dvr_probe()
7598 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; in stmmac_dvr_probe()
7600 if (priv->dma_cap.vlins) { in stmmac_dvr_probe()
7601 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; in stmmac_dvr_probe()
7602 if (priv->dma_cap.dvlan) in stmmac_dvr_probe()
7603 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; in stmmac_dvr_probe()
7606 priv->msg_enable = netif_msg_init(debug, default_msg_level); in stmmac_dvr_probe()
7608 priv->xstats.threshold = tc; in stmmac_dvr_probe()
7611 rxq = priv->plat->rx_queues_to_use; in stmmac_dvr_probe()
7612 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); in stmmac_dvr_probe()
7613 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_dvr_probe()
7614 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); in stmmac_dvr_probe()
7616 if (priv->dma_cap.rssen && priv->plat->rss_en) in stmmac_dvr_probe()
7617 ndev->features |= NETIF_F_RXHASH; in stmmac_dvr_probe()
7619 ndev->vlan_features |= ndev->features; in stmmac_dvr_probe()
7621 /* MTU range: 46 - hw-specific max */ in stmmac_dvr_probe()
7622 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; in stmmac_dvr_probe()
7623 if (priv->plat->has_xgmac) in stmmac_dvr_probe()
7624 ndev->max_mtu = XGMAC_JUMBO_LEN; in stmmac_dvr_probe()
7625 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) in stmmac_dvr_probe()
7626 ndev->max_mtu = JUMBO_LEN; in stmmac_dvr_probe()
7628 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); in stmmac_dvr_probe()
7629 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu in stmmac_dvr_probe()
7630 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range. in stmmac_dvr_probe()
7632 if ((priv->plat->maxmtu < ndev->max_mtu) && in stmmac_dvr_probe()
7633 (priv->plat->maxmtu >= ndev->min_mtu)) in stmmac_dvr_probe()
7634 ndev->max_mtu = priv->plat->maxmtu; in stmmac_dvr_probe()
7635 else if (priv->plat->maxmtu < ndev->min_mtu) in stmmac_dvr_probe()
7636 dev_warn(priv->device, in stmmac_dvr_probe()
7638 __func__, priv->plat->maxmtu); in stmmac_dvr_probe()
7641 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ in stmmac_dvr_probe()
7643 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in stmmac_dvr_probe()
7648 mutex_init(&priv->lock); in stmmac_dvr_probe()
7654 * changed at run-time and is fixed. Otherwise the driver will try to in stmmac_dvr_probe()
7658 if (priv->plat->clk_csr >= 0) in stmmac_dvr_probe()
7659 priv->clk_csr = priv->plat->clk_csr; in stmmac_dvr_probe()
7672 dev_err_probe(priv->device, ret, in stmmac_dvr_probe()
7673 "MDIO bus (id: %d) registration failed\n", in stmmac_dvr_probe()
7674 priv->plat->bus_id); in stmmac_dvr_probe()
7678 if (priv->plat->speed_mode_2500) in stmmac_dvr_probe()
7679 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); in stmmac_dvr_probe()
7693 dev_err(priv->device, "%s: ERROR %i registering the device\n", in stmmac_dvr_probe()
7702 if (priv->plat->dump_debug_regs) in stmmac_dvr_probe()
7703 priv->plat->dump_debug_regs(priv->plat->bsp_priv); in stmmac_dvr_probe()
7713 phylink_destroy(priv->phylink); in stmmac_dvr_probe()
7721 destroy_workqueue(priv->wq); in stmmac_dvr_probe()
7723 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_probe()
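/*
 * The error labels above unwind in reverse order of setup -- phylink
 * teardown, then the workqueue, then the AF_XDP queue bitmap -- so
 * each label releases only resources that were fully initialized
 * before the failure point.
 */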
7740 netdev_info(priv->dev, "%s: removing driver\n", __func__); in stmmac_dvr_remove()
7745 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_dvr_remove()
7751 phylink_destroy(priv->phylink); in stmmac_dvr_remove()
7752 if (priv->plat->stmmac_rst) in stmmac_dvr_remove()
7753 reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_remove()
7754 reset_control_assert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_remove()
7759 destroy_workqueue(priv->wq); in stmmac_dvr_remove()
7760 mutex_destroy(&priv->lock); in stmmac_dvr_remove()
7761 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_remove()
7769 * stmmac_suspend - suspend callback
7784 mutex_lock(&priv->lock); in stmmac_suspend()
7790 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_suspend()
7791 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_suspend()
7793 if (priv->eee_sw_timer_en) { in stmmac_suspend()
7794 priv->tx_path_in_lpi_mode = false; in stmmac_suspend()
7795 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_suspend()
7801 if (priv->plat->serdes_powerdown) in stmmac_suspend()
7802 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_suspend()
7805 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7806 stmmac_pmt(priv, priv->hw, priv->wolopts); in stmmac_suspend()
7807 priv->irq_wake = 1; in stmmac_suspend()
7809 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_suspend()
7810 pinctrl_pm_select_sleep_state(priv->device); in stmmac_suspend()
7813 mutex_unlock(&priv->lock); in stmmac_suspend()
7816 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7817 phylink_suspend(priv->phylink, true); in stmmac_suspend()
7819 if (device_may_wakeup(priv->device)) in stmmac_suspend()
7820 phylink_speed_down(priv->phylink, false); in stmmac_suspend()
7821 phylink_suspend(priv->phylink, false); in stmmac_suspend()
7826 timer_shutdown_sync(&priv->fpe_cfg.verify_timer); in stmmac_suspend()
7828 priv->speed = SPEED_UNKNOWN; in stmmac_suspend()
7835 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_reset_rx_queue()
7837 rx_q->cur_rx = 0; in stmmac_reset_rx_queue()
7838 rx_q->dirty_rx = 0; in stmmac_reset_rx_queue()
7843 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_reset_tx_queue()
7845 tx_q->cur_tx = 0; in stmmac_reset_tx_queue()
7846 tx_q->dirty_tx = 0; in stmmac_reset_tx_queue()
7847 tx_q->mss = 0; in stmmac_reset_tx_queue()
7849 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_reset_tx_queue()
7853 * stmmac_reset_queues_param - reset queue parameters
7858 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_reset_queues_param()
7859 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_reset_queues_param()
7870 * stmmac_resume - resume callback
7885 * automatically as soon as a magic packet or a Wake-up frame in stmmac_resume()
7890 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7891 mutex_lock(&priv->lock); in stmmac_resume()
7892 stmmac_pmt(priv, priv->hw, 0); in stmmac_resume()
7893 mutex_unlock(&priv->lock); in stmmac_resume()
7894 priv->irq_wake = 0; in stmmac_resume()
7896 pinctrl_pm_select_default_state(priv->device); in stmmac_resume()
7898 if (priv->mii) in stmmac_resume()
7899 stmmac_mdio_reset(priv->mii); in stmmac_resume()
7902 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_resume()
7903 priv->plat->serdes_powerup) { in stmmac_resume()
7904 ret = priv->plat->serdes_powerup(ndev, in stmmac_resume()
7905 priv->plat->bsp_priv); in stmmac_resume()
7912 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7913 phylink_resume(priv->phylink); in stmmac_resume()
7915 phylink_resume(priv->phylink); in stmmac_resume()
7916 if (device_may_wakeup(priv->device)) in stmmac_resume()
7917 phylink_speed_up(priv->phylink); in stmmac_resume()
7922 mutex_lock(&priv->lock); in stmmac_resume()
7927 stmmac_clear_descriptors(priv, &priv->dma_conf); in stmmac_resume()
7933 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); in stmmac_resume()
7938 mutex_unlock(&priv->lock); in stmmac_resume()
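/*
 * stmmac_suspend()/stmmac_resume() are exported and wired into a
 * dev_pm_ops by the bus glue (the dwmac platform and PCI drivers). A
 * minimal sketch of such wiring, with a hypothetical ops name:
 */
static SIMPLE_DEV_PM_OPS(my_stmmac_pm_ops, stmmac_suspend, stmmac_resume);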