Lines matching "eth"

Identifier search results for "eth" in the MediaTek mtk_eth_soc Ethernet driver (drivers/net/ethernet/mediatek/mtk_eth_soc.c). Each entry is the source line number, the matched fragment, and the enclosing function; "argument" and "local" mark matches that are a parameter or a local variable. Lines of a function that do not contain "eth" are omitted, which is why many fragments below are partial statements.

292 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)  in mtk_w32()  argument
294 __raw_writel(val, eth->base + reg); in mtk_w32()
297 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
299 return __raw_readl(eth->base + reg); in mtk_r32()
302 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg) in mtk_m32() argument
306 val = mtk_r32(eth, reg); in mtk_m32()
309 mtk_w32(eth, val, reg); in mtk_m32()
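These three helpers are the driver's only MMIO path; everything else in the listing funnels through them. A minimal reconstruction from the fragments above — the mask/set step and the return value of mtk_m32() do not contain "eth" and are inferred:

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);	/* line 294 */
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);	/* line 299 */
}

u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
	u32 val = mtk_r32(eth, reg);		/* line 306 */

	val &= ~mask;				/* inferred: clear the field */
	val |= set;				/* inferred: apply the new bits */
	mtk_w32(eth, val, reg);			/* line 309 */

	return reg;				/* inferred */
}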
313 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
318 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
325 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
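Every MDIO transaction below brackets itself with this poll: PHY_IAC_ACCESS stays set in MTK_PHY_IAC while the controller owns the bus. A plausible reconstruction around lines 318 and 325; the jiffies bookkeeping and the timeout bound are assumptions of this sketch:

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long timeout = jiffies + HZ / 10;	/* assumed bound */

	do {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;			/* controller idle */
		cond_resched();				/* process context */
	} while (time_before(jiffies, timeout));

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -ETIMEDOUT;
}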
329 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, in _mtk_mdio_write_c22() argument
334 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
338 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c22()
346 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
353 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write_c45() argument
358 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
362 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
370 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
374 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
382 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
389 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) in _mtk_mdio_read_c22() argument
393 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
397 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c22()
404 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
408 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c22()
411 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_read_c45() argument
416 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
420 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
428 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
432 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
439 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
443 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c45()
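The Clause 22 accessors issue a single MTK_PHY_IAC transaction; the Clause 45 ones issue two back to back (an address phase, then the data phase), which is why their fragments show two mtk_w32()/busy-wait pairs. A sketch of the Clause 22 read built around lines 393-408; the exact composition of the PHY_IAC_* command word is an assumption:

static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);			/* line 393 */
	if (ret < 0)
		return ret;

	/* one-shot C22 read: start pattern, opcode, PHY and register */
	mtk_w32(eth, PHY_IAC_ACCESS |			/* line 397 */
		PHY_IAC_START_C22 |
		PHY_IAC_CMD_C22_READ |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_REG(phy_reg),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);			/* line 404 */
	if (ret < 0)
		return ret;

	/* the result comes back in the low bits of the same register */
	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;	/* line 408 */
}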
449 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c22() local
451 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val); in mtk_mdio_write_c22()
457 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c45() local
459 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val); in mtk_mdio_write_c45()
464 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c22() local
466 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg); in mtk_mdio_read_c22()
472 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c45() local
474 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg); in mtk_mdio_read_c45()
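The four mii_bus callbacks are one-line trampolines: bus->priv carries the mtk_eth pointer (installed at line 871 below), so each simply unwraps it and calls the corresponding _mtk_mdio_* worker, e.g.:

static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;	/* set in mtk_mdio_init() */

	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
}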
477 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, in mt7621_gmac0_rgmii_adjust() argument
485 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
491 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, in mtk_gmac0_rgmii_adjust() argument
497 mtk_w32(eth, TRGMII_MODE, INTF_MODE); in mtk_gmac0_rgmii_adjust()
498 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000); in mtk_gmac0_rgmii_adjust()
500 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
504 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n"); in mtk_gmac0_rgmii_adjust()
507 static void mtk_setup_bridge_switch(struct mtk_eth *eth) in mtk_setup_bridge_switch() argument
510 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID), in mtk_setup_bridge_switch()
514 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK, in mtk_setup_bridge_switch()
525 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs() local
530 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
533 return eth->sgmii_pcs[sid]; in mtk_mac_select_pcs()
544 struct mtk_eth *eth = mac->hw; in mtk_mac_config() local
549 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
559 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
560 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
568 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
573 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
574 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
622 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
625 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
636 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
638 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
645 dev_err(eth->dev, in mtk_mac_config()
651 if (mtk_is_netsys_v3_or_greater(eth) && in mtk_mac_config()
656 mtk_setup_bridge_switch(eth); in mtk_mac_config()
662 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
667 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
676 struct mtk_eth *eth = mac->hw; in mtk_mac_finish() local
682 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
709 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, in mtk_set_queue_speed() argument
712 const struct mtk_soc_data *soc = eth->soc; in mtk_set_queue_speed()
723 if (mtk_is_netsys_v1(eth)) in mtk_set_queue_speed()
775 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_set_queue_speed()
826 static void mtk_mdio_config(struct mtk_eth *eth) in mtk_mdio_config() argument
831 val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider); in mtk_mdio_config()
834 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_config()
835 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); in mtk_mdio_config()
839 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); in mtk_mdio_config()
842 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
849 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
851 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
860 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
861 if (!eth->mii_bus) { in mtk_mdio_init()
866 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
867 eth->mii_bus->read = mtk_mdio_read_c22; in mtk_mdio_init()
868 eth->mii_bus->write = mtk_mdio_write_c22; in mtk_mdio_init()
869 eth->mii_bus->read_c45 = mtk_mdio_read_c45; in mtk_mdio_init()
870 eth->mii_bus->write_c45 = mtk_mdio_write_c45; in mtk_mdio_init()
871 eth->mii_bus->priv = eth; in mtk_mdio_init()
872 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
874 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
878 dev_err(eth->dev, "MDIO clock frequency out of range"); in mtk_mdio_init()
884 eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63); in mtk_mdio_init()
885 mtk_mdio_config(eth); in mtk_mdio_init()
886 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider); in mtk_mdio_init()
887 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
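Taken together, mtk_mdio_init() is a standard devm MDIO bringup: find the "mdio-bus" child node, allocate the bus, wire the four accessors plus priv/parent, derive an MDC divider clamped to 63, program it via mtk_mdio_config(), and register against the OF node. A condensed sketch; the error paths, the clock-frequency property parsing, and the return codes are abbreviated or assumed:

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	u32 max_clk = 2500000;		/* assumed default MDC rate */
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;		/* assumed error code */
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read_c22;
	eth->mii_bus->write = mtk_mdio_write_c22;
	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;
	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);

	/* clamp the divider so MDC never exceeds the requested rate */
	eth->mdc_divider = min_t(unsigned int,
				 DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
	mtk_mdio_config(eth);
	dev_dbg(eth->dev, "MDC is running on %d Hz\n",
		MDC_MAX_FREQ / eth->mdc_divider);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}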
894 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
896 if (!eth->mii_bus) in mtk_mdio_cleanup()
899 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
902 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
907 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
908 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
909 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
910 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
913 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
918 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
919 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
920 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
921 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
924 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
929 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
930 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
931 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
932 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
935 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
940 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
941 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
942 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
943 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
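All four interrupt-mask helpers are the same read-modify-write of the SoC's mask register under a per-direction spinlock; disable clears the requested bits, enable sets them. The TX disable case assembles almost verbatim from lines 907-910 (only the local declarations are implied):

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);	/* enable: val | mask */
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}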
950 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address() local
960 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
981 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac() local
985 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
993 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
1016 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_stats_update_mac()
1046 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
1051 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
1053 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
1054 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
1055 spin_unlock(&eth->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
1114 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, in mtk_rx_get_desc() argument
1124 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_rx_get_desc()
1144 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
1146 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
1152 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) in mtk_init_fq_dma()
1153 eth->scratch_ring = eth->sram_base; in mtk_init_fq_dma()
1155 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, in mtk_init_fq_dma()
1157 &eth->phy_scratch_ring, in mtk_init_fq_dma()
1160 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
1163 phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); in mtk_init_fq_dma()
1167 eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
1169 if (unlikely(!eth->scratch_head[j])) in mtk_init_fq_dma()
1172 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
1173 eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
1176 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
1182 txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; in mtk_init_fq_dma()
1185 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
1193 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_init_fq_dma()
1202 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
1203 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
1204 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
1205 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
1234 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in mtk_tx_unmap() argument
1237 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1239 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1244 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1251 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1258 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1288 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in setup_tx_buf() argument
1292 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1315 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1() local
1346 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2() local
1355 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_tx_set_dma_desc_v2()
1383 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev)) in mtk_tx_set_dma_desc_v2()
1401 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc() local
1403 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_set_dma_desc()
1424 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
1425 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1442 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1444 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1451 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, in mtk_tx_map()
1484 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1487 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1500 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, in mtk_tx_map()
1532 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1538 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); in mtk_tx_map()
1548 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_tx_map()
1561 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) in mtk_cal_txd_req() argument
1570 eth->soc->tx.dma_max_len); in mtk_cal_txd_req()
1579 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
1584 if (!eth->netdev[i]) in mtk_queue_stopped()
1586 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1593 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
1598 if (!eth->netdev[i]) in mtk_wake_queue()
1600 netif_tx_wake_all_queues(eth->netdev[i]); in mtk_wake_queue()
1607 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
1608 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_start_xmit()
1617 spin_lock(&eth->page_lock); in mtk_start_xmit()
1619 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_start_xmit()
1622 tx_num = mtk_cal_txd_req(eth, skb); in mtk_start_xmit()
1625 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
1627 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1634 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
1652 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1657 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1663 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
1669 if (!eth->hwlro) in mtk_get_rx_ring()
1670 return &eth->rx_ring[0]; in mtk_get_rx_ring()
1675 ring = &eth->rx_ring[i]; in mtk_get_rx_ring()
1677 rxd = ring->dma + idx * eth->soc->rx.desc_size; in mtk_get_rx_ring()
1687 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
1692 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1693 ring = &eth->rx_ring[0]; in mtk_update_rx_cpu_idx()
1694 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1697 ring = &eth->rx_ring[i]; in mtk_update_rx_cpu_idx()
1700 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1706 static bool mtk_page_pool_enabled(struct mtk_eth *eth) in mtk_page_pool_enabled() argument
1708 return mtk_is_netsys_v2_or_greater(eth); in mtk_page_pool_enabled()
1711 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, in mtk_create_page_pool() argument
1720 .dev = eth->dma_dev, in mtk_create_page_pool()
1727 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1733 err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id, in mtk_create_page_pool()
1734 eth->rx_napi.napi_id, PAGE_SIZE); in mtk_create_page_pool()
1774 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, in mtk_xdp_frame_map() argument
1779 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_frame_map()
1784 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1786 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1795 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1805 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1811 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, in mtk_xdp_submit_frame() argument
1815 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1816 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_submit_frame()
1829 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_xdp_submit_frame()
1836 spin_lock(&eth->page_lock); in mtk_xdp_submit_frame()
1840 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1850 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, in mtk_xdp_submit_frame()
1898 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
1903 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
1907 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1914 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_xdp_submit_frame()
1926 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1936 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit() local
1943 if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) in mtk_xdp_xmit()
1956 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, in mtk_xdp_run() argument
1967 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
1987 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) { in mtk_xdp_run()
2020 struct mtk_eth *eth) in mtk_poll_rx() argument
2040 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
2045 rxd = ring->dma + idx * eth->soc->rx.desc_size; in mtk_poll_rx()
2048 if (!mtk_rx_get_desc(eth, &trxd, rxd)) in mtk_poll_rx()
2052 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_poll_rx()
2066 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
2072 !eth->netdev[mac])) in mtk_poll_rx()
2075 netdev = eth->netdev[mac]; in mtk_poll_rx()
2076 ppe_idx = eth->mac[mac]->ppe_idx; in mtk_poll_rx()
2078 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_poll_rx()
2097 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
2106 ret = mtk_xdp_run(eth, ring, &xdp, netdev); in mtk_poll_rx()
2135 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
2136 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
2138 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
2145 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_poll_rx()
2148 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), in mtk_poll_rx()
2165 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_poll_rx()
2181 if (*rxdcsum & eth->soc->rx.dma_l4_valid) in mtk_poll_rx()
2190 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) && in mtk_poll_rx()
2194 if (port < ARRAY_SIZE(eth->dsa_meta) && in mtk_poll_rx()
2195 eth->dsa_meta[port]) in mtk_poll_rx()
2196 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst); in mtk_poll_rx()
2200 mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash); in mtk_poll_rx()
2209 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
2214 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && in mtk_poll_rx()
2228 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
2231 eth->rx_packets += done; in mtk_poll_rx()
2232 eth->rx_bytes += bytes; in mtk_poll_rx()
2233 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
2235 net_dim(&eth->rx_dim, &dim_sample); in mtk_poll_rx()
2251 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, in mtk_poll_tx_done() argument
2259 eth->tx_packets++; in mtk_poll_tx_done()
2260 eth->tx_bytes += bytes; in mtk_poll_tx_done()
2262 dev = eth->netdev[mac]; in mtk_poll_tx_done()
2281 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_qdma() argument
2284 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
2285 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_qdma()
2292 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2305 eth->soc->tx.desc_size); in mtk_poll_tx_qdma()
2311 mtk_poll_tx_done(eth, state, tx_buf->mac_id, in mtk_poll_tx_qdma()
2316 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_qdma()
2326 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2331 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_pdma() argument
2334 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_pdma()
2341 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); in mtk_poll_tx_pdma()
2351 mtk_poll_tx_done(eth, state, 0, tx_buf->data); in mtk_poll_tx_pdma()
2354 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_pdma()
2356 desc = ring->dma + cpu * eth->soc->tx.desc_size; in mtk_poll_tx_pdma()
2369 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
2371 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx()
2375 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2376 budget = mtk_poll_tx_qdma(eth, budget, &state); in mtk_poll_tx()
2378 budget = mtk_poll_tx_pdma(eth, budget, &state); in mtk_poll_tx()
2383 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2385 net_dim(&eth->tx_dim, &dim_sample); in mtk_poll_tx()
2387 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
2389 mtk_wake_queue(eth); in mtk_poll_tx()
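The tail of mtk_poll_tx() re-wakes the stack once reclaim has freed enough descriptors; the middle clause of the condition does not contain "eth" and so is missing from the listing. A plausible completion of lines 2387-2389, with the free-count comparison assumed:

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))	/* assumed clause */
		mtk_wake_queue(eth);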
2394 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
2396 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
2399 mtk_stats_update(eth); in mtk_handle_status_irq()
2400 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
2407 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
2408 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2411 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2412 mtk_handle_status_irq(eth); in mtk_napi_tx()
2413 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2414 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
2416 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
2417 dev_info(eth->dev, in mtk_napi_tx()
2419 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2420 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2426 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2430 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
2437 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
2438 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2441 mtk_handle_status_irq(eth); in mtk_napi_rx()
2446 mtk_w32(eth, eth->soc->rx.irq_done_mask, in mtk_napi_rx()
2448 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2451 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
2452 dev_info(eth->dev, in mtk_napi_rx()
2454 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2455 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2461 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2462 eth->soc->rx.irq_done_mask); in mtk_napi_rx()
2465 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); in mtk_napi_rx()
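The RX NAPI handler is a classic ack-poll-recheck loop: clear the done bits, poll up to the remaining budget, and repeat while the status register still reports work; the interrupt is only re-enabled after a clean napi_complete_done(). A simplified sketch of that control flow (the accumulator and the completion call are assumed glue; the netif_msg_intr() debug print at lines 2451-2455 is omitted):

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int rx_done_total = 0;				/* assumed accumulator */

	mtk_handle_status_irq(eth);

	do {
		int rx_done;

		mtk_w32(eth, eth->soc->rx.irq_done_mask,
			reg_map->pdma.irq_status);	/* ack, line 2446 */
		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
		rx_done_total += rx_done;

		if (rx_done_total == budget)
			return budget;			/* stay scheduled */
	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
		 eth->soc->rx.irq_done_mask);

	if (napi_complete_done(napi, rx_done_total))
		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);

	return rx_done_total;
}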
2470 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
2472 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2473 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_alloc()
2490 ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz; in mtk_tx_alloc()
2491 ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz; in mtk_tx_alloc()
2493 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2508 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_tx_alloc()
2521 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2545 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2546 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2547 mtk_w32(eth, in mtk_tx_alloc()
2550 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2554 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); in mtk_tx_alloc()
2561 if (mtk_is_netsys_v1(eth)) in mtk_tx_alloc()
2563 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_tx_alloc()
2567 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); in mtk_tx_alloc()
2568 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_alloc()
2569 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); in mtk_tx_alloc()
2571 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2572 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); in mtk_tx_alloc()
2573 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); in mtk_tx_alloc()
2574 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2583 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
2585 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2586 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_clean()
2591 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2596 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2603 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2610 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
2612 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2613 const struct mtk_soc_data *soc = eth->soc; in mtk_rx_alloc()
2618 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_rx_alloc()
2626 ring = &eth->rx_ring_qdma; in mtk_rx_alloc()
2628 ring = &eth->rx_ring[ring_no]; in mtk_rx_alloc()
2646 if (mtk_page_pool_enabled(eth)) { in mtk_rx_alloc()
2649 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2657 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || in mtk_rx_alloc()
2659 ring->dma = dma_alloc_coherent(eth->dma_dev, in mtk_rx_alloc()
2660 rx_dma_size * eth->soc->rx.desc_size, in mtk_rx_alloc()
2663 struct mtk_tx_ring *tx_ring = &eth->tx_ring; in mtk_rx_alloc()
2666 eth->soc->tx.desc_size * (ring_no + 1); in mtk_rx_alloc()
2668 eth->soc->tx.desc_size * (ring_no + 1); in mtk_rx_alloc()
2679 rxd = ring->dma + i * eth->soc->rx.desc_size; in mtk_rx_alloc()
2694 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2695 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2697 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2706 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2711 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_alloc()
2716 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_rx_alloc()
2739 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2741 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2743 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2746 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2748 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2750 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2753 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
2758 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram) in mtk_rx_clean() argument
2770 rxd = ring->dma + i * eth->soc->rx.desc_size; in mtk_rx_clean()
2774 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_clean()
2777 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), in mtk_rx_clean()
2786 dma_free_coherent(eth->dma_dev, in mtk_rx_clean()
2787 ring->dma_size * eth->soc->rx.desc_size, in mtk_rx_clean()
2800 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
2824 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
2825 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
2826 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
2836 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
2839 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
2842 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
2854 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
2855 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
2860 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
2866 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2870 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2880 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
2883 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2886 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
2890 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2893 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2895 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
2898 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2901 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
2905 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2908 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2910 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2932 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
2945 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
2956 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
2967 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
2975 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
2982 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
3066 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
3072 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
3073 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
3075 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
3077 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
3081 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
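mtk_dma_busy_wait() picks the QDMA or PDMA global-config register and spins until the engine drops its busy bits, using the atomic readx poll helper visible at line 3077. Reconstructed below; the busy-bit mask and the poll interval/timeout values are assumptions:

static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned int reg;
	int ret;
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		reg = eth->soc->reg_map->qdma.glo_cfg;
	else
		reg = eth->soc->reg_map->pdma.glo_cfg;

	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
					!(val & (MTK_RX_DMA_BUSY |
						 MTK_TX_DMA_BUSY)),
					5, 1000000);	/* assumed bounds */
	if (ret)
		dev_err(eth->dev, "DMA init timeout\n");

	return ret;
}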
3086 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
3091 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
3094 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3098 err = mtk_init_fq_dma(eth); in mtk_dma_init()
3103 err = mtk_tx_alloc(eth); in mtk_dma_init()
3107 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3108 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
3113 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
3117 if (eth->hwlro) { in mtk_dma_init()
3119 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
3123 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
3128 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3132 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | in mtk_dma_init()
3133 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
3134 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
3140 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
3142 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
3146 if (eth->netdev[i]) in mtk_dma_free()
3147 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
3148 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { in mtk_dma_free()
3149 dma_free_coherent(eth->dma_dev, in mtk_dma_free()
3151 eth->scratch_ring, eth->phy_scratch_ring); in mtk_dma_free()
3152 eth->scratch_ring = NULL; in mtk_dma_free()
3153 eth->phy_scratch_ring = 0; in mtk_dma_free()
3155 mtk_tx_clean(eth); in mtk_dma_free()
3156 mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM)); in mtk_dma_free()
3157 mtk_rx_clean(eth, &eth->rx_ring_qdma, false); in mtk_dma_free()
3159 if (eth->hwlro) { in mtk_dma_free()
3160 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
3162 mtk_rx_clean(eth, &eth->rx_ring[i], false); in mtk_dma_free()
3166 kfree(eth->scratch_head[i]); in mtk_dma_free()
3167 eth->scratch_head[i] = NULL; in mtk_dma_free()
3171 static bool mtk_hw_reset_check(struct mtk_eth *eth) in mtk_hw_reset_check() argument
3173 u32 val = mtk_r32(eth, MTK_INT_STATUS2); in mtk_hw_reset_check()
3183 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
3185 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_tx_timeout()
3188 if (!mtk_hw_reset_check(eth)) in mtk_tx_timeout()
3191 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
3192 netif_err(eth, tx_err, dev, "transmit timed out\n"); in mtk_tx_timeout()
3194 schedule_work(&eth->pending_work); in mtk_tx_timeout()
3199 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
3201 eth->rx_events++; in mtk_handle_irq_rx()
3202 if (likely(napi_schedule_prep(&eth->rx_napi))) { in mtk_handle_irq_rx()
3203 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_handle_irq_rx()
3204 __napi_schedule(&eth->rx_napi); in mtk_handle_irq_rx()
3212 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
3214 eth->tx_events++; in mtk_handle_irq_tx()
3215 if (likely(napi_schedule_prep(&eth->tx_napi))) { in mtk_handle_irq_tx()
3216 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
3217 __napi_schedule(&eth->tx_napi); in mtk_handle_irq_tx()
3225 struct mtk_eth *eth = _eth; in mtk_handle_irq() local
3226 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
3228 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
3229 eth->soc->rx.irq_done_mask) { in mtk_handle_irq()
3230 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
3231 eth->soc->rx.irq_done_mask) in mtk_handle_irq()
3234 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
3235 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
3246 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
3248 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3249 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_poll_controller()
3250 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
3251 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3252 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); in mtk_poll_controller()
3256 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
3259 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
3262 err = mtk_dma_init(eth); in mtk_start_dma()
3264 mtk_dma_free(eth); in mtk_start_dma()
3268 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
3269 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
3274 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_start_dma()
3280 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
3282 mtk_w32(eth, in mtk_start_dma()
3287 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | in mtk_start_dma()
3295 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config) in mtk_gdm_config() argument
3299 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
3302 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id)); in mtk_gdm_config()
3312 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id])) in mtk_gdm_config()
3315 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id)); in mtk_gdm_config()
3332 struct mtk_eth *eth = mac->hw; in mtk_device_event() local
3366 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); in mtk_device_event()
3374 struct mtk_eth *eth = mac->hw; in mtk_open() local
3378 ppe_num = eth->soc->ppe_num; in mtk_open()
3388 if (!refcount_read(&eth->dma_refcnt)) { in mtk_open()
3389 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3393 err = mtk_start_dma(eth); in mtk_open()
3399 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3400 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3403 if (!eth->netdev[i]) in mtk_open()
3406 target_mac = netdev_priv(eth->netdev[i]); in mtk_open()
3420 mtk_gdm_config(eth, target_mac->id, gdm_config); in mtk_open()
3423 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_open()
3424 mtk_w32(eth, 0, MTK_RST_GL); in mtk_open()
3426 napi_enable(&eth->tx_napi); in mtk_open()
3427 napi_enable(&eth->rx_napi); in mtk_open()
3428 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
3429 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); in mtk_open()
3430 refcount_set(&eth->dma_refcnt, 1); in mtk_open()
3432 refcount_inc(&eth->dma_refcnt); in mtk_open()
3438 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_open()
3441 if (mtk_uses_dsa(dev) && !eth->prog) { in mtk_open()
3442 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_open()
3443 struct metadata_dst *md_dst = eth->dsa_meta[i]; in mtk_open()
3454 eth->dsa_meta[i] = md_dst; in mtk_open()
3460 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_open()
3463 mtk_w32(eth, val, MTK_CDMP_IG_CTRL); in mtk_open()
3465 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); in mtk_open()
3471 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
3477 spin_lock_bh(&eth->page_lock); in mtk_stop_dma()
3478 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3479 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
3481 spin_unlock_bh(&eth->page_lock); in mtk_stop_dma()
3485 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
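mtk_stop_dma() clears the write-back and TX/RX enable bits under page_lock, then polls the same register until the engine reports idle. A sketch with the polling loop assumed (only the reads/writes at lines 3477-3485 appear above):

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* assumed: bounded wait for the busy bits to drop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			break;
		msleep(20);
	}
}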
3497 struct mtk_eth *eth = mac->hw; in mtk_stop() local
3507 if (!refcount_dec_and_test(&eth->dma_refcnt)) in mtk_stop()
3511 mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL); in mtk_stop()
3513 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
3514 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_stop()
3515 napi_disable(&eth->tx_napi); in mtk_stop()
3516 napi_disable(&eth->rx_napi); in mtk_stop()
3518 cancel_work_sync(&eth->rx_dim.work); in mtk_stop()
3519 cancel_work_sync(&eth->tx_dim.work); in mtk_stop()
3521 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3522 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3523 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3525 mtk_dma_free(eth); in mtk_stop()
3527 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3528 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
3537 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup() local
3541 if (eth->hwlro) { in mtk_xdp_setup()
3551 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3555 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3575 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
3577 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3582 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
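ethsys_reset() pulses the given bits in ETHSYS_RSTCTRL through the syscon regmap: assert, settle, deassert, settle. The two regmap_update_bits() calls are in the listing; the settle delays are assumed:

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);	/* assert */
	usleep_range(1000, 1100);			/* assumed delay */
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, ~reset_bits);	/* deassert */
	usleep_range(1000, 1100);			/* assumed delay */
}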
3588 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
3593 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3596 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
3601 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3610 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
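The clock helpers iterate the eth->clks[] array; enable unwinds in reverse on failure (the cleanup loop at line 3610), and since clk_prepare_enable()/clk_disable_unprepare() accept NULL, absent optional clocks are harmless. A sketch assuming the array is indexed by an MTK_CLK_MAX-bounded enum:

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)				/* unwind in reverse */
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}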
3618 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); in mtk_dim_rx() local
3619 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3623 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3625 spin_lock_bh(&eth->dim_lock); in mtk_dim_rx()
3627 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3637 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3638 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3639 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3641 spin_unlock_bh(&eth->dim_lock); in mtk_dim_rx()
3649 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); in mtk_dim_tx() local
3650 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3654 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3656 spin_lock_bh(&eth->dim_lock); in mtk_dim_tx()
3658 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3668 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3669 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3670 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3672 spin_unlock_bh(&eth->dim_lock); in mtk_dim_tx()
3679 struct mtk_eth *eth = mac->hw; in mtk_set_mcr_max_rx() local
3682 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_set_mcr_max_rx()
3701 static void mtk_hw_reset(struct mtk_eth *eth) in mtk_hw_reset() argument
3705 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3706 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_reset()
3708 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_reset()
3711 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3714 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_reset()
3718 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_reset()
3721 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3727 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); in mtk_hw_reset()
3729 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_hw_reset()
3730 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3732 else if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3733 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3737 static u32 mtk_hw_reset_read(struct mtk_eth *eth) in mtk_hw_reset_read() argument
3741 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); in mtk_hw_reset_read()
3745 static void mtk_hw_warm_reset(struct mtk_eth *eth) in mtk_hw_warm_reset() argument
3749 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, in mtk_hw_warm_reset()
3751 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, in mtk_hw_warm_reset()
3753 dev_err(eth->dev, "warm reset failed\n"); in mtk_hw_warm_reset()
3754 mtk_hw_reset(eth); in mtk_hw_warm_reset()
3758 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_warm_reset()
3760 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3762 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_warm_reset()
3766 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_warm_reset()
3768 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3774 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); in mtk_hw_warm_reset()
3777 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3779 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3783 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); in mtk_hw_warm_reset()
3786 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3788 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3792 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) in mtk_hw_check_dma_hang() argument
3794 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_check_dma_hang()
3802 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_check_dma_hang()
3806 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); in mtk_hw_check_dma_hang()
3808 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); in mtk_hw_check_dma_hang()
3811 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); in mtk_hw_check_dma_hang()
3814 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && in mtk_hw_check_dma_hang()
3815 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && in mtk_hw_check_dma_hang()
3816 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); in mtk_hw_check_dma_hang()
3818 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { in mtk_hw_check_dma_hang()
3819 if (++eth->reset.wdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3820 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3827 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); in mtk_hw_check_dma_hang()
3828 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); in mtk_hw_check_dma_hang()
3830 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; in mtk_hw_check_dma_hang()
3831 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; in mtk_hw_check_dma_hang()
3832 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; in mtk_hw_check_dma_hang()
3833 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; in mtk_hw_check_dma_hang()
3834 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); in mtk_hw_check_dma_hang()
3835 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); in mtk_hw_check_dma_hang()
3840 if (++eth->reset.qdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3841 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3848 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); in mtk_hw_check_dma_hang()
3849 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); in mtk_hw_check_dma_hang()
3850 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && in mtk_hw_check_dma_hang()
3851 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); in mtk_hw_check_dma_hang()
3854 if (++eth->reset.adma_hang_count > 2) { in mtk_hw_check_dma_hang()
3855 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3861 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3862 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3863 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3865 eth->reset.wdidx = wdidx; in mtk_hw_check_dma_hang()
3873 struct mtk_eth *eth = container_of(del_work, struct mtk_eth, in mtk_hw_reset_monitor_work() local
3876 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_hw_reset_monitor_work()
3880 if (mtk_hw_check_dma_hang(eth)) in mtk_hw_reset_monitor_work()
3881 schedule_work(&eth->pending_work); in mtk_hw_reset_monitor_work()
3884 schedule_delayed_work(&eth->reset.monitor_work, in mtk_hw_reset_monitor_work()
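The monitor work is a self-rearming watchdog: skip the check while a reset is already in flight, kick pending_work when the DMA-hang heuristic fires, and reschedule unconditionally. Reconstructed around lines 3876-3884; the label and the timeout constant are assumptions:

static void mtk_hw_reset_monitor_work(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
					   reset.monitor_work);

	if (test_bit(MTK_RESETTING, &eth->state))
		goto out;

	/* several consecutive bad samples inside the heuristic trigger this */
	if (mtk_hw_check_dma_hang(eth))
		schedule_work(&eth->pending_work);

out:
	schedule_delayed_work(&eth->reset.monitor_work,
			      MTK_DMA_MONITOR_TIMEOUT);	/* assumed constant */
}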
3888 static int mtk_hw_init(struct mtk_eth *eth, bool reset) in mtk_hw_init() argument
3892 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
3895 if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_init()
3899 pm_runtime_enable(eth->dev); in mtk_hw_init()
3900 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
3902 ret = mtk_clk_enable(eth); in mtk_hw_init()
3907 if (eth->ethsys) in mtk_hw_init()
3908 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
3909 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
3911 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
3912 ret = device_reset(eth->dev); in mtk_hw_init()
3914 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
3919 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3920 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3923 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3924 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3932 mtk_hw_warm_reset(eth); in mtk_hw_init()
3934 mtk_hw_reset(eth); in mtk_hw_init()
3937 if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_init()
3938 mtk_mdio_config(eth); in mtk_hw_init()
3940 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_init()
3942 val = mtk_r32(eth, MTK_FE_GLO_MISC); in mtk_hw_init()
3943 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); in mtk_hw_init()
3946 if (eth->pctl) { in mtk_hw_init()
3948 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
3951 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
3954 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
3962 struct net_device *dev = eth->netdev[i]; in mtk_hw_init()
3967 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); in mtk_hw_init()
3975 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3976 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3977 if (mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3978 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3979 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3981 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
3985 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3986 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3989 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3990 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3993 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
3994 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
3995 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
3996 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
3997 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
3999 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_init()
4001 mtk_w32(eth, 0x00000302, PSE_DROP_CFG); in mtk_hw_init()
4004 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES); in mtk_hw_init()
4005 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); in mtk_hw_init()
4008 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0)); in mtk_hw_init()
4015 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i); in mtk_hw_init()
4016 } else if (!mtk_is_netsys_v1(eth)) { in mtk_hw_init()
4018 mtk_w32(eth, 0x00000300, PSE_DROP_CFG); in mtk_hw_init()
4021 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); in mtk_hw_init()
4024 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); in mtk_hw_init()
4027 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); in mtk_hw_init()
4028 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); in mtk_hw_init()
4029 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); in mtk_hw_init()
4030 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); in mtk_hw_init()
4031 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); in mtk_hw_init()
4032 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); in mtk_hw_init()
4033 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); in mtk_hw_init()
4034 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); in mtk_hw_init()
4037 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); in mtk_hw_init()
4038 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); in mtk_hw_init()
4039 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); in mtk_hw_init()
4040 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); in mtk_hw_init()
4041 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); in mtk_hw_init()
4042 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); in mtk_hw_init()
4043 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); in mtk_hw_init()
4044 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); in mtk_hw_init()
4047 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); in mtk_hw_init()
4048 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); in mtk_hw_init()
4049 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); in mtk_hw_init()
4050 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); in mtk_hw_init()
4051 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); in mtk_hw_init()
4052 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); in mtk_hw_init()
4059 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
4060 pm_runtime_disable(eth->dev); in mtk_hw_init()
4066 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
4068 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_deinit()
4071 mtk_clk_disable(eth); in mtk_hw_deinit()
4073 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
4074 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
4082 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
4085 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
4086 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
4093 struct mtk_eth *eth = mac->hw; in mtk_change_mtu() local
4095 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
4123 static void mtk_prepare_for_reset(struct mtk_eth *eth) in mtk_prepare_for_reset() argument
4130 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_prepare_for_reset()
4132 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_prepare_for_reset()
4133 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_prepare_for_reset()
4135 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_prepare_for_reset()
4137 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_prepare_for_reset()
4141 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_prepare_for_reset()
4142 mtk_ppe_prepare_reset(eth->ppe[i]); in mtk_prepare_for_reset()
4145 mtk_w32(eth, 0, MTK_FE_INT_ENABLE); in mtk_prepare_for_reset()
4149 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; in mtk_prepare_for_reset()
4150 mtk_w32(eth, val, MTK_MAC_MCR(i)); in mtk_prepare_for_reset()
4156 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
4162 set_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4164 mtk_prepare_for_reset(eth); in mtk_pending_work()
4169 mtk_prepare_for_reset(eth); in mtk_pending_work()
4173 if (!eth->netdev[i] || !netif_running(eth->netdev[i])) in mtk_pending_work()
4176 mtk_stop(eth->netdev[i]); in mtk_pending_work()
4182 if (eth->dev->pins) in mtk_pending_work()
4183 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
4184 eth->dev->pins->default_state); in mtk_pending_work()
4185 mtk_hw_init(eth, true); in mtk_pending_work()
4189 if (!eth->netdev[i] || !test_bit(i, &restart)) in mtk_pending_work()
4192 if (mtk_open(eth->netdev[i])) { in mtk_pending_work()
4193 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
4195 dev_close(eth->netdev[i]); in mtk_pending_work()
4201 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_pending_work()
4203 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_pending_work()
4204 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_pending_work()
4206 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_pending_work()
4209 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_pending_work()
4212 clear_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4219 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
4224 if (!eth->netdev[i]) in mtk_free_dev()
4226 free_netdev(eth->netdev[i]); in mtk_free_dev()
4229 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_free_dev()
4230 if (!eth->dsa_meta[i]) in mtk_free_dev()
4232 metadata_dst_free(eth->dsa_meta[i]); in mtk_free_dev()
4238 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
4244 if (!eth->netdev[i]) in mtk_unreg_dev()
4246 mac = netdev_priv(eth->netdev[i]); in mtk_unreg_dev()
4247 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_unreg_dev()
4249 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
4255 static void mtk_sgmii_destroy(struct mtk_eth *eth) in mtk_sgmii_destroy() argument
4260 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]); in mtk_sgmii_destroy()
4263 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
4265 mtk_sgmii_destroy(eth); in mtk_cleanup()
4266 mtk_unreg_dev(eth); in mtk_cleanup()
4267 mtk_free_dev(eth); in mtk_cleanup()
4268 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
4269 cancel_delayed_work_sync(&eth->reset.monitor_work); in mtk_cleanup()
4368 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) in mtk_ethtool_pp_stats() argument
4373 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
4374 struct mtk_rx_ring *ring = &eth->rx_ring[i]; in mtk_ethtool_pp_stats()
4542 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
4553 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
4559 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
4563 if (eth->netdev[id]) { in mtk_add_mac()
4564 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
4568 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_add_mac()
4571 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); in mtk_add_mac()
4572 if (!eth->netdev[id]) { in mtk_add_mac()
4573 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
4576 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
4577 eth->mac[id] = mac; in mtk_add_mac()
4579 mac->hw = eth; in mtk_add_mac()
4582 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]); in mtk_add_mac()
4588 eth_hw_addr_random(eth->netdev[id]); in mtk_add_mac()
4589 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_add_mac()
4590 eth->netdev[id]->dev_addr); in mtk_add_mac()
4596 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
4600 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
4607 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_add_mac()
4615 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
4623 mac->phylink_config.dev = &eth->netdev[id]->dev; in mtk_add_mac()
4649 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mtk_add_mac()
4685 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
4686 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
4687 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
4688 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
4690 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
4691 if (eth->hwlro) in mtk_add_mac()
4692 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
4694 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
4696 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
4697 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
4699 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
4700 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
4702 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
4703 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
4705 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
4707 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_add_mac()
4712 if (mtk_page_pool_enabled(eth)) in mtk_add_mac()
4713 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC | in mtk_add_mac()
4721 free_netdev(eth->netdev[id]); in mtk_add_mac()
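The mtk_add_mac() matches show the per-port pattern: allocate a multi-TX-queue etherdev, take the MAC address from DT/NVMEM with a random fallback, then wire up ops and features from the SoC descriptor. A condensed sketch of the allocation and addressing steps; the my_* names are hypothetical.

#include <linux/etherdevice.h>
#include <linux/of_net.h>

struct my_mac { int id; };              /* hypothetical per-port private data */

static struct net_device *my_add_port(struct device *dev,
                                      struct device_node *np,
                                      unsigned int txqs)
{
        struct net_device *ndev;

        ndev = alloc_etherdev_mqs(sizeof(struct my_mac), txqs, 1);
        if (!ndev)
                return NULL;

        /* DT/NVMEM address first, random address only as a last resort */
        if (of_get_ethdev_address(np, ndev)) {
                eth_hw_addr_random(ndev);
                dev_warn(dev, "using random MAC %pM\n", ndev->dev_addr);
        }

        SET_NETDEV_DEV(ndev, dev);
        ndev->watchdog_timeo = 5 * HZ;
        return ndev;
}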
4725 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) in mtk_eth_set_dma_device() argument
4734 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
4744 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
4754 static int mtk_sgmii_init(struct mtk_eth *eth) in mtk_sgmii_init() argument
4762 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i); in mtk_sgmii_init()
4776 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap, in mtk_sgmii_init()
4777 eth->soc->ana_rgc3, in mtk_sgmii_init()
4788 struct mtk_eth *eth; in mtk_probe() local
4791 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
4792 if (!eth) in mtk_probe()
4795 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
4797 eth->dev = &pdev->dev; in mtk_probe()
4798 eth->dma_dev = &pdev->dev; in mtk_probe()
4799 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
4800 if (IS_ERR(eth->base)) in mtk_probe()
4801 return PTR_ERR(eth->base); in mtk_probe()
4803 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
4804 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
4806 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4811 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4812 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1); in mtk_probe()
4813 if (IS_ERR(eth->sram_base)) in mtk_probe()
4814 return PTR_ERR(eth->sram_base); in mtk_probe()
4816 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4820 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_probe()
4831 spin_lock_init(&eth->page_lock); in mtk_probe()
4832 spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
4833 spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
4834 spin_lock_init(&eth->dim_lock); in mtk_probe()
4836 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4837 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); in mtk_probe()
4838 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work); in mtk_probe()
4840 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4841 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); in mtk_probe()
4843 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4844 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4846 if (IS_ERR(eth->ethsys)) { in mtk_probe()
4848 return PTR_ERR(eth->ethsys); in mtk_probe()
4852 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
4853 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4855 if (IS_ERR(eth->infra)) { in mtk_probe()
4857 return PTR_ERR(eth->infra); in mtk_probe()
4871 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
4872 err = mtk_sgmii_init(eth); in mtk_probe()
4878 if (eth->soc->required_pctl) { in mtk_probe()
4879 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4881 if (IS_ERR(eth->pctl)) { in mtk_probe()
4883 err = PTR_ERR(eth->pctl); in mtk_probe()
4888 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_probe()
4894 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4895 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4901 eth->phy_scratch_ring = res_sram->start; in mtk_probe()
4903 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4908 if (eth->soc->offload_version) { in mtk_probe()
4914 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
4922 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
4924 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
4930 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
4931 eth->irq[i] = eth->irq[0]; in mtk_probe()
4933 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
4934 if (eth->irq[i] < 0) { in mtk_probe()
4940 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
4941 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
4943 if (IS_ERR(eth->clks[i])) { in mtk_probe()
4944 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
4948 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
4954 eth->clks[i] = NULL; in mtk_probe()
4958 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
4959 INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
4961 err = mtk_hw_init(eth, false); in mtk_probe()
4965 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
4969 "mediatek,eth-mac")) in mtk_probe()
4975 err = mtk_add_mac(eth, mac_np); in mtk_probe()
4982 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
4983 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
4985 dev_name(eth->dev), eth); in mtk_probe()
4987 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
4989 dev_name(eth->dev), eth); in mtk_probe()
4993 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
4995 dev_name(eth->dev), eth); in mtk_probe()
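On MTK_SHARED_INT SoCs every source funnels through irq[0]; otherwise TX and RX get their own handlers on irq[1] and irq[2]. A sketch of that split, with trivial stub handlers standing in for the driver's real ones.

#include <linux/interrupt.h>

static irqreturn_t my_handle_irq(int irq, void *priv)    { return IRQ_HANDLED; }
static irqreturn_t my_handle_irq_tx(int irq, void *priv) { return IRQ_HANDLED; }
static irqreturn_t my_handle_irq_rx(int irq, void *priv) { return IRQ_HANDLED; }

/* One combined line on shared-interrupt SoCs, split TX/RX lines otherwise. */
static int my_request_irqs(struct device *dev, int *irqs, void *priv,
                           bool shared)
{
        int err;

        if (shared)
                return devm_request_irq(dev, irqs[0], my_handle_irq, 0,
                                        dev_name(dev), priv);

        err = devm_request_irq(dev, irqs[1], my_handle_irq_tx, 0,
                               dev_name(dev), priv);
        if (err)
                return err;
        return devm_request_irq(dev, irqs[2], my_handle_irq_rx, 0,
                                dev_name(dev), priv);
}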
5001 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
5002 err = mtk_mdio_init(eth); in mtk_probe()
5007 if (eth->soc->offload_version) { in mtk_probe()
5008 u8 ppe_num = eth->soc->ppe_num; in mtk_probe()
5010 ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num); in mtk_probe()
5012 u32 ppe_addr = eth->soc->reg_map->ppe_base; in mtk_probe()
5015 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); in mtk_probe()
5017 if (!eth->ppe[i]) { in mtk_probe()
5021 err = mtk_eth_offload_init(eth, i); in mtk_probe()
5029 if (!eth->netdev[i]) in mtk_probe()
5032 err = register_netdev(eth->netdev[i]); in mtk_probe()
5034 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
5037 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
5039 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
5045 eth->dummy_dev = alloc_netdev_dummy(0); in mtk_probe()
5046 if (!eth->dummy_dev) { in mtk_probe()
5048 dev_err(eth->dev, "failed to allocated dummy device\n"); in mtk_probe()
5051 netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx); in mtk_probe()
5052 netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx); in mtk_probe()
5054 platform_set_drvdata(pdev, eth); in mtk_probe()
5055 schedule_delayed_work(&eth->reset.monitor_work, in mtk_probe()
5061 mtk_unreg_dev(eth); in mtk_probe()
5063 mtk_ppe_deinit(eth); in mtk_probe()
5064 mtk_mdio_cleanup(eth); in mtk_probe()
5066 mtk_free_dev(eth); in mtk_probe()
5068 mtk_hw_deinit(eth); in mtk_probe()
5072 mtk_sgmii_destroy(eth); in mtk_probe()
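The trailing mtk_probe() matches are its unwind path: each failure jumps to a label that releases only what was already set up, in reverse order of setup. A compressed sketch of that goto-ladder idiom; the steps and names are hypothetical.

#include <linux/platform_device.h>

static int my_hw_init(void)         { return 0; }
static void my_hw_deinit(void)      { }
static int my_add_ports(void)       { return 0; }
static void my_free_ports(void)     { }
static int my_register_ports(void)  { return 0; }

static int my_probe(struct platform_device *pdev)
{
        int err;

        err = my_hw_init();
        if (err)
                return err;
        err = my_add_ports();
        if (err)
                goto err_hw_deinit;
        err = my_register_ports();
        if (err)
                goto err_free_ports;
        return 0;

err_free_ports:
        my_free_ports();
err_hw_deinit:
        my_hw_deinit();
        return err;
}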
5079 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
5085 if (!eth->netdev[i]) in mtk_remove()
5087 mtk_stop(eth->netdev[i]); in mtk_remove()
5088 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
5093 mtk_hw_deinit(eth); in mtk_remove()
5095 netif_napi_del(&eth->tx_napi); in mtk_remove()
5096 netif_napi_del(&eth->rx_napi); in mtk_remove()
5097 mtk_cleanup(eth); in mtk_remove()
5098 free_netdev(eth->dummy_dev); in mtk_remove()
5099 mtk_mdio_cleanup(eth); in mtk_remove()
5353 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5354 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5355 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5356 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5357 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5358 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5359 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5360 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5361 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },