1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright (C) 2009-2016 John Crispin <[email protected]>
5 * Copyright (C) 2009-2016 Felix Fietkau <[email protected]>
6 * Copyright (C) 2013-2016 Michael Lee <[email protected]>
7 */
8
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30
31 #include "mtk_eth_soc.h"
32 #include "mtk_wed.h"
33
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
37
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
40
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43 sizeof(u64) }
44
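/* Per-SoC register offset maps. Each table lists where the PDMA, QDMA,
 * GDMA counter, PPE/WDMA and PSE status registers sit for one NETSYS
 * generation; the rest of the driver reaches them via eth->soc->reg_map.
 */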
45 static const struct mtk_reg_map mtk_reg_map = {
46 .tx_irq_mask = 0x1a1c,
47 .tx_irq_status = 0x1a18,
48 .pdma = {
49 .rx_ptr = 0x0900,
50 .rx_cnt_cfg = 0x0904,
51 .pcrx_ptr = 0x0908,
52 .glo_cfg = 0x0a04,
53 .rst_idx = 0x0a08,
54 .delay_irq = 0x0a0c,
55 .irq_status = 0x0a20,
56 .irq_mask = 0x0a28,
57 .adma_rx_dbg0 = 0x0a38,
58 .int_grp = 0x0a50,
59 },
60 .qdma = {
61 .qtx_cfg = 0x1800,
62 .qtx_sch = 0x1804,
63 .rx_ptr = 0x1900,
64 .rx_cnt_cfg = 0x1904,
65 .qcrx_ptr = 0x1908,
66 .glo_cfg = 0x1a04,
67 .rst_idx = 0x1a08,
68 .delay_irq = 0x1a0c,
69 .fc_th = 0x1a10,
70 .tx_sch_rate = 0x1a14,
71 .int_grp = 0x1a20,
72 .hred = 0x1a44,
73 .ctx_ptr = 0x1b00,
74 .dtx_ptr = 0x1b04,
75 .crx_ptr = 0x1b10,
76 .drx_ptr = 0x1b14,
77 .fq_head = 0x1b20,
78 .fq_tail = 0x1b24,
79 .fq_count = 0x1b28,
80 .fq_blen = 0x1b2c,
81 },
82 .gdm1_cnt = 0x2400,
83 .gdma_to_ppe = {
84 [0] = 0x4444,
85 },
86 .ppe_base = 0x0c00,
87 .wdma_base = {
88 [0] = 0x2800,
89 [1] = 0x2c00,
90 },
91 .pse_iq_sta = 0x0110,
92 .pse_oq_sta = 0x0118,
93 };
94
95 static const struct mtk_reg_map mt7628_reg_map = {
96 .tx_irq_mask = 0x0a28,
97 .tx_irq_status = 0x0a20,
98 .pdma = {
99 .rx_ptr = 0x0900,
100 .rx_cnt_cfg = 0x0904,
101 .pcrx_ptr = 0x0908,
102 .glo_cfg = 0x0a04,
103 .rst_idx = 0x0a08,
104 .delay_irq = 0x0a0c,
105 .irq_status = 0x0a20,
106 .irq_mask = 0x0a28,
107 .int_grp = 0x0a50,
108 },
109 };
110
111 static const struct mtk_reg_map mt7986_reg_map = {
112 .tx_irq_mask = 0x461c,
113 .tx_irq_status = 0x4618,
114 .pdma = {
115 .rx_ptr = 0x4100,
116 .rx_cnt_cfg = 0x4104,
117 .pcrx_ptr = 0x4108,
118 .glo_cfg = 0x4204,
119 .rst_idx = 0x4208,
120 .delay_irq = 0x420c,
121 .irq_status = 0x4220,
122 .irq_mask = 0x4228,
123 .adma_rx_dbg0 = 0x4238,
124 .int_grp = 0x4250,
125 },
126 .qdma = {
127 .qtx_cfg = 0x4400,
128 .qtx_sch = 0x4404,
129 .rx_ptr = 0x4500,
130 .rx_cnt_cfg = 0x4504,
131 .qcrx_ptr = 0x4508,
132 .glo_cfg = 0x4604,
133 .rst_idx = 0x4608,
134 .delay_irq = 0x460c,
135 .fc_th = 0x4610,
136 .int_grp = 0x4620,
137 .hred = 0x4644,
138 .ctx_ptr = 0x4700,
139 .dtx_ptr = 0x4704,
140 .crx_ptr = 0x4710,
141 .drx_ptr = 0x4714,
142 .fq_head = 0x4720,
143 .fq_tail = 0x4724,
144 .fq_count = 0x4728,
145 .fq_blen = 0x472c,
146 .tx_sch_rate = 0x4798,
147 },
148 .gdm1_cnt = 0x1c00,
149 .gdma_to_ppe = {
150 [0] = 0x3333,
151 [1] = 0x4444,
152 },
153 .ppe_base = 0x2000,
154 .wdma_base = {
155 [0] = 0x4800,
156 [1] = 0x4c00,
157 },
158 .pse_iq_sta = 0x0180,
159 .pse_oq_sta = 0x01a0,
160 };
161
162 static const struct mtk_reg_map mt7988_reg_map = {
163 .tx_irq_mask = 0x461c,
164 .tx_irq_status = 0x4618,
165 .pdma = {
166 .rx_ptr = 0x6900,
167 .rx_cnt_cfg = 0x6904,
168 .pcrx_ptr = 0x6908,
169 .glo_cfg = 0x6a04,
170 .rst_idx = 0x6a08,
171 .delay_irq = 0x6a0c,
172 .irq_status = 0x6a20,
173 .irq_mask = 0x6a28,
174 .adma_rx_dbg0 = 0x6a38,
175 .int_grp = 0x6a50,
176 },
177 .qdma = {
178 .qtx_cfg = 0x4400,
179 .qtx_sch = 0x4404,
180 .rx_ptr = 0x4500,
181 .rx_cnt_cfg = 0x4504,
182 .qcrx_ptr = 0x4508,
183 .glo_cfg = 0x4604,
184 .rst_idx = 0x4608,
185 .delay_irq = 0x460c,
186 .fc_th = 0x4610,
187 .int_grp = 0x4620,
188 .hred = 0x4644,
189 .ctx_ptr = 0x4700,
190 .dtx_ptr = 0x4704,
191 .crx_ptr = 0x4710,
192 .drx_ptr = 0x4714,
193 .fq_head = 0x4720,
194 .fq_tail = 0x4724,
195 .fq_count = 0x4728,
196 .fq_blen = 0x472c,
197 .tx_sch_rate = 0x4798,
198 },
199 .gdm1_cnt = 0x1c00,
200 .gdma_to_ppe = {
201 [0] = 0x3333,
202 [1] = 0x4444,
203 [2] = 0xcccc,
204 },
205 .ppe_base = 0x2000,
206 .wdma_base = {
207 [0] = 0x4800,
208 [1] = 0x4c00,
209 [2] = 0x5000,
210 },
211 .pse_iq_sta = 0x0180,
212 .pse_oq_sta = 0x01a0,
213 };
214
215 /* strings used by ethtool */
216 static const struct mtk_ethtool_stats {
217 char str[ETH_GSTRING_LEN];
218 u32 offset;
219 } mtk_ethtool_stats[] = {
220 MTK_ETHTOOL_STAT(tx_bytes),
221 MTK_ETHTOOL_STAT(tx_packets),
222 MTK_ETHTOOL_STAT(tx_skip),
223 MTK_ETHTOOL_STAT(tx_collisions),
224 MTK_ETHTOOL_STAT(rx_bytes),
225 MTK_ETHTOOL_STAT(rx_packets),
226 MTK_ETHTOOL_STAT(rx_overflow),
227 MTK_ETHTOOL_STAT(rx_fcs_errors),
228 MTK_ETHTOOL_STAT(rx_short_errors),
229 MTK_ETHTOOL_STAT(rx_long_errors),
230 MTK_ETHTOOL_STAT(rx_checksum_errors),
231 MTK_ETHTOOL_STAT(rx_flow_control_packets),
232 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
233 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
234 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
235 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
236 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
237 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
238 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
239 };
240
241 static const char * const mtk_clks_source_name[] = {
242 "ethif",
243 "sgmiitop",
244 "esw",
245 "gp0",
246 "gp1",
247 "gp2",
248 "gp3",
249 "xgp1",
250 "xgp2",
251 "xgp3",
252 "crypto",
253 "fe",
254 "trgpll",
255 "sgmii_tx250m",
256 "sgmii_rx250m",
257 "sgmii_cdr_ref",
258 "sgmii_cdr_fb",
259 "sgmii2_tx250m",
260 "sgmii2_rx250m",
261 "sgmii2_cdr_ref",
262 "sgmii2_cdr_fb",
263 "sgmii_ck",
264 "eth2pll",
265 "wocpu0",
266 "wocpu1",
267 "netsys0",
268 "netsys1",
269 "ethwarp_wocpu2",
270 "ethwarp_wocpu1",
271 "ethwarp_wocpu0",
272 "top_usxgmii0_sel",
273 "top_usxgmii1_sel",
274 "top_sgm0_sel",
275 "top_sgm1_sel",
276 "top_xfi_phy0_xtal_sel",
277 "top_xfi_phy1_xtal_sel",
278 "top_eth_gmii_sel",
279 "top_eth_refck_50m_sel",
280 "top_eth_sys_200m_sel",
281 "top_eth_sys_sel",
282 "top_eth_xgmii_sel",
283 "top_eth_mii_sel",
284 "top_netsys_sel",
285 "top_netsys_500m_sel",
286 "top_netsys_pao_2x_sel",
287 "top_netsys_sync_250m_sel",
288 "top_netsys_ppefb_250m_sel",
289 "top_netsys_warp_sel",
290 };
291
292 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
293 {
294 __raw_writel(val, eth->base + reg);
295 }
296
297 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
298 {
299 return __raw_readl(eth->base + reg);
300 }
301
302 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
303 {
304 u32 val;
305
306 val = mtk_r32(eth, reg);
307 val &= ~mask;
308 val |= set;
309 mtk_w32(eth, val, reg);
310 return reg;
311 }
312
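/* Poll MTK_PHY_IAC until the PHY_IAC_ACCESS (busy) bit clears, giving up
 * after PHY_IAC_TIMEOUT jiffies.
 */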
313 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
314 {
315 unsigned long t_start = jiffies;
316
317 while (1) {
318 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
319 return 0;
320 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
321 break;
322 cond_resched();
323 }
324
325 dev_err(eth->dev, "mdio: MDIO timeout\n");
326 return -ETIMEDOUT;
327 }
328
329 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
330 u32 write_data)
331 {
332 int ret;
333
334 ret = mtk_mdio_busy_wait(eth);
335 if (ret < 0)
336 return ret;
337
338 mtk_w32(eth, PHY_IAC_ACCESS |
339 PHY_IAC_START_C22 |
340 PHY_IAC_CMD_WRITE |
341 PHY_IAC_REG(phy_reg) |
342 PHY_IAC_ADDR(phy_addr) |
343 PHY_IAC_DATA(write_data),
344 MTK_PHY_IAC);
345
346 ret = mtk_mdio_busy_wait(eth);
347 if (ret < 0)
348 return ret;
349
350 return 0;
351 }
352
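/* Clause 45 access is a two-step sequence on this controller: first latch
 * the register address for the target device (devad), then issue the
 * actual data write.
 */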
353 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
354 u32 devad, u32 phy_reg, u32 write_data)
355 {
356 int ret;
357
358 ret = mtk_mdio_busy_wait(eth);
359 if (ret < 0)
360 return ret;
361
362 mtk_w32(eth, PHY_IAC_ACCESS |
363 PHY_IAC_START_C45 |
364 PHY_IAC_CMD_C45_ADDR |
365 PHY_IAC_REG(devad) |
366 PHY_IAC_ADDR(phy_addr) |
367 PHY_IAC_DATA(phy_reg),
368 MTK_PHY_IAC);
369
370 ret = mtk_mdio_busy_wait(eth);
371 if (ret < 0)
372 return ret;
373
374 mtk_w32(eth, PHY_IAC_ACCESS |
375 PHY_IAC_START_C45 |
376 PHY_IAC_CMD_WRITE |
377 PHY_IAC_REG(devad) |
378 PHY_IAC_ADDR(phy_addr) |
379 PHY_IAC_DATA(write_data),
380 MTK_PHY_IAC);
381
382 ret = mtk_mdio_busy_wait(eth);
383 if (ret < 0)
384 return ret;
385
386 return 0;
387 }
388
389 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
390 {
391 int ret;
392
393 ret = mtk_mdio_busy_wait(eth);
394 if (ret < 0)
395 return ret;
396
397 mtk_w32(eth, PHY_IAC_ACCESS |
398 PHY_IAC_START_C22 |
399 PHY_IAC_CMD_C22_READ |
400 PHY_IAC_REG(phy_reg) |
401 PHY_IAC_ADDR(phy_addr),
402 MTK_PHY_IAC);
403
404 ret = mtk_mdio_busy_wait(eth);
405 if (ret < 0)
406 return ret;
407
408 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
409 }
410
411 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
412 u32 devad, u32 phy_reg)
413 {
414 int ret;
415
416 ret = mtk_mdio_busy_wait(eth);
417 if (ret < 0)
418 return ret;
419
420 mtk_w32(eth, PHY_IAC_ACCESS |
421 PHY_IAC_START_C45 |
422 PHY_IAC_CMD_C45_ADDR |
423 PHY_IAC_REG(devad) |
424 PHY_IAC_ADDR(phy_addr) |
425 PHY_IAC_DATA(phy_reg),
426 MTK_PHY_IAC);
427
428 ret = mtk_mdio_busy_wait(eth);
429 if (ret < 0)
430 return ret;
431
432 mtk_w32(eth, PHY_IAC_ACCESS |
433 PHY_IAC_START_C45 |
434 PHY_IAC_CMD_C45_READ |
435 PHY_IAC_REG(devad) |
436 PHY_IAC_ADDR(phy_addr),
437 MTK_PHY_IAC);
438
439 ret = mtk_mdio_busy_wait(eth);
440 if (ret < 0)
441 return ret;
442
443 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
444 }
445
446 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
447 int phy_reg, u16 val)
448 {
449 struct mtk_eth *eth = bus->priv;
450
451 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
452 }
453
454 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
455 int devad, int phy_reg, u16 val)
456 {
457 struct mtk_eth *eth = bus->priv;
458
459 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
460 }
461
462 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
463 {
464 struct mtk_eth *eth = bus->priv;
465
466 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
467 }
468
469 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
470 int phy_reg)
471 {
472 struct mtk_eth *eth = bus->priv;
473
474 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
475 }
476
477 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
478 phy_interface_t interface)
479 {
480 u32 val;
481
482 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
483 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
484
485 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
486 ETHSYS_TRGMII_MT7621_MASK, val);
487
488 return 0;
489 }
490
491 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
492 phy_interface_t interface)
493 {
494 int ret;
495
496 if (interface == PHY_INTERFACE_MODE_TRGMII) {
497 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
498 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
499 if (ret)
500 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
501 return;
502 }
503
504 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
505 }
506
507 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
508 {
509 /* Force Port1 XGMAC Link Up */
510 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
511 MTK_XGMAC_STS(MTK_GMAC1_ID));
512
513 /* Adjust GSW bridge IPG to 11 */
514 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
515 (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
516 (GSW_IPG_11 << GSWRX_IPG_SHIFT),
517 MTK_GSW_CFG);
518 }
519
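/* phylink .mac_select_pcs: SGMII and 802.3z links use one of the SGMII PCS
 * instances (shared or per-MAC, depending on MTK_SHARED_SGMII); all other
 * interface modes run without a separate PCS.
 */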
520 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
521 phy_interface_t interface)
522 {
523 struct mtk_mac *mac = container_of(config, struct mtk_mac,
524 phylink_config);
525 struct mtk_eth *eth = mac->hw;
526 unsigned int sid;
527
528 if (interface == PHY_INTERFACE_MODE_SGMII ||
529 phy_interface_mode_is_8023z(interface)) {
530 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
531 0 : mac->id;
532
533 return eth->sgmii_pcs[sid];
534 }
535
536 return NULL;
537 }
538
539 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
540 const struct phylink_link_state *state)
541 {
542 struct mtk_mac *mac = container_of(config, struct mtk_mac,
543 phylink_config);
544 struct mtk_eth *eth = mac->hw;
545 int val, ge_mode, err = 0;
546 u32 i;
547
548 /* MT76x8 has no hardware settings for the MAC */
549 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
550 mac->interface != state->interface) {
551 /* Setup soc pin functions */
552 switch (state->interface) {
553 case PHY_INTERFACE_MODE_TRGMII:
554 case PHY_INTERFACE_MODE_RGMII_TXID:
555 case PHY_INTERFACE_MODE_RGMII_RXID:
556 case PHY_INTERFACE_MODE_RGMII_ID:
557 case PHY_INTERFACE_MODE_RGMII:
558 case PHY_INTERFACE_MODE_MII:
559 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
560 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
561 if (err)
562 goto init_err;
563 }
564 break;
565 case PHY_INTERFACE_MODE_1000BASEX:
566 case PHY_INTERFACE_MODE_2500BASEX:
567 case PHY_INTERFACE_MODE_SGMII:
568 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
569 if (err)
570 goto init_err;
571 break;
572 case PHY_INTERFACE_MODE_GMII:
573 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
574 err = mtk_gmac_gephy_path_setup(eth, mac->id);
575 if (err)
576 goto init_err;
577 }
578 break;
579 case PHY_INTERFACE_MODE_INTERNAL:
580 break;
581 default:
582 goto err_phy;
583 }
584
585 /* Setup clock for 1st gmac */
586 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
587 !phy_interface_mode_is_8023z(state->interface) &&
588 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
589 if (MTK_HAS_CAPS(mac->hw->soc->caps,
590 MTK_TRGMII_MT7621_CLK)) {
591 if (mt7621_gmac0_rgmii_adjust(mac->hw,
592 state->interface))
593 goto err_phy;
594 } else {
595 mtk_gmac0_rgmii_adjust(mac->hw,
596 state->interface);
597
598 /* mt7623_pad_clk_setup */
599 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
600 mtk_w32(mac->hw,
601 TD_DM_DRVP(8) | TD_DM_DRVN(8),
602 TRGMII_TD_ODT(i));
603
604 /* Assert/release MT7623 RXC reset */
605 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
606 TRGMII_RCK_CTRL);
607 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
608 }
609 }
610
611 switch (state->interface) {
612 case PHY_INTERFACE_MODE_MII:
613 case PHY_INTERFACE_MODE_GMII:
614 ge_mode = 1;
615 break;
616 default:
617 ge_mode = 0;
618 break;
619 }
620
621 /* put the gmac into the right mode */
622 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
623 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
624 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
625 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
626
627 mac->interface = state->interface;
628 }
629
630 /* SGMII */
631 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
632 phy_interface_mode_is_8023z(state->interface)) {
633 /* The path from GMAC to SGMII will be enabled once the
634 * SGMIISYS setup is done.
635 */
636 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
637
638 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
639 SYSCFG0_SGMII_MASK,
640 ~(u32)SYSCFG0_SGMII_MASK);
641
642 /* Save the syscfg0 value for mac_finish */
643 mac->syscfg0 = val;
644 } else if (phylink_autoneg_inband(mode)) {
645 dev_err(eth->dev,
646 "In-band mode not supported in non SGMII mode!\n");
647 return;
648 }
649
650 /* Setup gmac */
651 if (mtk_is_netsys_v3_or_greater(eth) &&
652 mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
653 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
654 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
655
656 mtk_setup_bridge_switch(eth);
657 }
658
659 return;
660
661 err_phy:
662 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
663 mac->id, phy_modes(state->interface));
664 return;
665
666 init_err:
667 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
668 mac->id, phy_modes(state->interface), err);
669 }
670
671 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
672 phy_interface_t interface)
673 {
674 struct mtk_mac *mac = container_of(config, struct mtk_mac,
675 phylink_config);
676 struct mtk_eth *eth = mac->hw;
677 u32 mcr_cur, mcr_new;
678
679 /* Enable SGMII */
680 if (interface == PHY_INTERFACE_MODE_SGMII ||
681 phy_interface_mode_is_8023z(interface))
682 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
683 SYSCFG0_SGMII_MASK, mac->syscfg0);
684
685 /* Setup gmac */
686 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
687 mcr_new = mcr_cur;
688 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
689 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
690
691 /* Only update control register when needed! */
692 if (mcr_new != mcr_cur)
693 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
694
695 return 0;
696 }
697
698 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
699 phy_interface_t interface)
700 {
701 struct mtk_mac *mac = container_of(config, struct mtk_mac,
702 phylink_config);
703 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
704
705 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
706 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
707 }
708
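/* Program the QTX_SCH entry for one TX queue: a fixed minimum rate of
 * 10 Mbps plus a speed-dependent maximum rate. MT7621 needs different
 * mantissa/exponent values, hence the separate switch.
 */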
709 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
710 int speed)
711 {
712 const struct mtk_soc_data *soc = eth->soc;
713 u32 ofs, val;
714
715 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
716 return;
717
718 val = MTK_QTX_SCH_MIN_RATE_EN |
719 /* minimum: 10 Mbps */
720 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
721 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
722 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
723 if (mtk_is_netsys_v1(eth))
724 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
725
726 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
727 switch (speed) {
728 case SPEED_10:
729 val |= MTK_QTX_SCH_MAX_RATE_EN |
730 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
731 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
732 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
733 break;
734 case SPEED_100:
735 val |= MTK_QTX_SCH_MAX_RATE_EN |
736 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
737 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
738 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
739 break;
740 case SPEED_1000:
741 val |= MTK_QTX_SCH_MAX_RATE_EN |
742 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
743 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
744 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
745 break;
746 default:
747 break;
748 }
749 } else {
750 switch (speed) {
751 case SPEED_10:
752 val |= MTK_QTX_SCH_MAX_RATE_EN |
753 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
754 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
755 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
756 break;
757 case SPEED_100:
758 val |= MTK_QTX_SCH_MAX_RATE_EN |
759 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
760 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
761 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
762 break;
763 case SPEED_1000:
764 val |= MTK_QTX_SCH_MAX_RATE_EN |
765 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
766 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
767 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
768 break;
769 default:
770 break;
771 }
772 }
773
774 ofs = MTK_QTX_OFFSET * idx;
775 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
776 }
777
778 static void mtk_mac_link_up(struct phylink_config *config,
779 struct phy_device *phy,
780 unsigned int mode, phy_interface_t interface,
781 int speed, int duplex, bool tx_pause, bool rx_pause)
782 {
783 struct mtk_mac *mac = container_of(config, struct mtk_mac,
784 phylink_config);
785 u32 mcr;
786
787 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
788 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
789 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
790 MAC_MCR_FORCE_RX_FC);
791
792 /* Configure speed */
793 mac->speed = speed;
794 switch (speed) {
795 case SPEED_2500:
796 case SPEED_1000:
797 mcr |= MAC_MCR_SPEED_1000;
798 break;
799 case SPEED_100:
800 mcr |= MAC_MCR_SPEED_100;
801 break;
802 }
803
804 /* Configure duplex */
805 if (duplex == DUPLEX_FULL)
806 mcr |= MAC_MCR_FORCE_DPX;
807
808 /* Configure pause modes - phylink will avoid these for half duplex */
809 if (tx_pause)
810 mcr |= MAC_MCR_FORCE_TX_FC;
811 if (rx_pause)
812 mcr |= MAC_MCR_FORCE_RX_FC;
813
814 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
815 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
816 }
817
818 static const struct phylink_mac_ops mtk_phylink_ops = {
819 .mac_select_pcs = mtk_mac_select_pcs,
820 .mac_config = mtk_mac_config,
821 .mac_finish = mtk_mac_finish,
822 .mac_link_down = mtk_mac_link_down,
823 .mac_link_up = mtk_mac_link_up,
824 };
825
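/* Apply the MDC divider computed in mtk_mdio_init() and enable MDC turbo
 * mode, which lives in a MISC register on NETSYS v3 and in PPSC before.
 */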
826 static void mtk_mdio_config(struct mtk_eth *eth)
827 {
828 u32 val;
829
830 /* Configure MDC Divider */
831 val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
832
833 /* Configure MDC Turbo Mode */
834 if (mtk_is_netsys_v3_or_greater(eth))
835 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
836 else
837 val |= PPSC_MDC_TURBO;
838
839 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
840 }
841
842 static int mtk_mdio_init(struct mtk_eth *eth)
843 {
844 unsigned int max_clk = 2500000;
845 struct device_node *mii_np;
846 int ret;
847 u32 val;
848
849 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
850 if (!mii_np) {
851 dev_err(eth->dev, "no %s child node found", "mdio-bus");
852 return -ENODEV;
853 }
854
855 if (!of_device_is_available(mii_np)) {
856 ret = -ENODEV;
857 goto err_put_node;
858 }
859
860 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
861 if (!eth->mii_bus) {
862 ret = -ENOMEM;
863 goto err_put_node;
864 }
865
866 eth->mii_bus->name = "mdio";
867 eth->mii_bus->read = mtk_mdio_read_c22;
868 eth->mii_bus->write = mtk_mdio_write_c22;
869 eth->mii_bus->read_c45 = mtk_mdio_read_c45;
870 eth->mii_bus->write_c45 = mtk_mdio_write_c45;
871 eth->mii_bus->priv = eth;
872 eth->mii_bus->parent = eth->dev;
873
874 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
875
876 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
877 if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
878 dev_err(eth->dev, "MDIO clock frequency out of range");
879 ret = -EINVAL;
880 goto err_put_node;
881 }
882 max_clk = val;
883 }
884 eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
885 mtk_mdio_config(eth);
886 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
887 ret = of_mdiobus_register(eth->mii_bus, mii_np);
888
889 err_put_node:
890 of_node_put(mii_np);
891 return ret;
892 }
893
894 static void mtk_mdio_cleanup(struct mtk_eth *eth)
895 {
896 if (!eth->mii_bus)
897 return;
898
899 mdiobus_unregister(eth->mii_bus);
900 }
901
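/* IRQ mask helpers: TX interrupts are (un)masked through tx_irq_mask and
 * RX interrupts through the PDMA irq_mask register, each under its own
 * spinlock with interrupts disabled.
 */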
902 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
903 {
904 unsigned long flags;
905 u32 val;
906
907 spin_lock_irqsave(&eth->tx_irq_lock, flags);
908 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
909 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
910 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
911 }
912
913 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
914 {
915 unsigned long flags;
916 u32 val;
917
918 spin_lock_irqsave(&eth->tx_irq_lock, flags);
919 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
920 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
921 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
922 }
923
924 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
925 {
926 unsigned long flags;
927 u32 val;
928
929 spin_lock_irqsave(&eth->rx_irq_lock, flags);
930 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
931 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
932 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
933 }
934
935 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
936 {
937 unsigned long flags;
938 u32 val;
939
940 spin_lock_irqsave(&eth->rx_irq_lock, flags);
941 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
942 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
943 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
944 }
945
946 static int mtk_set_mac_address(struct net_device *dev, void *p)
947 {
948 int ret = eth_mac_addr(dev, p);
949 struct mtk_mac *mac = netdev_priv(dev);
950 struct mtk_eth *eth = mac->hw;
951 const char *macaddr = dev->dev_addr;
952
953 if (ret)
954 return ret;
955
956 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
957 return -EBUSY;
958
959 spin_lock_bh(&mac->hw->page_lock);
960 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
961 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
962 MT7628_SDM_MAC_ADRH);
963 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
964 (macaddr[4] << 8) | macaddr[5],
965 MT7628_SDM_MAC_ADRL);
966 } else {
967 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
968 MTK_GDMA_MAC_ADRH(mac->id));
969 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
970 (macaddr[4] << 8) | macaddr[5],
971 MTK_GDMA_MAC_ADRL(mac->id));
972 }
973 spin_unlock_bh(&mac->hw->page_lock);
974
975 return 0;
976 }
977
978 void mtk_stats_update_mac(struct mtk_mac *mac)
979 {
980 struct mtk_hw_stats *hw_stats = mac->hw_stats;
981 struct mtk_eth *eth = mac->hw;
982
983 u64_stats_update_begin(&hw_stats->syncp);
984
985 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
986 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
987 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
988 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
989 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
990 hw_stats->rx_checksum_errors +=
991 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
992 } else {
993 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
994 unsigned int offs = hw_stats->reg_offset;
995 u64 stats;
996
997 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
998 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
999 if (stats)
1000 hw_stats->rx_bytes += (stats << 32);
1001 hw_stats->rx_packets +=
1002 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
1003 hw_stats->rx_overflow +=
1004 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
1005 hw_stats->rx_fcs_errors +=
1006 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
1007 hw_stats->rx_short_errors +=
1008 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
1009 hw_stats->rx_long_errors +=
1010 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1011 hw_stats->rx_checksum_errors +=
1012 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1013 hw_stats->rx_flow_control_packets +=
1014 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1015
1016 if (mtk_is_netsys_v3_or_greater(eth)) {
1017 hw_stats->tx_skip +=
1018 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1019 hw_stats->tx_collisions +=
1020 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1021 hw_stats->tx_bytes +=
1022 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1023 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1024 if (stats)
1025 hw_stats->tx_bytes += (stats << 32);
1026 hw_stats->tx_packets +=
1027 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1028 } else {
1029 hw_stats->tx_skip +=
1030 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1031 hw_stats->tx_collisions +=
1032 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1033 hw_stats->tx_bytes +=
1034 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1035 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1036 if (stats)
1037 hw_stats->tx_bytes += (stats << 32);
1038 hw_stats->tx_packets +=
1039 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1040 }
1041 }
1042
1043 u64_stats_update_end(&hw_stats->syncp);
1044 }
1045
1046 static void mtk_stats_update(struct mtk_eth *eth)
1047 {
1048 int i;
1049
1050 for (i = 0; i < MTK_MAX_DEVS; i++) {
1051 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1052 continue;
1053 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1054 mtk_stats_update_mac(eth->mac[i]);
1055 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1056 }
1057 }
1058 }
1059
1060 static void mtk_get_stats64(struct net_device *dev,
1061 struct rtnl_link_stats64 *storage)
1062 {
1063 struct mtk_mac *mac = netdev_priv(dev);
1064 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1065 unsigned int start;
1066
1067 if (netif_running(dev) && netif_device_present(dev)) {
1068 if (spin_trylock_bh(&hw_stats->stats_lock)) {
1069 mtk_stats_update_mac(mac);
1070 spin_unlock_bh(&hw_stats->stats_lock);
1071 }
1072 }
1073
1074 do {
1075 start = u64_stats_fetch_begin(&hw_stats->syncp);
1076 storage->rx_packets = hw_stats->rx_packets;
1077 storage->tx_packets = hw_stats->tx_packets;
1078 storage->rx_bytes = hw_stats->rx_bytes;
1079 storage->tx_bytes = hw_stats->tx_bytes;
1080 storage->collisions = hw_stats->tx_collisions;
1081 storage->rx_length_errors = hw_stats->rx_short_errors +
1082 hw_stats->rx_long_errors;
1083 storage->rx_over_errors = hw_stats->rx_overflow;
1084 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1085 storage->rx_errors = hw_stats->rx_checksum_errors;
1086 storage->tx_aborted_errors = hw_stats->tx_skip;
1087 } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1088
1089 storage->tx_errors = dev->stats.tx_errors;
1090 storage->rx_dropped = dev->stats.rx_dropped;
1091 storage->tx_dropped = dev->stats.tx_dropped;
1092 }
1093
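/* RX buffer sizing helpers: mtk_max_frag_size() gives the allocation size
 * needed for a given MTU (payload plus skb_shared_info), mtk_max_buf_size()
 * gives the usable payload for a fragment of that size.
 */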
1094 static inline int mtk_max_frag_size(int mtu)
1095 {
1096 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1097 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1098 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1099
1100 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1101 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1102 }
1103
1104 static inline int mtk_max_buf_size(int frag_size)
1105 {
1106 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1107 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1108
1109 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1110
1111 return buf_size;
1112 }
1113
1114 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1115 struct mtk_rx_dma_v2 *dma_rxd)
1116 {
1117 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1118 if (!(rxd->rxd2 & RX_DMA_DONE))
1119 return false;
1120
1121 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1122 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1123 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1124 if (mtk_is_netsys_v3_or_greater(eth)) {
1125 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1126 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1127 }
1128
1129 return true;
1130 }
1131
1132 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1133 {
1134 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1135 unsigned long data;
1136
1137 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1138 get_order(size));
1139
1140 return (void *)data;
1141 }
1142
1143 /* the QDMA core needs scratch memory to be set up */
1144 static int mtk_init_fq_dma(struct mtk_eth *eth)
1145 {
1146 const struct mtk_soc_data *soc = eth->soc;
1147 dma_addr_t phy_ring_tail;
1148 int cnt = soc->tx.fq_dma_size;
1149 dma_addr_t dma_addr;
1150 int i, j, len;
1151
1152 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1153 eth->scratch_ring = eth->sram_base;
1154 else
1155 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1156 cnt * soc->tx.desc_size,
1157 &eth->phy_scratch_ring,
1158 GFP_KERNEL);
1159
1160 if (unlikely(!eth->scratch_ring))
1161 return -ENOMEM;
1162
1163 phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1164
1165 for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1166 len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1167 eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1168
1169 if (unlikely(!eth->scratch_head[j]))
1170 return -ENOMEM;
1171
1172 dma_addr = dma_map_single(eth->dma_dev,
1173 eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1174 DMA_FROM_DEVICE);
1175
1176 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1177 return -ENOMEM;
1178
1179 for (i = 0; i < len; i++) {
1180 struct mtk_tx_dma_v2 *txd;
1181
1182 txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1183 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1184 if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1185 txd->txd2 = eth->phy_scratch_ring +
1186 (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1187
1188 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1189 if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1190 txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1191
1192 txd->txd4 = 0;
1193 if (mtk_is_netsys_v2_or_greater(eth)) {
1194 txd->txd5 = 0;
1195 txd->txd6 = 0;
1196 txd->txd7 = 0;
1197 txd->txd8 = 0;
1198 }
1199 }
1200 }
1201
1202 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1203 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1204 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1205 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1206
1207 return 0;
1208 }
1209
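/* TX descriptor addressing helpers. QDMA descriptors are linked via DMA
 * addresses (txd2), so translating between DMA address, CPU pointer, ring
 * index and the matching mtk_tx_buf is plain pointer arithmetic over the
 * flat descriptor array.
 */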
1210 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1211 {
1212 return ring->dma + (desc - ring->phys);
1213 }
1214
1215 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1216 void *txd, u32 txd_size)
1217 {
1218 int idx = (txd - ring->dma) / txd_size;
1219
1220 return &ring->buf[idx];
1221 }
1222
1223 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1224 struct mtk_tx_dma *dma)
1225 {
1226 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1227 }
1228
1229 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1230 {
1231 return (dma - ring->dma) / txd_size;
1232 }
1233
1234 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1235 struct xdp_frame_bulk *bq, bool napi)
1236 {
1237 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1238 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1239 dma_unmap_single(eth->dma_dev,
1240 dma_unmap_addr(tx_buf, dma_addr0),
1241 dma_unmap_len(tx_buf, dma_len0),
1242 DMA_TO_DEVICE);
1243 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1244 dma_unmap_page(eth->dma_dev,
1245 dma_unmap_addr(tx_buf, dma_addr0),
1246 dma_unmap_len(tx_buf, dma_len0),
1247 DMA_TO_DEVICE);
1248 }
1249 } else {
1250 if (dma_unmap_len(tx_buf, dma_len0)) {
1251 dma_unmap_page(eth->dma_dev,
1252 dma_unmap_addr(tx_buf, dma_addr0),
1253 dma_unmap_len(tx_buf, dma_len0),
1254 DMA_TO_DEVICE);
1255 }
1256
1257 if (dma_unmap_len(tx_buf, dma_len1)) {
1258 dma_unmap_page(eth->dma_dev,
1259 dma_unmap_addr(tx_buf, dma_addr1),
1260 dma_unmap_len(tx_buf, dma_len1),
1261 DMA_TO_DEVICE);
1262 }
1263 }
1264
1265 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1266 if (tx_buf->type == MTK_TYPE_SKB) {
1267 struct sk_buff *skb = tx_buf->data;
1268
1269 if (napi)
1270 napi_consume_skb(skb, napi);
1271 else
1272 dev_kfree_skb_any(skb);
1273 } else {
1274 struct xdp_frame *xdpf = tx_buf->data;
1275
1276 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1277 xdp_return_frame_rx_napi(xdpf);
1278 else if (bq)
1279 xdp_return_frame_bulk(xdpf, bq);
1280 else
1281 xdp_return_frame(xdpf);
1282 }
1283 }
1284 tx_buf->flags = 0;
1285 tx_buf->data = NULL;
1286 }
1287
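/* Record unmap info for one mapped buffer. Without QDMA each PDMA
 * descriptor carries two buffers, so even/odd indices go to txd1/txd3 and
 * dma_addr0/dma_addr1 respectively.
 */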
1288 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1289 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1290 size_t size, int idx)
1291 {
1292 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1293 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1294 dma_unmap_len_set(tx_buf, dma_len0, size);
1295 } else {
1296 if (idx & 1) {
1297 txd->txd3 = mapped_addr;
1298 txd->txd2 |= TX_DMA_PLEN1(size);
1299 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1300 dma_unmap_len_set(tx_buf, dma_len1, size);
1301 } else {
1302 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1303 txd->txd1 = mapped_addr;
1304 txd->txd2 = TX_DMA_PLEN0(size);
1305 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1306 dma_unmap_len_set(tx_buf, dma_len0, size);
1307 }
1308 }
1309 }
1310
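/* Fill a TX descriptor in the NETSYS v1 layout: buffer address in txd1,
 * length/last-segment/queue id in txd3, and per-packet flags (forward
 * port, TSO, checksum, VLAN insertion) in txd4.
 */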
1311 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1312 struct mtk_tx_dma_desc_info *info)
1313 {
1314 struct mtk_mac *mac = netdev_priv(dev);
1315 struct mtk_eth *eth = mac->hw;
1316 struct mtk_tx_dma *desc = txd;
1317 u32 data;
1318
1319 WRITE_ONCE(desc->txd1, info->addr);
1320
1321 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1322 FIELD_PREP(TX_DMA_PQID, info->qid);
1323 if (info->last)
1324 data |= TX_DMA_LS0;
1325 WRITE_ONCE(desc->txd3, data);
1326
1327 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1328 if (info->first) {
1329 if (info->gso)
1330 data |= TX_DMA_TSO;
1331 /* tx checksum offload */
1332 if (info->csum)
1333 data |= TX_DMA_CHKSUM;
1334 /* vlan header offload */
1335 if (info->vlan)
1336 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1337 }
1338 WRITE_ONCE(desc->txd4, data);
1339 }
1340
1341 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1342 struct mtk_tx_dma_desc_info *info)
1343 {
1344 struct mtk_mac *mac = netdev_priv(dev);
1345 struct mtk_tx_dma_v2 *desc = txd;
1346 struct mtk_eth *eth = mac->hw;
1347 u32 data;
1348
1349 WRITE_ONCE(desc->txd1, info->addr);
1350
1351 data = TX_DMA_PLEN0(info->size);
1352 if (info->last)
1353 data |= TX_DMA_LS0;
1354
1355 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1356 data |= TX_DMA_PREP_ADDR64(info->addr);
1357
1358 WRITE_ONCE(desc->txd3, data);
1359
1360 /* set forward port */
1361 switch (mac->id) {
1362 case MTK_GMAC1_ID:
1363 data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1364 break;
1365 case MTK_GMAC2_ID:
1366 data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1367 break;
1368 case MTK_GMAC3_ID:
1369 data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1370 break;
1371 }
1372
1373 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1374 WRITE_ONCE(desc->txd4, data);
1375
1376 data = 0;
1377 if (info->first) {
1378 if (info->gso)
1379 data |= TX_DMA_TSO_V2;
1380 /* tx checksum offload */
1381 if (info->csum)
1382 data |= TX_DMA_CHKSUM_V2;
1383 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1384 data |= TX_DMA_SPTAG_V3;
1385 }
1386 WRITE_ONCE(desc->txd5, data);
1387
1388 data = 0;
1389 if (info->first && info->vlan)
1390 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1391 WRITE_ONCE(desc->txd6, data);
1392
1393 WRITE_ONCE(desc->txd7, 0);
1394 WRITE_ONCE(desc->txd8, 0);
1395 }
1396
1397 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1398 struct mtk_tx_dma_desc_info *info)
1399 {
1400 struct mtk_mac *mac = netdev_priv(dev);
1401 struct mtk_eth *eth = mac->hw;
1402
1403 if (mtk_is_netsys_v2_or_greater(eth))
1404 mtk_tx_set_dma_desc_v2(dev, txd, info);
1405 else
1406 mtk_tx_set_dma_desc_v1(dev, txd, info);
1407 }
1408
1409 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1410 int tx_num, struct mtk_tx_ring *ring, bool gso)
1411 {
1412 struct mtk_tx_dma_desc_info txd_info = {
1413 .size = skb_headlen(skb),
1414 .gso = gso,
1415 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1416 .vlan = skb_vlan_tag_present(skb),
1417 .qid = skb_get_queue_mapping(skb),
1418 .vlan_tci = skb_vlan_tag_get(skb),
1419 .first = true,
1420 .last = !skb_is_nonlinear(skb),
1421 };
1422 struct netdev_queue *txq;
1423 struct mtk_mac *mac = netdev_priv(dev);
1424 struct mtk_eth *eth = mac->hw;
1425 const struct mtk_soc_data *soc = eth->soc;
1426 struct mtk_tx_dma *itxd, *txd;
1427 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1428 struct mtk_tx_buf *itx_buf, *tx_buf;
1429 int i, n_desc = 1;
1430 int queue = skb_get_queue_mapping(skb);
1431 int k = 0;
1432
1433 txq = netdev_get_tx_queue(dev, queue);
1434 itxd = ring->next_free;
1435 itxd_pdma = qdma_to_pdma(ring, itxd);
1436 if (itxd == ring->last_free)
1437 return -ENOMEM;
1438
1439 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1440 memset(itx_buf, 0, sizeof(*itx_buf));
1441
1442 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1443 DMA_TO_DEVICE);
1444 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1445 return -ENOMEM;
1446
1447 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1448
1449 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1450 itx_buf->mac_id = mac->id;
1451 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1452 k++);
1453
1454 /* TX SG offload */
1455 txd = itxd;
1456 txd_pdma = qdma_to_pdma(ring, txd);
1457
1458 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1459 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1460 unsigned int offset = 0;
1461 int frag_size = skb_frag_size(frag);
1462
1463 while (frag_size) {
1464 bool new_desc = true;
1465
1466 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1467 (i & 0x1)) {
1468 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1469 txd_pdma = qdma_to_pdma(ring, txd);
1470 if (txd == ring->last_free)
1471 goto err_dma;
1472
1473 n_desc++;
1474 } else {
1475 new_desc = false;
1476 }
1477
1478 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1479 txd_info.size = min_t(unsigned int, frag_size,
1480 soc->tx.dma_max_len);
1481 txd_info.qid = queue;
1482 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1483 !(frag_size - txd_info.size);
1484 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1485 offset, txd_info.size,
1486 DMA_TO_DEVICE);
1487 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1488 goto err_dma;
1489
1490 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1491
1492 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1493 soc->tx.desc_size);
1494 if (new_desc)
1495 memset(tx_buf, 0, sizeof(*tx_buf));
1496 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1497 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1498 tx_buf->mac_id = mac->id;
1499
1500 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1501 txd_info.size, k++);
1502
1503 frag_size -= txd_info.size;
1504 offset += txd_info.size;
1505 }
1506 }
1507
1508 /* store skb to cleanup */
1509 itx_buf->type = MTK_TYPE_SKB;
1510 itx_buf->data = skb;
1511
1512 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1513 if (k & 0x1)
1514 txd_pdma->txd2 |= TX_DMA_LS0;
1515 else
1516 txd_pdma->txd2 |= TX_DMA_LS1;
1517 }
1518
1519 netdev_tx_sent_queue(txq, skb->len);
1520 skb_tx_timestamp(skb);
1521
1522 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1523 atomic_sub(n_desc, &ring->free_count);
1524
1525 /* make sure that all changes to the dma ring are flushed before we
1526 * continue
1527 */
1528 wmb();
1529
1530 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1531 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1532 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1533 } else {
1534 int next_idx;
1535
1536 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1537 ring->dma_size);
1538 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1539 }
1540
1541 return 0;
1542
1543 err_dma:
1544 do {
1545 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1546
1547 /* unmap dma */
1548 mtk_tx_unmap(eth, tx_buf, NULL, false);
1549
1550 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1551 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1552 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1553
1554 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1555 itxd_pdma = qdma_to_pdma(ring, itxd);
1556 } while (itxd != txd);
1557
1558 return -ENOMEM;
1559 }
1560
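/* Worst-case number of TX descriptors needed for an skb: one for the
 * linear area plus one per fragment, with GSO fragments split to respect
 * the per-descriptor dma_max_len limit.
 */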
1561 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1562 {
1563 int i, nfrags = 1;
1564 skb_frag_t *frag;
1565
1566 if (skb_is_gso(skb)) {
1567 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1568 frag = &skb_shinfo(skb)->frags[i];
1569 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1570 eth->soc->tx.dma_max_len);
1571 }
1572 } else {
1573 nfrags += skb_shinfo(skb)->nr_frags;
1574 }
1575
1576 return nfrags;
1577 }
1578
1579 static int mtk_queue_stopped(struct mtk_eth *eth)
1580 {
1581 int i;
1582
1583 for (i = 0; i < MTK_MAX_DEVS; i++) {
1584 if (!eth->netdev[i])
1585 continue;
1586 if (netif_queue_stopped(eth->netdev[i]))
1587 return 1;
1588 }
1589
1590 return 0;
1591 }
1592
1593 static void mtk_wake_queue(struct mtk_eth *eth)
1594 {
1595 int i;
1596
1597 for (i = 0; i < MTK_MAX_DEVS; i++) {
1598 if (!eth->netdev[i])
1599 continue;
1600 netif_tx_wake_all_queues(eth->netdev[i]);
1601 }
1602 }
1603
1604 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1605 {
1606 struct mtk_mac *mac = netdev_priv(dev);
1607 struct mtk_eth *eth = mac->hw;
1608 struct mtk_tx_ring *ring = &eth->tx_ring;
1609 struct net_device_stats *stats = &dev->stats;
1610 bool gso = false;
1611 int tx_num;
1612
1613 /* normally we can rely on the stack not calling this more than once,
1614 * however we have 2 queues running on the same ring so we need to lock
1615 * the ring access
1616 */
1617 spin_lock(&eth->page_lock);
1618
1619 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1620 goto drop;
1621
1622 tx_num = mtk_cal_txd_req(eth, skb);
1623 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1624 netif_tx_stop_all_queues(dev);
1625 netif_err(eth, tx_queued, dev,
1626 "Tx Ring full when queue awake!\n");
1627 spin_unlock(&eth->page_lock);
1628 return NETDEV_TX_BUSY;
1629 }
1630
1631 /* TSO: fill MSS info in tcp checksum field */
1632 if (skb_is_gso(skb)) {
1633 if (skb_cow_head(skb, 0)) {
1634 netif_warn(eth, tx_err, dev,
1635 "GSO expand head fail.\n");
1636 goto drop;
1637 }
1638
1639 if (skb_shinfo(skb)->gso_type &
1640 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1641 gso = true;
1642 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1643 }
1644 }
1645
1646 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1647 goto drop;
1648
1649 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1650 netif_tx_stop_all_queues(dev);
1651
1652 spin_unlock(&eth->page_lock);
1653
1654 return NETDEV_TX_OK;
1655
1656 drop:
1657 spin_unlock(&eth->page_lock);
1658 stats->tx_dropped++;
1659 dev_kfree_skb_any(skb);
1660 return NETDEV_TX_OK;
1661 }
1662
1663 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1664 {
1665 int i;
1666 struct mtk_rx_ring *ring;
1667 int idx;
1668
1669 if (!eth->hwlro)
1670 return &eth->rx_ring[0];
1671
1672 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1673 struct mtk_rx_dma *rxd;
1674
1675 ring = &eth->rx_ring[i];
1676 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1677 rxd = ring->dma + idx * eth->soc->rx.desc_size;
1678 if (rxd->rxd2 & RX_DMA_DONE) {
1679 ring->calc_idx_update = true;
1680 return ring;
1681 }
1682 }
1683
1684 return NULL;
1685 }
1686
1687 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1688 {
1689 struct mtk_rx_ring *ring;
1690 int i;
1691
1692 if (!eth->hwlro) {
1693 ring = &eth->rx_ring[0];
1694 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1695 } else {
1696 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1697 ring = &eth->rx_ring[i];
1698 if (ring->calc_idx_update) {
1699 ring->calc_idx_update = false;
1700 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1701 }
1702 }
1703 }
1704 }
1705
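/* page_pool backed RX buffers (used by the XDP path) are only enabled on
 * NETSYS v2 and newer; older SoCs stay on the legacy frag allocator.
 */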
1706 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1707 {
1708 return mtk_is_netsys_v2_or_greater(eth);
1709 }
1710
1711 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1712 struct xdp_rxq_info *xdp_q,
1713 int id, int size)
1714 {
1715 struct page_pool_params pp_params = {
1716 .order = 0,
1717 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1718 .pool_size = size,
1719 .nid = NUMA_NO_NODE,
1720 .dev = eth->dma_dev,
1721 .offset = MTK_PP_HEADROOM,
1722 .max_len = MTK_PP_MAX_BUF_SIZE,
1723 };
1724 struct page_pool *pp;
1725 int err;
1726
1727 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1728 : DMA_FROM_DEVICE;
1729 pp = page_pool_create(&pp_params);
1730 if (IS_ERR(pp))
1731 return pp;
1732
1733 err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1734 eth->rx_napi.napi_id, PAGE_SIZE);
1735 if (err < 0)
1736 goto err_free_pp;
1737
1738 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1739 if (err)
1740 goto err_unregister_rxq;
1741
1742 return pp;
1743
1744 err_unregister_rxq:
1745 xdp_rxq_info_unreg(xdp_q);
1746 err_free_pp:
1747 page_pool_destroy(pp);
1748
1749 return ERR_PTR(err);
1750 }
1751
1752 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1753 gfp_t gfp_mask)
1754 {
1755 struct page *page;
1756
1757 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1758 if (!page)
1759 return NULL;
1760
1761 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1762 return page_address(page);
1763 }
1764
1765 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1766 {
1767 if (ring->page_pool)
1768 page_pool_put_full_page(ring->page_pool,
1769 virt_to_head_page(data), napi);
1770 else
1771 skb_free_frag(data);
1772 }
1773
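/* Map one xdp_frame buffer onto a TX descriptor. ndo_xdp_xmit frames need
 * a fresh DMA mapping, while XDP_TX buffers already come from the page
 * pool and only need a dma_sync before transmit.
 */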
1774 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1775 struct mtk_tx_dma_desc_info *txd_info,
1776 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1777 void *data, u16 headroom, int index, bool dma_map)
1778 {
1779 struct mtk_tx_ring *ring = &eth->tx_ring;
1780 struct mtk_mac *mac = netdev_priv(dev);
1781 struct mtk_tx_dma *txd_pdma;
1782
1783 if (dma_map) { /* ndo_xdp_xmit */
1784 txd_info->addr = dma_map_single(eth->dma_dev, data,
1785 txd_info->size, DMA_TO_DEVICE);
1786 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1787 return -ENOMEM;
1788
1789 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1790 } else {
1791 struct page *page = virt_to_head_page(data);
1792
1793 txd_info->addr = page_pool_get_dma_addr(page) +
1794 sizeof(struct xdp_frame) + headroom;
1795 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1796 txd_info->size, DMA_BIDIRECTIONAL);
1797 }
1798 mtk_tx_set_dma_desc(dev, txd, txd_info);
1799
1800 tx_buf->mac_id = mac->id;
1801 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1802 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1803
1804 txd_pdma = qdma_to_pdma(ring, txd);
1805 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1806 index);
1807
1808 return 0;
1809 }
1810
1811 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1812 struct net_device *dev, bool dma_map)
1813 {
1814 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1815 const struct mtk_soc_data *soc = eth->soc;
1816 struct mtk_tx_ring *ring = &eth->tx_ring;
1817 struct mtk_mac *mac = netdev_priv(dev);
1818 struct mtk_tx_dma_desc_info txd_info = {
1819 .size = xdpf->len,
1820 .first = true,
1821 .last = !xdp_frame_has_frags(xdpf),
1822 .qid = mac->id,
1823 };
1824 int err, index = 0, n_desc = 1, nr_frags;
1825 struct mtk_tx_buf *htx_buf, *tx_buf;
1826 struct mtk_tx_dma *htxd, *txd;
1827 void *data = xdpf->data;
1828
1829 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1830 return -EBUSY;
1831
1832 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1833 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1834 return -EBUSY;
1835
1836 spin_lock(&eth->page_lock);
1837
1838 txd = ring->next_free;
1839 if (txd == ring->last_free) {
1840 spin_unlock(&eth->page_lock);
1841 return -ENOMEM;
1842 }
1843 htxd = txd;
1844
1845 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
1846 memset(tx_buf, 0, sizeof(*tx_buf));
1847 htx_buf = tx_buf;
1848
1849 for (;;) {
1850 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1851 data, xdpf->headroom, index, dma_map);
1852 if (err < 0)
1853 goto unmap;
1854
1855 if (txd_info.last)
1856 break;
1857
1858 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1859 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1860 if (txd == ring->last_free)
1861 goto unmap;
1862
1863 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1864 soc->tx.desc_size);
1865 memset(tx_buf, 0, sizeof(*tx_buf));
1866 n_desc++;
1867 }
1868
1869 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1870 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1871 txd_info.last = index + 1 == nr_frags;
1872 txd_info.qid = mac->id;
1873 data = skb_frag_address(&sinfo->frags[index]);
1874
1875 index++;
1876 }
1877 /* store xdpf for cleanup */
1878 htx_buf->data = xdpf;
1879
1880 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1881 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1882
1883 if (index & 1)
1884 txd_pdma->txd2 |= TX_DMA_LS0;
1885 else
1886 txd_pdma->txd2 |= TX_DMA_LS1;
1887 }
1888
1889 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1890 atomic_sub(n_desc, &ring->free_count);
1891
1892 /* make sure that all changes to the dma ring are flushed before we
1893 * continue
1894 */
1895 wmb();
1896
1897 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1898 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1899 } else {
1900 int idx;
1901
1902 idx = txd_to_idx(ring, txd, soc->tx.desc_size);
1903 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1904 MT7628_TX_CTX_IDX0);
1905 }
1906
1907 	spin_unlock(&eth->page_lock);
1908
1909 return 0;
1910
1911 unmap:
1912 while (htxd != txd) {
1913 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
1914 mtk_tx_unmap(eth, tx_buf, NULL, false);
1915
1916 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1917 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1918 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1919
1920 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1921 }
1922
1923 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1924 }
1925
1926 	spin_unlock(&eth->page_lock);
1927
1928 return err;
1929 }
1930
1931 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1932 struct xdp_frame **frames, u32 flags)
1933 {
1934 struct mtk_mac *mac = netdev_priv(dev);
1935 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1936 struct mtk_eth *eth = mac->hw;
1937 int i, nxmit = 0;
1938
1939 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1940 return -EINVAL;
1941
1942 for (i = 0; i < num_frame; i++) {
1943 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1944 break;
1945 nxmit++;
1946 }
1947
1948 u64_stats_update_begin(&hw_stats->syncp);
1949 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1950 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1951 u64_stats_update_end(&hw_stats->syncp);
1952
1953 return nxmit;
1954 }
1955
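/* Run the attached XDP program on a received buffer and act on the verdict:
 * XDP_TX resubmits the frame on the TX ring without remapping, XDP_REDIRECT
 * defers to the core, and drops/aborts return the page to the page pool.
 * The per-verdict counters are updated under the stats syncp.
 */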
1956 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1957 struct xdp_buff *xdp, struct net_device *dev)
1958 {
1959 struct mtk_mac *mac = netdev_priv(dev);
1960 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1961 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1962 struct bpf_prog *prog;
1963 u32 act = XDP_PASS;
1964
1965 rcu_read_lock();
1966
1967 prog = rcu_dereference(eth->prog);
1968 if (!prog)
1969 goto out;
1970
1971 act = bpf_prog_run_xdp(prog, xdp);
1972 switch (act) {
1973 case XDP_PASS:
1974 count = &hw_stats->xdp_stats.rx_xdp_pass;
1975 goto update_stats;
1976 case XDP_REDIRECT:
1977 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1978 act = XDP_DROP;
1979 break;
1980 }
1981
1982 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1983 goto update_stats;
1984 case XDP_TX: {
1985 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1986
1987 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1988 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1989 act = XDP_DROP;
1990 break;
1991 }
1992
1993 count = &hw_stats->xdp_stats.rx_xdp_tx;
1994 goto update_stats;
1995 }
1996 default:
1997 bpf_warn_invalid_xdp_action(dev, prog, act);
1998 fallthrough;
1999 case XDP_ABORTED:
2000 trace_xdp_exception(dev, prog, act);
2001 fallthrough;
2002 case XDP_DROP:
2003 break;
2004 }
2005
2006 page_pool_put_full_page(ring->page_pool,
2007 virt_to_head_page(xdp->data), true);
2008
2009 update_stats:
2010 u64_stats_update_begin(&hw_stats->syncp);
2011 *count = *count + 1;
2012 u64_stats_update_end(&hw_stats->syncp);
2013 out:
2014 rcu_read_unlock();
2015
2016 return act;
2017 }
2018
2019 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2020 struct mtk_eth *eth)
2021 {
2022 struct dim_sample dim_sample = {};
2023 struct mtk_rx_ring *ring;
2024 bool xdp_flush = false;
2025 int idx;
2026 struct sk_buff *skb;
2027 u64 addr64 = 0;
2028 u8 *data, *new_data;
2029 struct mtk_rx_dma_v2 *rxd, trxd;
2030 int done = 0, bytes = 0;
2031 dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2032 int ppe_idx = 0;
2033
2034 while (done < budget) {
2035 unsigned int pktlen, *rxdcsum;
2036 struct net_device *netdev;
2037 u32 hash, reason;
2038 int mac = 0;
2039
2040 ring = mtk_get_rx_ring(eth);
2041 if (unlikely(!ring))
2042 goto rx_done;
2043
2044 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2045 rxd = ring->dma + idx * eth->soc->rx.desc_size;
2046 data = ring->data[idx];
2047
2048 if (!mtk_rx_get_desc(eth, &trxd, rxd))
2049 break;
2050
2051 		/* find out which mac the packet comes from. values start at 1 */
2052 if (mtk_is_netsys_v3_or_greater(eth)) {
2053 u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2054
2055 switch (val) {
2056 case PSE_GDM1_PORT:
2057 case PSE_GDM2_PORT:
2058 mac = val - 1;
2059 break;
2060 case PSE_GDM3_PORT:
2061 mac = MTK_GMAC3_ID;
2062 break;
2063 default:
2064 break;
2065 }
2066 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2067 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2068 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2069 }
2070
2071 if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2072 !eth->netdev[mac]))
2073 goto release_desc;
2074
2075 netdev = eth->netdev[mac];
2076 ppe_idx = eth->mac[mac]->ppe_idx;
2077
2078 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2079 goto release_desc;
2080
2081 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2082
2083 /* alloc new buffer */
2084 if (ring->page_pool) {
2085 struct page *page = virt_to_head_page(data);
2086 struct xdp_buff xdp;
2087 u32 ret;
2088
2089 new_data = mtk_page_pool_get_buff(ring->page_pool,
2090 &dma_addr,
2091 GFP_ATOMIC);
2092 if (unlikely(!new_data)) {
2093 netdev->stats.rx_dropped++;
2094 goto release_desc;
2095 }
2096
2097 dma_sync_single_for_cpu(eth->dma_dev,
2098 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2099 pktlen, page_pool_get_dma_dir(ring->page_pool));
2100
2101 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2102 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2103 false);
2104 xdp_buff_clear_frags_flag(&xdp);
2105
2106 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2107 if (ret == XDP_REDIRECT)
2108 xdp_flush = true;
2109
2110 if (ret != XDP_PASS)
2111 goto skip_rx;
2112
2113 skb = build_skb(data, PAGE_SIZE);
2114 if (unlikely(!skb)) {
2115 page_pool_put_full_page(ring->page_pool,
2116 page, true);
2117 netdev->stats.rx_dropped++;
2118 goto skip_rx;
2119 }
2120
2121 skb_reserve(skb, xdp.data - xdp.data_hard_start);
2122 skb_put(skb, xdp.data_end - xdp.data);
2123 skb_mark_for_recycle(skb);
2124 } else {
2125 if (ring->frag_size <= PAGE_SIZE)
2126 new_data = napi_alloc_frag(ring->frag_size);
2127 else
2128 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2129
2130 if (unlikely(!new_data)) {
2131 netdev->stats.rx_dropped++;
2132 goto release_desc;
2133 }
2134
2135 dma_addr = dma_map_single(eth->dma_dev,
2136 new_data + NET_SKB_PAD + eth->ip_align,
2137 ring->buf_size, DMA_FROM_DEVICE);
2138 if (unlikely(dma_mapping_error(eth->dma_dev,
2139 dma_addr))) {
2140 skb_free_frag(new_data);
2141 netdev->stats.rx_dropped++;
2142 goto release_desc;
2143 }
2144
2145 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2146 addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2147
2148 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2149 ring->buf_size, DMA_FROM_DEVICE);
2150
2151 skb = build_skb(data, ring->frag_size);
2152 if (unlikely(!skb)) {
2153 netdev->stats.rx_dropped++;
2154 skb_free_frag(data);
2155 goto skip_rx;
2156 }
2157
2158 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2159 skb_put(skb, pktlen);
2160 }
2161
2162 skb->dev = netdev;
2163 bytes += skb->len;
2164
2165 if (mtk_is_netsys_v3_or_greater(eth)) {
2166 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2167 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2168 if (hash != MTK_RXD5_FOE_ENTRY)
2169 skb_set_hash(skb, jhash_1word(hash, 0),
2170 PKT_HASH_TYPE_L4);
2171 rxdcsum = &trxd.rxd3;
2172 } else {
2173 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2174 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2175 if (hash != MTK_RXD4_FOE_ENTRY)
2176 skb_set_hash(skb, jhash_1word(hash, 0),
2177 PKT_HASH_TYPE_L4);
2178 rxdcsum = &trxd.rxd4;
2179 }
2180
2181 if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2182 skb->ip_summed = CHECKSUM_UNNECESSARY;
2183 else
2184 skb_checksum_none_assert(skb);
2185 skb->protocol = eth_type_trans(skb, netdev);
2186
2187 /* When using VLAN untagging in combination with DSA, the
2188 * hardware treats the MTK special tag as a VLAN and untags it.
2189 */
2190 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2191 netdev_uses_dsa(netdev)) {
2192 unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2193
2194 if (port < ARRAY_SIZE(eth->dsa_meta) &&
2195 eth->dsa_meta[port])
2196 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2197 }
2198
2199 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2200 mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
2201
2202 skb_record_rx_queue(skb, 0);
2203 napi_gro_receive(napi, skb);
2204
2205 skip_rx:
2206 ring->data[idx] = new_data;
2207 rxd->rxd1 = (unsigned int)dma_addr;
2208 release_desc:
2209 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2210 rxd->rxd2 = RX_DMA_LSO;
2211 else
2212 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2213
2214 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2215 likely(dma_addr != DMA_MAPPING_ERROR))
2216 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2217
2218 ring->calc_idx = idx;
2219 done++;
2220 }
2221
2222 rx_done:
2223 if (done) {
2224 /* make sure that all changes to the dma ring are flushed before
2225 * we continue
2226 */
2227 wmb();
2228 mtk_update_rx_cpu_idx(eth);
2229 }
2230
2231 eth->rx_packets += done;
2232 eth->rx_bytes += bytes;
2233 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2234 &dim_sample);
2235 	net_dim(&eth->rx_dim, &dim_sample);
2236
2237 if (xdp_flush)
2238 xdp_do_flush();
2239
2240 return done;
2241 }
2242
2243 struct mtk_poll_state {
2244 struct netdev_queue *txq;
2245 unsigned int total;
2246 unsigned int done;
2247 unsigned int bytes;
2248 };
2249
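/* Account one completed TX skb. BQL completions are batched per TX queue:
 * consecutive completions for the same queue are accumulated and only
 * flushed to netdev_tx_completed_queue() when the queue changes (the final
 * flush happens in mtk_poll_tx()).
 */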
2250 static void
2251 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2252 struct sk_buff *skb)
2253 {
2254 struct netdev_queue *txq;
2255 struct net_device *dev;
2256 unsigned int bytes = skb->len;
2257
2258 state->total++;
2259 eth->tx_packets++;
2260 eth->tx_bytes += bytes;
2261
2262 dev = eth->netdev[mac];
2263 if (!dev)
2264 return;
2265
2266 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2267 if (state->txq == txq) {
2268 state->done++;
2269 state->bytes += bytes;
2270 return;
2271 }
2272
2273 if (state->txq)
2274 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2275
2276 state->txq = txq;
2277 state->done = 1;
2278 state->bytes = bytes;
2279 }
2280
2281 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2282 struct mtk_poll_state *state)
2283 {
2284 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2285 	struct mtk_tx_ring *ring = &eth->tx_ring;
2286 struct mtk_tx_buf *tx_buf;
2287 struct xdp_frame_bulk bq;
2288 struct mtk_tx_dma *desc;
2289 u32 cpu, dma;
2290
2291 cpu = ring->last_free_ptr;
2292 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2293
2294 desc = mtk_qdma_phys_to_virt(ring, cpu);
2295 xdp_frame_bulk_init(&bq);
2296
2297 while ((cpu != dma) && budget) {
2298 u32 next_cpu = desc->txd2;
2299
2300 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2301 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2302 break;
2303
2304 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2305 eth->soc->tx.desc_size);
2306 if (!tx_buf->data)
2307 break;
2308
2309 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2310 if (tx_buf->type == MTK_TYPE_SKB)
2311 mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2312 tx_buf->data);
2313
2314 budget--;
2315 }
2316 mtk_tx_unmap(eth, tx_buf, &bq, true);
2317
2318 ring->last_free = desc;
2319 atomic_inc(&ring->free_count);
2320
2321 cpu = next_cpu;
2322 }
2323 xdp_flush_frame_bulk(&bq);
2324
2325 ring->last_free_ptr = cpu;
2326 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2327
2328 return budget;
2329 }
2330
2331 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2332 struct mtk_poll_state *state)
2333 {
2334 	struct mtk_tx_ring *ring = &eth->tx_ring;
2335 struct mtk_tx_buf *tx_buf;
2336 struct xdp_frame_bulk bq;
2337 struct mtk_tx_dma *desc;
2338 u32 cpu, dma;
2339
2340 cpu = ring->cpu_idx;
2341 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2342 xdp_frame_bulk_init(&bq);
2343
2344 while ((cpu != dma) && budget) {
2345 tx_buf = &ring->buf[cpu];
2346 if (!tx_buf->data)
2347 break;
2348
2349 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2350 if (tx_buf->type == MTK_TYPE_SKB)
2351 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2352 budget--;
2353 }
2354 mtk_tx_unmap(eth, tx_buf, &bq, true);
2355
2356 desc = ring->dma + cpu * eth->soc->tx.desc_size;
2357 ring->last_free = desc;
2358 atomic_inc(&ring->free_count);
2359
2360 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2361 }
2362 xdp_flush_frame_bulk(&bq);
2363
2364 ring->cpu_idx = cpu;
2365
2366 return budget;
2367 }
2368
2369 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2370 {
2371 	struct mtk_tx_ring *ring = &eth->tx_ring;
2372 struct dim_sample dim_sample = {};
2373 struct mtk_poll_state state = {};
2374
2375 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2376 budget = mtk_poll_tx_qdma(eth, budget, &state);
2377 else
2378 budget = mtk_poll_tx_pdma(eth, budget, &state);
2379
2380 if (state.txq)
2381 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2382
2383 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2384 &dim_sample);
2385 	net_dim(&eth->tx_dim, &dim_sample);
2386
2387 if (mtk_queue_stopped(eth) &&
2388 (atomic_read(&ring->free_count) > ring->thresh))
2389 mtk_wake_queue(eth);
2390
2391 return state.total;
2392 }
2393
2394 static void mtk_handle_status_irq(struct mtk_eth *eth)
2395 {
2396 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2397
2398 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2399 mtk_stats_update(eth);
2400 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2401 MTK_INT_STATUS2);
2402 }
2403 }
2404
2405 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2406 {
2407 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2408 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2409 int tx_done = 0;
2410
2411 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2412 mtk_handle_status_irq(eth);
2413 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2414 tx_done = mtk_poll_tx(eth, budget);
2415
2416 if (unlikely(netif_msg_intr(eth))) {
2417 dev_info(eth->dev,
2418 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2419 mtk_r32(eth, reg_map->tx_irq_status),
2420 mtk_r32(eth, reg_map->tx_irq_mask));
2421 }
2422
2423 if (tx_done == budget)
2424 return budget;
2425
2426 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2427 return budget;
2428
2429 if (napi_complete_done(napi, tx_done))
2430 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2431
2432 return tx_done;
2433 }
2434
2435 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2436 {
2437 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2438 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2439 int rx_done_total = 0;
2440
2441 mtk_handle_status_irq(eth);
2442
2443 do {
2444 int rx_done;
2445
2446 mtk_w32(eth, eth->soc->rx.irq_done_mask,
2447 reg_map->pdma.irq_status);
2448 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2449 rx_done_total += rx_done;
2450
2451 if (unlikely(netif_msg_intr(eth))) {
2452 dev_info(eth->dev,
2453 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2454 mtk_r32(eth, reg_map->pdma.irq_status),
2455 mtk_r32(eth, reg_map->pdma.irq_mask));
2456 }
2457
2458 if (rx_done_total == budget)
2459 return budget;
2460
2461 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2462 eth->soc->rx.irq_done_mask);
2463
2464 if (napi_complete_done(napi, rx_done_total))
2465 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2466
2467 return rx_done_total;
2468 }
2469
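/* Allocate the TX ring. QDMA descriptors are chained through txd2, which
 * holds the physical address of the next descriptor; on SoCs with SRAM the
 * descriptors are carved out of SRAM right after the FQ region, otherwise
 * coherent DMA memory is used. PDMA-only SoCs additionally get a shadow
 * PDMA descriptor ring (ring->dma_pdma) that the hardware actually uses.
 */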
2470 static int mtk_tx_alloc(struct mtk_eth *eth)
2471 {
2472 const struct mtk_soc_data *soc = eth->soc;
2473 	struct mtk_tx_ring *ring = &eth->tx_ring;
2474 int i, sz = soc->tx.desc_size;
2475 struct mtk_tx_dma_v2 *txd;
2476 int ring_size;
2477 u32 ofs, val;
2478
2479 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2480 ring_size = MTK_QDMA_RING_SIZE;
2481 else
2482 ring_size = soc->tx.dma_size;
2483
2484 ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2485 GFP_KERNEL);
2486 if (!ring->buf)
2487 goto no_tx_mem;
2488
2489 if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2490 ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
2491 ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
2492 } else {
2493 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2494 &ring->phys, GFP_KERNEL);
2495 }
2496
2497 if (!ring->dma)
2498 goto no_tx_mem;
2499
2500 for (i = 0; i < ring_size; i++) {
2501 int next = (i + 1) % ring_size;
2502 u32 next_ptr = ring->phys + next * sz;
2503
2504 txd = ring->dma + i * sz;
2505 txd->txd2 = next_ptr;
2506 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2507 txd->txd4 = 0;
2508 if (mtk_is_netsys_v2_or_greater(eth)) {
2509 txd->txd5 = 0;
2510 txd->txd6 = 0;
2511 txd->txd7 = 0;
2512 txd->txd8 = 0;
2513 }
2514 }
2515
2516 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2517 * only as the framework. The real HW descriptors are the PDMA
2518 * descriptors in ring->dma_pdma.
2519 */
2520 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2521 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2522 &ring->phys_pdma, GFP_KERNEL);
2523 if (!ring->dma_pdma)
2524 goto no_tx_mem;
2525
2526 for (i = 0; i < ring_size; i++) {
2527 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2528 ring->dma_pdma[i].txd4 = 0;
2529 }
2530 }
2531
2532 ring->dma_size = ring_size;
2533 atomic_set(&ring->free_count, ring_size - 2);
2534 ring->next_free = ring->dma;
2535 ring->last_free = (void *)txd;
2536 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2537 ring->thresh = MAX_SKB_FRAGS;
2538
2539 /* make sure that all changes to the dma ring are flushed before we
2540 * continue
2541 */
2542 wmb();
2543
2544 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2545 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2546 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2547 mtk_w32(eth,
2548 ring->phys + ((ring_size - 1) * sz),
2549 soc->reg_map->qdma.crx_ptr);
2550 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2551
2552 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2553 val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2554 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2555
2556 val = MTK_QTX_SCH_MIN_RATE_EN |
2557 /* minimum: 10 Mbps */
2558 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2559 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2560 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2561 if (mtk_is_netsys_v1(eth))
2562 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2563 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2564 ofs += MTK_QTX_OFFSET;
2565 }
2566 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2567 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2568 if (mtk_is_netsys_v2_or_greater(eth))
2569 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2570 } else {
2571 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2572 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2573 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2574 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2575 }
2576
2577 return 0;
2578
2579 no_tx_mem:
2580 return -ENOMEM;
2581 }
2582
2583 static void mtk_tx_clean(struct mtk_eth *eth)
2584 {
2585 const struct mtk_soc_data *soc = eth->soc;
2586 	struct mtk_tx_ring *ring = &eth->tx_ring;
2587 int i;
2588
2589 if (ring->buf) {
2590 for (i = 0; i < ring->dma_size; i++)
2591 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2592 kfree(ring->buf);
2593 ring->buf = NULL;
2594 }
2595 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2596 dma_free_coherent(eth->dma_dev,
2597 ring->dma_size * soc->tx.desc_size,
2598 ring->dma, ring->phys);
2599 ring->dma = NULL;
2600 }
2601
2602 if (ring->dma_pdma) {
2603 dma_free_coherent(eth->dma_dev,
2604 ring->dma_size * soc->tx.desc_size,
2605 ring->dma_pdma, ring->phys_pdma);
2606 ring->dma_pdma = NULL;
2607 }
2608 }
2609
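/* Allocate one RX ring. Buffer length and ring size depend on the ring type
 * (HW LRO rings use larger buffers), data buffers come from a page pool when
 * XDP can be used and from netdev frags otherwise, and on SRAM-capable SoCs
 * the descriptors of the normal ring are placed in SRAM right after the TX
 * ring.
 */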
2610 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2611 {
2612 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2613 const struct mtk_soc_data *soc = eth->soc;
2614 struct mtk_rx_ring *ring;
2615 int rx_data_len, rx_dma_size, tx_ring_size;
2616 int i;
2617
2618 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2619 tx_ring_size = MTK_QDMA_RING_SIZE;
2620 else
2621 tx_ring_size = soc->tx.dma_size;
2622
2623 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2624 if (ring_no)
2625 return -EINVAL;
2626 		ring = &eth->rx_ring_qdma;
2627 	} else {
2628 		ring = &eth->rx_ring[ring_no];
2629 }
2630
2631 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2632 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2633 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2634 } else {
2635 rx_data_len = ETH_DATA_LEN;
2636 rx_dma_size = soc->rx.dma_size;
2637 }
2638
2639 ring->frag_size = mtk_max_frag_size(rx_data_len);
2640 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2641 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2642 GFP_KERNEL);
2643 if (!ring->data)
2644 return -ENOMEM;
2645
2646 if (mtk_page_pool_enabled(eth)) {
2647 struct page_pool *pp;
2648
2649 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2650 rx_dma_size);
2651 if (IS_ERR(pp))
2652 return PTR_ERR(pp);
2653
2654 ring->page_pool = pp;
2655 }
2656
2657 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2658 rx_flag != MTK_RX_FLAGS_NORMAL) {
2659 ring->dma = dma_alloc_coherent(eth->dma_dev,
2660 rx_dma_size * eth->soc->rx.desc_size,
2661 &ring->phys, GFP_KERNEL);
2662 } else {
2663 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2664
2665 ring->dma = tx_ring->dma + tx_ring_size *
2666 eth->soc->tx.desc_size * (ring_no + 1);
2667 ring->phys = tx_ring->phys + tx_ring_size *
2668 eth->soc->tx.desc_size * (ring_no + 1);
2669 }
2670
2671 if (!ring->dma)
2672 return -ENOMEM;
2673
2674 for (i = 0; i < rx_dma_size; i++) {
2675 struct mtk_rx_dma_v2 *rxd;
2676 dma_addr_t dma_addr;
2677 void *data;
2678
2679 rxd = ring->dma + i * eth->soc->rx.desc_size;
2680 if (ring->page_pool) {
2681 data = mtk_page_pool_get_buff(ring->page_pool,
2682 &dma_addr, GFP_KERNEL);
2683 if (!data)
2684 return -ENOMEM;
2685 } else {
2686 if (ring->frag_size <= PAGE_SIZE)
2687 data = netdev_alloc_frag(ring->frag_size);
2688 else
2689 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2690
2691 if (!data)
2692 return -ENOMEM;
2693
2694 dma_addr = dma_map_single(eth->dma_dev,
2695 data + NET_SKB_PAD + eth->ip_align,
2696 ring->buf_size, DMA_FROM_DEVICE);
2697 if (unlikely(dma_mapping_error(eth->dma_dev,
2698 dma_addr))) {
2699 skb_free_frag(data);
2700 return -ENOMEM;
2701 }
2702 }
2703 rxd->rxd1 = (unsigned int)dma_addr;
2704 ring->data[i] = data;
2705
2706 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2707 rxd->rxd2 = RX_DMA_LSO;
2708 else
2709 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2710
2711 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2712 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2713
2714 rxd->rxd3 = 0;
2715 rxd->rxd4 = 0;
2716 if (mtk_is_netsys_v3_or_greater(eth)) {
2717 rxd->rxd5 = 0;
2718 rxd->rxd6 = 0;
2719 rxd->rxd7 = 0;
2720 rxd->rxd8 = 0;
2721 }
2722 }
2723
2724 ring->dma_size = rx_dma_size;
2725 ring->calc_idx_update = false;
2726 ring->calc_idx = rx_dma_size - 1;
2727 if (rx_flag == MTK_RX_FLAGS_QDMA)
2728 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2729 ring_no * MTK_QRX_OFFSET;
2730 else
2731 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2732 ring_no * MTK_QRX_OFFSET;
2733 /* make sure that all changes to the dma ring are flushed before we
2734 * continue
2735 */
2736 wmb();
2737
2738 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2739 mtk_w32(eth, ring->phys,
2740 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2741 mtk_w32(eth, rx_dma_size,
2742 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2743 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2744 reg_map->qdma.rst_idx);
2745 } else {
2746 mtk_w32(eth, ring->phys,
2747 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2748 mtk_w32(eth, rx_dma_size,
2749 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2750 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2751 reg_map->pdma.rst_idx);
2752 }
2753 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2754
2755 return 0;
2756 }
2757
2758 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2759 {
2760 u64 addr64 = 0;
2761 int i;
2762
2763 if (ring->data && ring->dma) {
2764 for (i = 0; i < ring->dma_size; i++) {
2765 struct mtk_rx_dma *rxd;
2766
2767 if (!ring->data[i])
2768 continue;
2769
2770 rxd = ring->dma + i * eth->soc->rx.desc_size;
2771 if (!rxd->rxd1)
2772 continue;
2773
2774 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2775 addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2776
2777 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2778 ring->buf_size, DMA_FROM_DEVICE);
2779 mtk_rx_put_buff(ring, ring->data[i], false);
2780 }
2781 kfree(ring->data);
2782 ring->data = NULL;
2783 }
2784
2785 if (!in_sram && ring->dma) {
2786 dma_free_coherent(eth->dma_dev,
2787 ring->dma_size * eth->soc->rx.desc_size,
2788 ring->dma, ring->phys);
2789 ring->dma = NULL;
2790 }
2791
2792 if (ring->page_pool) {
2793 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2794 xdp_rxq_info_unreg(&ring->xdp_q);
2795 page_pool_destroy(ring->page_pool);
2796 ring->page_pool = NULL;
2797 }
2798 }
2799
2800 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2801 {
2802 int i;
2803 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2804 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2805
2806 /* set LRO rings to auto-learn modes */
2807 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2808
2809 /* validate LRO ring */
2810 ring_ctrl_dw2 |= MTK_RING_VLD;
2811
2812 /* set AGE timer (unit: 20us) */
2813 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2814 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2815
2816 /* set max AGG timer (unit: 20us) */
2817 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2818
2819 /* set max LRO AGG count */
2820 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2821 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2822
2823 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2824 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2825 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2826 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2827 }
2828
2829 /* IPv4 checksum update enable */
2830 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2831
2832 /* switch priority comparison to packet count mode */
2833 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2834
2835 /* bandwidth threshold setting */
2836 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2837
2838 /* auto-learn score delta setting */
2839 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2840
2841 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2842 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2843 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2844
2845 /* set HW LRO mode & the max aggregation count for rx packets */
2846 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2847
2848 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2849 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2850
2851 /* enable HW LRO */
2852 lro_ctrl_dw0 |= MTK_LRO_EN;
2853
2854 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2855 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2856
2857 return 0;
2858 }
2859
2860 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2861 {
2862 int i;
2863 u32 val;
2864
2865 /* relinquish lro rings, flush aggregated packets */
2866 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2867
2868 	/* wait for the relinquish requests to complete */
2869 for (i = 0; i < 10; i++) {
2870 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2871 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2872 msleep(20);
2873 continue;
2874 }
2875 break;
2876 }
2877
2878 /* invalidate lro rings */
2879 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2880 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2881
2882 /* disable HW LRO */
2883 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2884 }
2885
2886 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2887 {
2888 u32 reg_val;
2889
2890 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2891
2892 /* invalidate the IP setting */
2893 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2894
2895 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2896
2897 /* validate the IP setting */
2898 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2899 }
2900
2901 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2902 {
2903 u32 reg_val;
2904
2905 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2906
2907 /* invalidate the IP setting */
2908 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2909
2910 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2911 }
2912
2913 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2914 {
2915 int cnt = 0;
2916 int i;
2917
2918 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2919 if (mac->hwlro_ip[i])
2920 cnt++;
2921 }
2922
2923 return cnt;
2924 }
2925
2926 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2927 struct ethtool_rxnfc *cmd)
2928 {
2929 struct ethtool_rx_flow_spec *fsp =
2930 (struct ethtool_rx_flow_spec *)&cmd->fs;
2931 struct mtk_mac *mac = netdev_priv(dev);
2932 struct mtk_eth *eth = mac->hw;
2933 int hwlro_idx;
2934
2935 if ((fsp->flow_type != TCP_V4_FLOW) ||
2936 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2937 (fsp->location > 1))
2938 return -EINVAL;
2939
2940 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2941 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2942
2943 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2944
2945 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2946
2947 return 0;
2948 }
2949
2950 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2951 struct ethtool_rxnfc *cmd)
2952 {
2953 struct ethtool_rx_flow_spec *fsp =
2954 (struct ethtool_rx_flow_spec *)&cmd->fs;
2955 struct mtk_mac *mac = netdev_priv(dev);
2956 struct mtk_eth *eth = mac->hw;
2957 int hwlro_idx;
2958
2959 if (fsp->location > 1)
2960 return -EINVAL;
2961
2962 mac->hwlro_ip[fsp->location] = 0;
2963 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2964
2965 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2966
2967 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2968
2969 return 0;
2970 }
2971
2972 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2973 {
2974 struct mtk_mac *mac = netdev_priv(dev);
2975 struct mtk_eth *eth = mac->hw;
2976 int i, hwlro_idx;
2977
2978 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2979 mac->hwlro_ip[i] = 0;
2980 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2981
2982 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2983 }
2984
2985 mac->hwlro_ip_cnt = 0;
2986 }
2987
2988 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2989 struct ethtool_rxnfc *cmd)
2990 {
2991 struct mtk_mac *mac = netdev_priv(dev);
2992 struct ethtool_rx_flow_spec *fsp =
2993 (struct ethtool_rx_flow_spec *)&cmd->fs;
2994
2995 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2996 return -EINVAL;
2997
2998 	/* only the TCP destination IPv4 address is meaningful; the other fields are unused */
2999 fsp->flow_type = TCP_V4_FLOW;
3000 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3001 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3002
3003 fsp->h_u.tcp_ip4_spec.ip4src = 0;
3004 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3005 fsp->h_u.tcp_ip4_spec.psrc = 0;
3006 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3007 fsp->h_u.tcp_ip4_spec.pdst = 0;
3008 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3009 fsp->h_u.tcp_ip4_spec.tos = 0;
3010 fsp->m_u.tcp_ip4_spec.tos = 0xff;
3011
3012 return 0;
3013 }
3014
3015 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3016 struct ethtool_rxnfc *cmd,
3017 u32 *rule_locs)
3018 {
3019 struct mtk_mac *mac = netdev_priv(dev);
3020 int cnt = 0;
3021 int i;
3022
3023 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3024 if (cnt == cmd->rule_cnt)
3025 return -EMSGSIZE;
3026
3027 if (mac->hwlro_ip[i]) {
3028 rule_locs[cnt] = i;
3029 cnt++;
3030 }
3031 }
3032
3033 cmd->rule_cnt = cnt;
3034
3035 return 0;
3036 }
3037
3038 static netdev_features_t mtk_fix_features(struct net_device *dev,
3039 netdev_features_t features)
3040 {
3041 if (!(features & NETIF_F_LRO)) {
3042 struct mtk_mac *mac = netdev_priv(dev);
3043 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3044
3045 if (ip_cnt) {
3046 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3047
3048 features |= NETIF_F_LRO;
3049 }
3050 }
3051
3052 return features;
3053 }
3054
3055 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3056 {
3057 netdev_features_t diff = dev->features ^ features;
3058
3059 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3060 mtk_hwlro_netdev_disable(dev);
3061
3062 return 0;
3063 }
3064
3065 /* wait for DMA to finish whatever it is doing before we start using it again */
3066 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3067 {
3068 unsigned int reg;
3069 int ret;
3070 u32 val;
3071
3072 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3073 reg = eth->soc->reg_map->qdma.glo_cfg;
3074 else
3075 reg = eth->soc->reg_map->pdma.glo_cfg;
3076
3077 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3078 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3079 5, MTK_DMA_BUSY_TIMEOUT_US);
3080 if (ret)
3081 dev_err(eth->dev, "DMA init timeout\n");
3082
3083 return ret;
3084 }
3085
3086 static int mtk_dma_init(struct mtk_eth *eth)
3087 {
3088 int err;
3089 u32 i;
3090
3091 if (mtk_dma_busy_wait(eth))
3092 return -EBUSY;
3093
3094 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3095 /* QDMA needs scratch memory for internal reordering of the
3096 * descriptors
3097 */
3098 err = mtk_init_fq_dma(eth);
3099 if (err)
3100 return err;
3101 }
3102
3103 err = mtk_tx_alloc(eth);
3104 if (err)
3105 return err;
3106
3107 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3108 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3109 if (err)
3110 return err;
3111 }
3112
3113 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3114 if (err)
3115 return err;
3116
3117 if (eth->hwlro) {
3118 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3119 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3120 if (err)
3121 return err;
3122 }
3123 err = mtk_hwlro_rx_init(eth);
3124 if (err)
3125 return err;
3126 }
3127
3128 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3129 /* Enable random early drop and set drop threshold
3130 * automatically
3131 */
3132 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3133 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3134 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3135 }
3136
3137 return 0;
3138 }
3139
3140 static void mtk_dma_free(struct mtk_eth *eth)
3141 {
3142 const struct mtk_soc_data *soc = eth->soc;
3143 int i;
3144
3145 for (i = 0; i < MTK_MAX_DEVS; i++)
3146 if (eth->netdev[i])
3147 netdev_reset_queue(eth->netdev[i]);
3148 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3149 dma_free_coherent(eth->dma_dev,
3150 MTK_QDMA_RING_SIZE * soc->tx.desc_size,
3151 eth->scratch_ring, eth->phy_scratch_ring);
3152 eth->scratch_ring = NULL;
3153 eth->phy_scratch_ring = 0;
3154 }
3155 mtk_tx_clean(eth);
3156 	mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3157 	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3158
3159 if (eth->hwlro) {
3160 mtk_hwlro_rx_uninit(eth);
3161 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3162 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
3163 }
3164
3165 for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3166 kfree(eth->scratch_head[i]);
3167 eth->scratch_head[i] = NULL;
3168 }
3169 }
3170
3171 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3172 {
3173 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3174
3175 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3176 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3177 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3178 }
3179
3180 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3181 {
3182 struct mtk_mac *mac = netdev_priv(dev);
3183 struct mtk_eth *eth = mac->hw;
3184
3185 	if (test_bit(MTK_RESETTING, &eth->state))
3186 return;
3187
3188 if (!mtk_hw_reset_check(eth))
3189 return;
3190
3191 eth->netdev[mac->id]->stats.tx_errors++;
3192 netif_err(eth, tx_err, dev, "transmit timed out\n");
3193
3194 	schedule_work(&eth->pending_work);
3195 }
3196
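/* The hard IRQ handlers only mask the interrupt source and schedule the
 * corresponding NAPI context; the actual processing happens in
 * mtk_napi_tx()/mtk_napi_rx().
 */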
3197 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3198 {
3199 struct mtk_eth *eth = _eth;
3200
3201 eth->rx_events++;
3202 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3203 		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3204 		__napi_schedule(&eth->rx_napi);
3205 }
3206
3207 return IRQ_HANDLED;
3208 }
3209
3210 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3211 {
3212 struct mtk_eth *eth = _eth;
3213
3214 eth->tx_events++;
3215 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3216 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3217 		__napi_schedule(&eth->tx_napi);
3218 }
3219
3220 return IRQ_HANDLED;
3221 }
3222
3223 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3224 {
3225 struct mtk_eth *eth = _eth;
3226 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3227
3228 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3229 eth->soc->rx.irq_done_mask) {
3230 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3231 eth->soc->rx.irq_done_mask)
3232 mtk_handle_irq_rx(irq, _eth);
3233 }
3234 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3235 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3236 mtk_handle_irq_tx(irq, _eth);
3237 }
3238
3239 return IRQ_HANDLED;
3240 }
3241
3242 #ifdef CONFIG_NET_POLL_CONTROLLER
3243 static void mtk_poll_controller(struct net_device *dev)
3244 {
3245 struct mtk_mac *mac = netdev_priv(dev);
3246 struct mtk_eth *eth = mac->hw;
3247
3248 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3249 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3250 mtk_handle_irq_rx(eth->irq[2], dev);
3251 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3252 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3253 }
3254 #endif
3255
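/* Bring up the DMA engines: initialise the rings and then enable TX/RX DMA
 * in the QDMA and/or PDMA global configuration registers (PDMA-only SoCs
 * such as MT7628 only program the PDMA block).
 */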
3256 static int mtk_start_dma(struct mtk_eth *eth)
3257 {
3258 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3259 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3260 int err;
3261
3262 err = mtk_dma_init(eth);
3263 if (err) {
3264 mtk_dma_free(eth);
3265 return err;
3266 }
3267
3268 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3269 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3270 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3271 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3272 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3273
3274 if (mtk_is_netsys_v2_or_greater(eth))
3275 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3276 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3277 MTK_CHK_DDONE_EN;
3278 else
3279 val |= MTK_RX_BT_32DWORDS;
3280 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3281
3282 mtk_w32(eth,
3283 MTK_RX_DMA_EN | rx_2b_offset |
3284 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3285 reg_map->pdma.glo_cfg);
3286 } else {
3287 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3288 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3289 reg_map->pdma.glo_cfg);
3290 }
3291
3292 return 0;
3293 }
3294
3295 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
3296 {
3297 u32 val;
3298
3299 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3300 return;
3301
3302 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
3303
3304 	/* by default, set up the forward port to send frames to the PDMA */
3305 val &= ~0xffff;
3306
3307 /* Enable RX checksum */
3308 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3309
3310 val |= config;
3311
3312 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3313 val |= MTK_GDMA_SPECIAL_TAG;
3314
3315 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
3316 }
3317
3318
3319 static bool mtk_uses_dsa(struct net_device *dev)
3320 {
3321 #if IS_ENABLED(CONFIG_NET_DSA)
3322 return netdev_uses_dsa(dev) &&
3323 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3324 #else
3325 return false;
3326 #endif
3327 }
3328
3329 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3330 {
3331 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3332 struct mtk_eth *eth = mac->hw;
3333 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3334 struct ethtool_link_ksettings s;
3335 struct net_device *ldev;
3336 struct list_head *iter;
3337 struct dsa_port *dp;
3338
3339 if (event != NETDEV_CHANGE)
3340 return NOTIFY_DONE;
3341
3342 netdev_for_each_lower_dev(dev, ldev, iter) {
3343 if (netdev_priv(ldev) == mac)
3344 goto found;
3345 }
3346
3347 return NOTIFY_DONE;
3348
3349 found:
3350 if (!dsa_user_dev_check(dev))
3351 return NOTIFY_DONE;
3352
3353 if (__ethtool_get_link_ksettings(dev, &s))
3354 return NOTIFY_DONE;
3355
3356 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3357 return NOTIFY_DONE;
3358
3359 dp = dsa_port_from_netdev(dev);
3360 if (dp->index >= MTK_QDMA_NUM_QUEUES)
3361 return NOTIFY_DONE;
3362
3363 if (mac->speed > 0 && mac->speed <= s.base.speed)
3364 s.base.speed = 0;
3365
3366 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3367
3368 return NOTIFY_DONE;
3369 }
3370
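/* ndo_open: the DMA rings are shared by all MACs, so they are only started
 * for the first user (tracked via dma_refcnt). Each GDM port is then steered
 * either to the PDMA or to one of the PPEs, depending on the SoC's offload
 * capabilities.
 */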
3371 static int mtk_open(struct net_device *dev)
3372 {
3373 struct mtk_mac *mac = netdev_priv(dev);
3374 struct mtk_eth *eth = mac->hw;
3375 struct mtk_mac *target_mac;
3376 int i, err, ppe_num;
3377
3378 ppe_num = eth->soc->ppe_num;
3379
3380 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3381 if (err) {
3382 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3383 err);
3384 return err;
3385 }
3386
3387 	/* all netdevs share the same DMA rings, so we only bring them up for the first user */
3388 	if (!refcount_read(&eth->dma_refcnt)) {
3389 const struct mtk_soc_data *soc = eth->soc;
3390 u32 gdm_config;
3391 int i;
3392
3393 err = mtk_start_dma(eth);
3394 if (err) {
3395 phylink_disconnect_phy(mac->phylink);
3396 return err;
3397 }
3398
3399 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3400 mtk_ppe_start(eth->ppe[i]);
3401
3402 for (i = 0; i < MTK_MAX_DEVS; i++) {
3403 if (!eth->netdev[i])
3404 continue;
3405
3406 target_mac = netdev_priv(eth->netdev[i]);
3407 if (!soc->offload_version) {
3408 target_mac->ppe_idx = 0;
3409 gdm_config = MTK_GDMA_TO_PDMA;
3410 } else if (ppe_num >= 3 && target_mac->id == 2) {
3411 target_mac->ppe_idx = 2;
3412 gdm_config = soc->reg_map->gdma_to_ppe[2];
3413 } else if (ppe_num >= 2 && target_mac->id == 1) {
3414 target_mac->ppe_idx = 1;
3415 gdm_config = soc->reg_map->gdma_to_ppe[1];
3416 } else {
3417 target_mac->ppe_idx = 0;
3418 gdm_config = soc->reg_map->gdma_to_ppe[0];
3419 }
3420 mtk_gdm_config(eth, target_mac->id, gdm_config);
3421 }
3422 /* Reset and enable PSE */
3423 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3424 mtk_w32(eth, 0, MTK_RST_GL);
3425
3426 		napi_enable(&eth->tx_napi);
3427 		napi_enable(&eth->rx_napi);
3428 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3429 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3430 		refcount_set(&eth->dma_refcnt, 1);
3431 	} else {
3432 		refcount_inc(&eth->dma_refcnt);
3433 }
3434
3435 phylink_start(mac->phylink);
3436 netif_tx_start_all_queues(dev);
3437
3438 if (mtk_is_netsys_v2_or_greater(eth))
3439 return 0;
3440
3441 if (mtk_uses_dsa(dev) && !eth->prog) {
3442 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3443 struct metadata_dst *md_dst = eth->dsa_meta[i];
3444
3445 if (md_dst)
3446 continue;
3447
3448 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3449 GFP_KERNEL);
3450 if (!md_dst)
3451 return -ENOMEM;
3452
3453 md_dst->u.port_info.port_id = i;
3454 eth->dsa_meta[i] = md_dst;
3455 }
3456 } else {
3457 /* Hardware DSA untagging and VLAN RX offloading need to be
3458 * disabled if at least one MAC does not use DSA.
3459 */
3460 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3461
3462 val &= ~MTK_CDMP_STAG_EN;
3463 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3464
3465 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3466 }
3467
3468 return 0;
3469 }
3470
3471 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3472 {
3473 u32 val;
3474 int i;
3475
3476 /* stop the dma engine */
3477 	spin_lock_bh(&eth->page_lock);
3478 val = mtk_r32(eth, glo_cfg);
3479 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3480 glo_cfg);
3481 	spin_unlock_bh(&eth->page_lock);
3482
3483 /* wait for dma stop */
3484 for (i = 0; i < 10; i++) {
3485 val = mtk_r32(eth, glo_cfg);
3486 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3487 msleep(20);
3488 continue;
3489 }
3490 break;
3491 }
3492 }
3493
3494 static int mtk_stop(struct net_device *dev)
3495 {
3496 struct mtk_mac *mac = netdev_priv(dev);
3497 struct mtk_eth *eth = mac->hw;
3498 int i;
3499
3500 phylink_stop(mac->phylink);
3501
3502 netif_tx_disable(dev);
3503
3504 phylink_disconnect_phy(mac->phylink);
3505
3506 /* only shutdown DMA if this is the last user */
3507 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3508 return 0;
3509
3510 for (i = 0; i < MTK_MAX_DEVS; i++)
3511 mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
3512
3513 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3514 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3515 	napi_disable(&eth->tx_napi);
3516 	napi_disable(&eth->rx_napi);
3517
3518 	cancel_work_sync(&eth->rx_dim.work);
3519 	cancel_work_sync(&eth->tx_dim.work);
3520
3521 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3522 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3523 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3524
3525 mtk_dma_free(eth);
3526
3527 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3528 mtk_ppe_stop(eth->ppe[i]);
3529
3530 return 0;
3531 }
3532
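/* Attaching or detaching an XDP program changes the RX buffer layout, so the
 * device is stopped and re-opened around the program swap whenever the
 * "program attached" state flips while the interface is running.
 */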
3533 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3534 struct netlink_ext_ack *extack)
3535 {
3536 struct mtk_mac *mac = netdev_priv(dev);
3537 struct mtk_eth *eth = mac->hw;
3538 struct bpf_prog *old_prog;
3539 bool need_update;
3540
3541 if (eth->hwlro) {
3542 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3543 return -EOPNOTSUPP;
3544 }
3545
3546 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3547 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3548 return -EOPNOTSUPP;
3549 }
3550
3551 need_update = !!eth->prog != !!prog;
3552 if (netif_running(dev) && need_update)
3553 mtk_stop(dev);
3554
3555 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3556 if (old_prog)
3557 bpf_prog_put(old_prog);
3558
3559 if (netif_running(dev) && need_update)
3560 return mtk_open(dev);
3561
3562 return 0;
3563 }
3564
3565 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3566 {
3567 switch (xdp->command) {
3568 case XDP_SETUP_PROG:
3569 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3570 default:
3571 return -EINVAL;
3572 }
3573 }
3574
3575 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3576 {
3577 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3578 reset_bits,
3579 reset_bits);
3580
3581 usleep_range(1000, 1100);
3582 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3583 reset_bits,
3584 ~reset_bits);
3585 mdelay(10);
3586 }
3587
3588 static void mtk_clk_disable(struct mtk_eth *eth)
3589 {
3590 int clk;
3591
3592 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3593 clk_disable_unprepare(eth->clks[clk]);
3594 }
3595
3596 static int mtk_clk_enable(struct mtk_eth *eth)
3597 {
3598 int clk, ret;
3599
3600 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3601 ret = clk_prepare_enable(eth->clks[clk]);
3602 if (ret)
3603 goto err_disable_clks;
3604 }
3605
3606 return 0;
3607
3608 err_disable_clks:
3609 while (--clk >= 0)
3610 clk_disable_unprepare(eth->clks[clk]);
3611
3612 return ret;
3613 }
3614
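/* The DIM work items translate the current net_dim moderation profile into
 * the PDMA (and, where present, QDMA) delay-interrupt registers. The
 * hardware delay unit is 20us, hence the DIV_ROUND_UP(usec, 20) below.
 */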
3615 static void mtk_dim_rx(struct work_struct *work)
3616 {
3617 struct dim *dim = container_of(work, struct dim, work);
3618 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3619 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3620 struct dim_cq_moder cur_profile;
3621 u32 val, cur;
3622
3623 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3624 dim->profile_ix);
3625 	spin_lock_bh(&eth->dim_lock);
3626
3627 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3628 val &= MTK_PDMA_DELAY_TX_MASK;
3629 val |= MTK_PDMA_DELAY_RX_EN;
3630
3631 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3632 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3633
3634 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3635 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3636
3637 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3638 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3639 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3640
3641 	spin_unlock_bh(&eth->dim_lock);
3642
3643 dim->state = DIM_START_MEASURE;
3644 }
3645
3646 static void mtk_dim_tx(struct work_struct *work)
3647 {
3648 struct dim *dim = container_of(work, struct dim, work);
3649 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3650 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3651 struct dim_cq_moder cur_profile;
3652 u32 val, cur;
3653
3654 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3655 dim->profile_ix);
3656 	spin_lock_bh(&eth->dim_lock);
3657
3658 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3659 val &= MTK_PDMA_DELAY_RX_MASK;
3660 val |= MTK_PDMA_DELAY_TX_EN;
3661
3662 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3663 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3664
3665 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3666 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3667
3668 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3669 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3670 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3671
3672 	spin_unlock_bh(&eth->dim_lock);
3673
3674 dim->state = DIM_START_MEASURE;
3675 }
3676
3677 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3678 {
3679 struct mtk_eth *eth = mac->hw;
3680 u32 mcr_cur, mcr_new;
3681
3682 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3683 return;
3684
3685 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3686 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3687
3688 if (val <= 1518)
3689 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3690 else if (val <= 1536)
3691 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3692 else if (val <= 1552)
3693 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3694 else
3695 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3696
3697 if (mcr_new != mcr_cur)
3698 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3699 }
3700
3701 static void mtk_hw_reset(struct mtk_eth *eth)
3702 {
3703 u32 val;
3704
3705 if (mtk_is_netsys_v2_or_greater(eth))
3706 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3707
3708 if (mtk_is_netsys_v3_or_greater(eth)) {
3709 val = RSTCTRL_PPE0_V3;
3710
3711 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3712 val |= RSTCTRL_PPE1_V3;
3713
3714 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3715 val |= RSTCTRL_PPE2;
3716
3717 val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3718 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3719 val = RSTCTRL_PPE0_V2;
3720
3721 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3722 val |= RSTCTRL_PPE1;
3723 } else {
3724 val = RSTCTRL_PPE0;
3725 }
3726
3727 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3728
3729 if (mtk_is_netsys_v3_or_greater(eth))
3730 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3731 0x6f8ff);
3732 else if (mtk_is_netsys_v2_or_greater(eth))
3733 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3734 0x3ffffff);
3735 }
3736
3737 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3738 {
3739 u32 val;
3740
3741 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3742 return val;
3743 }
3744
3745 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3746 {
3747 u32 rst_mask, val;
3748
3749 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3750 RSTCTRL_FE);
3751 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3752 val & RSTCTRL_FE, 1, 1000)) {
3753 dev_err(eth->dev, "warm reset failed\n");
3754 mtk_hw_reset(eth);
3755 return;
3756 }
3757
3758 if (mtk_is_netsys_v3_or_greater(eth)) {
3759 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3760 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3761 rst_mask |= RSTCTRL_PPE1_V3;
3762 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3763 rst_mask |= RSTCTRL_PPE2;
3764
3765 rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3766 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3767 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3768 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3769 rst_mask |= RSTCTRL_PPE1;
3770 } else {
3771 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3772 }
3773
3774 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3775
3776 udelay(1);
3777 val = mtk_hw_reset_read(eth);
3778 if (!(val & rst_mask))
3779 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3780 val, rst_mask);
3781
3782 rst_mask |= RSTCTRL_FE;
3783 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3784
3785 udelay(1);
3786 val = mtk_hw_reset_read(eth);
3787 if (val & rst_mask)
3788 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3789 val, rst_mask);
3790 }
3791
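/* Heuristic DMA hang detection used by the reset monitor: sample WDMA, QDMA
 * and ADMA state and only report a hang if the same stuck signature is seen
 * on more than two consecutive polls.
 */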
3792 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3793 {
3794 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3795 bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3796 bool oq_hang, cdm1_busy, adma_busy;
3797 bool wtx_busy, cdm_full, oq_free;
3798 u32 wdidx, val, gdm1_fc, gdm2_fc;
3799 bool qfsm_hang, qfwd_hang;
3800 bool ret = false;
3801
3802 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3803 return false;
3804
3805 /* WDMA sanity checks */
3806 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3807
3808 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3809 wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3810
3811 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3812 cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3813
3814 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3815 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3816 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3817
3818 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3819 if (++eth->reset.wdma_hang_count > 2) {
3820 eth->reset.wdma_hang_count = 0;
3821 ret = true;
3822 }
3823 goto out;
3824 }
3825
3826 /* QDMA sanity checks */
3827 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3828 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3829
3830 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3831 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3832 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3833 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3834 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3835 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3836
3837 if (qfsm_hang && qfwd_hang &&
3838 ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3839 (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3840 if (++eth->reset.qdma_hang_count > 2) {
3841 eth->reset.qdma_hang_count = 0;
3842 ret = true;
3843 }
3844 goto out;
3845 }
3846
3847 /* ADMA sanity checks */
3848 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3849 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3850 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3851 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3852
3853 if (oq_hang && cdm1_busy && adma_busy) {
3854 if (++eth->reset.adma_hang_count > 2) {
3855 eth->reset.adma_hang_count = 0;
3856 ret = true;
3857 }
3858 goto out;
3859 }
3860
3861 eth->reset.wdma_hang_count = 0;
3862 eth->reset.qdma_hang_count = 0;
3863 eth->reset.adma_hang_count = 0;
3864 out:
3865 eth->reset.wdidx = wdidx;
3866
3867 return ret;
3868 }
3869
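/* Periodic monitor work: if the DMA engines look stuck, schedule the
 * recovery path in mtk_pending_work. Skipped while a reset is already in
 * progress; always re-arms itself.
 */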
3870 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3871 {
3872 struct delayed_work *del_work = to_delayed_work(work);
3873 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3874 reset.monitor_work);
3875
3876 if (test_bit(MTK_RESETTING, &eth->state))
3877 goto out;
3878
3879 /* DMA stuck checks */
3880 if (mtk_hw_check_dma_hang(eth))
3881 schedule_work(&eth->pending_work);
3882
3883 out:
3884 schedule_delayed_work(&eth->reset.monitor_work,
3885 MTK_DMA_MONITOR_TIMEOUT);
3886 }
3887
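/* Bring up the hardware, either at probe time (reset == false, which also
 * enables runtime PM and the clocks) or as part of the warm-reset recovery
 * path. Sets up GDM/CDM/PSE defaults and interrupt grouping; the
 * interrupts themselves are left masked here.
 */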
3888 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3889 {
3890 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3891 ETHSYS_DMA_AG_MAP_PPE;
3892 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3893 int i, val, ret;
3894
3895 if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state))
3896 return 0;
3897
3898 if (!reset) {
3899 pm_runtime_enable(eth->dev);
3900 pm_runtime_get_sync(eth->dev);
3901
3902 ret = mtk_clk_enable(eth);
3903 if (ret)
3904 goto err_disable_pm;
3905 }
3906
3907 if (eth->ethsys)
3908 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3909 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3910
3911 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3912 ret = device_reset(eth->dev);
3913 if (ret) {
3914 dev_err(eth->dev, "MAC reset failed!\n");
3915 goto err_disable_pm;
3916 }
3917
3918 /* set interrupt delays based on current Net DIM sample */
3919 mtk_dim_rx(&eth->rx_dim.work);
3920 mtk_dim_tx(&eth->tx_dim.work);
3921
3922 /* disable delay and normal interrupt */
3923 mtk_tx_irq_disable(eth, ~0);
3924 mtk_rx_irq_disable(eth, ~0);
3925
3926 return 0;
3927 }
3928
3929 msleep(100);
3930
3931 if (reset)
3932 mtk_hw_warm_reset(eth);
3933 else
3934 mtk_hw_reset(eth);
3935
3936 /* No MT7628/88 support yet */
3937 if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3938 mtk_mdio_config(eth);
3939
3940 if (mtk_is_netsys_v3_or_greater(eth)) {
3941 /* Set FE to PDMAv2 if necessary */
3942 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3943 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3944 }
3945
3946 if (eth->pctl) {
3947 /* Set GE2 driving and slew rate */
3948 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3949
3950 /* set GE2 TDSEL */
3951 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3952
3953 /* set GE2 TUNE */
3954 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3955 }
3956
3957 /* Set link-down as the default for each GMAC. Each GMAC's MCR will be
3958 * set up with a more appropriate value once the mtk_mac_config
3959 * callback is invoked.
3960 */
3961 for (i = 0; i < MTK_MAX_DEVS; i++) {
3962 struct net_device *dev = eth->netdev[i];
3963
3964 if (!dev)
3965 continue;
3966
3967 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3968 mtk_set_mcr_max_rx(netdev_priv(dev),
3969 dev->mtu + MTK_RX_ETH_HLEN);
3970 }
3971
3972 /* Tell the CDM to parse the MTK special tag on packets coming from
3973 * the CPU; this also works for untagged packets.
3974 */
3975 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3976 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3977 if (mtk_is_netsys_v1(eth)) {
3978 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3979 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3980
3981 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3982 }
3983
3984 /* set interrupt delays based on current Net DIM sample */
3985 mtk_dim_rx(&eth->rx_dim.work);
3986 mtk_dim_tx(&eth->tx_dim.work);
3987
3988 /* disable delay and normal interrupt */
3989 mtk_tx_irq_disable(eth, ~0);
3990 mtk_rx_irq_disable(eth, ~0);
3991
3992 /* FE int grouping */
3993 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3994 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
3995 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3996 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
3997 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3998
3999 if (mtk_is_netsys_v3_or_greater(eth)) {
4000 /* PSE should not drop port1, port8 and port9 packets */
4001 mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
4002
4003 /* GDM and CDM Threshold */
4004 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
4005 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
4006
4007 /* Disable GDM1 RX CRC stripping */
4008 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
4009
4010 /* The PSE GDM3 MIB counters have incorrect hardware default values,
4011 * so read-clear them beforehand to keep ethtool from reporting
4012 * wrong MIB values.
4013 */
4014 for (i = 0; i < 0x80; i += 0x4)
4015 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
4016 } else if (!mtk_is_netsys_v1(eth)) {
4017 /* PSE should not drop port8 and port9 packets from WDMA Tx */
4018 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
4019
4020 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
4021 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
4022
4023 /* PSE Free Queue Flow Control */
4024 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4025
4026 /* PSE config input queue threshold */
4027 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4028 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4029 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4030 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4031 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4032 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4033 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4034 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4035
4036 /* PSE config output queue threshold */
4037 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4038 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4039 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4040 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4041 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4042 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4043 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4044 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4045
4046 /* GDM and CDM Threshold */
4047 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4048 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4049 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4050 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4051 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4052 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4053 }
4054
4055 return 0;
4056
4057 err_disable_pm:
4058 if (!reset) {
4059 pm_runtime_put_sync(eth->dev);
4060 pm_runtime_disable(eth->dev);
4061 }
4062
4063 return ret;
4064 }
4065
4066 static int mtk_hw_deinit(struct mtk_eth *eth)
4067 {
4068 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4069 return 0;
4070
4071 mtk_clk_disable(eth);
4072
4073 pm_runtime_put_sync(eth->dev);
4074 pm_runtime_disable(eth->dev);
4075
4076 return 0;
4077 }
4078
4079 static void mtk_uninit(struct net_device *dev)
4080 {
4081 struct mtk_mac *mac = netdev_priv(dev);
4082 struct mtk_eth *eth = mac->hw;
4083
4084 phylink_disconnect_phy(mac->phylink);
4085 mtk_tx_irq_disable(eth, ~0);
4086 mtk_rx_irq_disable(eth, ~0);
4087 }
4088
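/* MTU changes are rejected when an XDP program is attached and the new
 * frame size would no longer fit a single page_pool buffer; otherwise the
 * MAC RX length limit is simply updated.
 */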
4089 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4090 {
4091 int length = new_mtu + MTK_RX_ETH_HLEN;
4092 struct mtk_mac *mac = netdev_priv(dev);
4093 struct mtk_eth *eth = mac->hw;
4094
4095 if (rcu_access_pointer(eth->prog) &&
4096 length > MTK_PP_MAX_BUF_SIZE) {
4097 netdev_err(dev, "Invalid MTU for XDP mode\n");
4098 return -EINVAL;
4099 }
4100
4101 mtk_set_mcr_max_rx(mac, length);
4102 WRITE_ONCE(dev->mtu, new_mtu);
4103
4104 return 0;
4105 }
4106
4107 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4108 {
4109 struct mtk_mac *mac = netdev_priv(dev);
4110
4111 switch (cmd) {
4112 case SIOCGMIIPHY:
4113 case SIOCGMIIREG:
4114 case SIOCSMIIREG:
4115 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4116 default:
4117 break;
4118 }
4119
4120 return -EOPNOTSUPP;
4121 }
4122
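/* Quiesce the hardware ahead of a frame-engine reset: force the FE->PPE
 * ports link-down, let the PPEs prepare for reset, mask the NETSYS
 * interrupts and clear the forced-link bit on the GMACs.
 */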
4123 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4124 {
4125 u32 val;
4126 int i;
4127
4128 /* set FE PPE ports link down */
4129 for (i = MTK_GMAC1_ID;
4130 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4131 i += 2) {
4132 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4133 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4134 val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4135 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4136 val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4137 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4138 }
4139
4140 /* adjust PPE configurations to prepare for reset */
4141 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4142 mtk_ppe_prepare_reset(eth->ppe[i]);
4143
4144 /* disable NETSYS interrupts */
4145 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4146
4147 /* force link down GMAC */
4148 for (i = 0; i < 2; i++) {
4149 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4150 mtk_w32(eth, val, MTK_MAC_MCR(i));
4151 }
4152 }
4153
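/* Deferred recovery path, scheduled e.g. by the DMA hang monitor above:
 * under RTNL, quiesce the FE, stop every running netdev, perform a warm
 * mtk_hw_init() and then reopen the devices and restore the FE->PPE port
 * state.
 */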
4154 static void mtk_pending_work(struct work_struct *work)
4155 {
4156 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4157 unsigned long restart = 0;
4158 u32 val;
4159 int i;
4160
4161 rtnl_lock();
4162 set_bit(MTK_RESETTING, &eth->state);
4163
4164 mtk_prepare_for_reset(eth);
4165 mtk_wed_fe_reset();
4166 /* Run the reset preliminary configuration again to avoid any possible
4167 * race during the FE reset, since it can run with the RTNL lock released.
4168 */
4169 mtk_prepare_for_reset(eth);
4170
4171 /* stop all devices to make sure that dma is properly shut down */
4172 for (i = 0; i < MTK_MAX_DEVS; i++) {
4173 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4174 continue;
4175
4176 mtk_stop(eth->netdev[i]);
4177 __set_bit(i, &restart);
4178 }
4179
4180 usleep_range(15000, 16000);
4181
4182 if (eth->dev->pins)
4183 pinctrl_select_state(eth->dev->pins->p,
4184 eth->dev->pins->default_state);
4185 mtk_hw_init(eth, true);
4186
4187 /* restart DMA and enable IRQs */
4188 for (i = 0; i < MTK_MAX_DEVS; i++) {
4189 if (!eth->netdev[i] || !test_bit(i, &restart))
4190 continue;
4191
4192 if (mtk_open(eth->netdev[i])) {
4193 netif_alert(eth, ifup, eth->netdev[i],
4194 "Driver up/down cycle failed\n");
4195 dev_close(eth->netdev[i]);
4196 }
4197 }
4198
4199 /* set FE PPE ports link up */
4200 for (i = MTK_GMAC1_ID;
4201 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4202 i += 2) {
4203 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4204 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4205 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4206 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4207 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4208
4209 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4210 }
4211
4212 clear_bit(MTK_RESETTING, &eth->state);
4213
4214 mtk_wed_fe_reset_complete();
4215
4216 rtnl_unlock();
4217 }
4218
4219 static int mtk_free_dev(struct mtk_eth *eth)
4220 {
4221 int i;
4222
4223 for (i = 0; i < MTK_MAX_DEVS; i++) {
4224 if (!eth->netdev[i])
4225 continue;
4226 free_netdev(eth->netdev[i]);
4227 }
4228
4229 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4230 if (!eth->dsa_meta[i])
4231 break;
4232 metadata_dst_free(eth->dsa_meta[i]);
4233 }
4234
4235 return 0;
4236 }
4237
4238 static int mtk_unreg_dev(struct mtk_eth *eth)
4239 {
4240 int i;
4241
4242 for (i = 0; i < MTK_MAX_DEVS; i++) {
4243 struct mtk_mac *mac;
4244 if (!eth->netdev[i])
4245 continue;
4246 mac = netdev_priv(eth->netdev[i]);
4247 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4248 unregister_netdevice_notifier(&mac->device_notifier);
4249 unregister_netdev(eth->netdev[i]);
4250 }
4251
4252 return 0;
4253 }
4254
4255 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4256 {
4257 int i;
4258
4259 for (i = 0; i < MTK_MAX_DEVS; i++)
4260 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4261 }
4262
4263 static int mtk_cleanup(struct mtk_eth *eth)
4264 {
4265 mtk_sgmii_destroy(eth);
4266 mtk_unreg_dev(eth);
4267 mtk_free_dev(eth);
4268 cancel_work_sync(&eth->pending_work);
4269 cancel_delayed_work_sync(&eth->reset.monitor_work);
4270
4271 return 0;
4272 }
4273
4274 static int mtk_get_link_ksettings(struct net_device *ndev,
4275 struct ethtool_link_ksettings *cmd)
4276 {
4277 struct mtk_mac *mac = netdev_priv(ndev);
4278
4279 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4280 return -EBUSY;
4281
4282 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4283 }
4284
4285 static int mtk_set_link_ksettings(struct net_device *ndev,
4286 const struct ethtool_link_ksettings *cmd)
4287 {
4288 struct mtk_mac *mac = netdev_priv(ndev);
4289
4290 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4291 return -EBUSY;
4292
4293 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4294 }
4295
4296 static void mtk_get_drvinfo(struct net_device *dev,
4297 struct ethtool_drvinfo *info)
4298 {
4299 struct mtk_mac *mac = netdev_priv(dev);
4300
4301 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4302 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4303 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4304 }
4305
4306 static u32 mtk_get_msglevel(struct net_device *dev)
4307 {
4308 struct mtk_mac *mac = netdev_priv(dev);
4309
4310 return mac->hw->msg_enable;
4311 }
4312
4313 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4314 {
4315 struct mtk_mac *mac = netdev_priv(dev);
4316
4317 mac->hw->msg_enable = value;
4318 }
4319
4320 static int mtk_nway_reset(struct net_device *dev)
4321 {
4322 struct mtk_mac *mac = netdev_priv(dev);
4323
4324 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4325 return -EBUSY;
4326
4327 if (!mac->phylink)
4328 return -ENOTSUPP;
4329
4330 return phylink_ethtool_nway_reset(mac->phylink);
4331 }
4332
4333 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4334 {
4335 int i;
4336
4337 switch (stringset) {
4338 case ETH_SS_STATS: {
4339 struct mtk_mac *mac = netdev_priv(dev);
4340
4341 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4342 ethtool_puts(&data, mtk_ethtool_stats[i].str);
4343 if (mtk_page_pool_enabled(mac->hw))
4344 page_pool_ethtool_stats_get_strings(data);
4345 break;
4346 }
4347 default:
4348 break;
4349 }
4350 }
4351
4352 static int mtk_get_sset_count(struct net_device *dev, int sset)
4353 {
4354 switch (sset) {
4355 case ETH_SS_STATS: {
4356 int count = ARRAY_SIZE(mtk_ethtool_stats);
4357 struct mtk_mac *mac = netdev_priv(dev);
4358
4359 if (mtk_page_pool_enabled(mac->hw))
4360 count += page_pool_ethtool_stats_get_count();
4361 return count;
4362 }
4363 default:
4364 return -EOPNOTSUPP;
4365 }
4366 }
4367
4368 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4369 {
4370 struct page_pool_stats stats = {};
4371 int i;
4372
4373 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4374 struct mtk_rx_ring *ring = &eth->rx_ring[i];
4375
4376 if (!ring->page_pool)
4377 continue;
4378
4379 page_pool_get_stats(ring->page_pool, &stats);
4380 }
4381 page_pool_ethtool_stats_get(data, &stats);
4382 }
4383
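/* ethtool stats: snapshot the per-MAC hardware counters under the
 * u64_stats seqcount (retrying on concurrent updates) and append
 * page_pool statistics when page_pool mode is enabled.
 */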
4384 static void mtk_get_ethtool_stats(struct net_device *dev,
4385 struct ethtool_stats *stats, u64 *data)
4386 {
4387 struct mtk_mac *mac = netdev_priv(dev);
4388 struct mtk_hw_stats *hwstats = mac->hw_stats;
4389 u64 *data_src, *data_dst;
4390 unsigned int start;
4391 int i;
4392
4393 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4394 return;
4395
4396 if (netif_running(dev) && netif_device_present(dev)) {
4397 if (spin_trylock_bh(&hwstats->stats_lock)) {
4398 mtk_stats_update_mac(mac);
4399 spin_unlock_bh(&hwstats->stats_lock);
4400 }
4401 }
4402
4403 data_src = (u64 *)hwstats;
4404
4405 do {
4406 data_dst = data;
4407 start = u64_stats_fetch_begin(&hwstats->syncp);
4408
4409 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4410 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4411 if (mtk_page_pool_enabled(mac->hw))
4412 mtk_ethtool_pp_stats(mac->hw, data_dst);
4413 } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4414 }
4415
4416 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4417 u32 *rule_locs)
4418 {
4419 int ret = -EOPNOTSUPP;
4420
4421 switch (cmd->cmd) {
4422 case ETHTOOL_GRXRINGS:
4423 if (dev->hw_features & NETIF_F_LRO) {
4424 cmd->data = MTK_MAX_RX_RING_NUM;
4425 ret = 0;
4426 }
4427 break;
4428 case ETHTOOL_GRXCLSRLCNT:
4429 if (dev->hw_features & NETIF_F_LRO) {
4430 struct mtk_mac *mac = netdev_priv(dev);
4431
4432 cmd->rule_cnt = mac->hwlro_ip_cnt;
4433 ret = 0;
4434 }
4435 break;
4436 case ETHTOOL_GRXCLSRULE:
4437 if (dev->hw_features & NETIF_F_LRO)
4438 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4439 break;
4440 case ETHTOOL_GRXCLSRLALL:
4441 if (dev->hw_features & NETIF_F_LRO)
4442 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4443 rule_locs);
4444 break;
4445 default:
4446 break;
4447 }
4448
4449 return ret;
4450 }
4451
4452 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4453 {
4454 int ret = -EOPNOTSUPP;
4455
4456 switch (cmd->cmd) {
4457 case ETHTOOL_SRXCLSRLINS:
4458 if (dev->hw_features & NETIF_F_LRO)
4459 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4460 break;
4461 case ETHTOOL_SRXCLSRLDEL:
4462 if (dev->hw_features & NETIF_F_LRO)
4463 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4464 break;
4465 default:
4466 break;
4467 }
4468
4469 return ret;
4470 }
4471
4472 static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4473 {
4474 struct mtk_mac *mac = netdev_priv(dev);
4475
4476 phylink_ethtool_get_pauseparam(mac->phylink, pause);
4477 }
4478
4479 static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4480 {
4481 struct mtk_mac *mac = netdev_priv(dev);
4482
4483 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4484 }
4485
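/* TX queue selection: DSA-tagged traffic keeps its per-port queue mapping
 * shifted up by 3, everything else uses the MAC's own queue; out-of-range
 * results fall back to queue 0.
 */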
4486 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4487 struct net_device *sb_dev)
4488 {
4489 struct mtk_mac *mac = netdev_priv(dev);
4490 unsigned int queue = 0;
4491
4492 if (netdev_uses_dsa(dev))
4493 queue = skb_get_queue_mapping(skb) + 3;
4494 else
4495 queue = mac->id;
4496
4497 if (queue >= dev->num_tx_queues)
4498 queue = 0;
4499
4500 return queue;
4501 }
4502
4503 static const struct ethtool_ops mtk_ethtool_ops = {
4504 .get_link_ksettings = mtk_get_link_ksettings,
4505 .set_link_ksettings = mtk_set_link_ksettings,
4506 .get_drvinfo = mtk_get_drvinfo,
4507 .get_msglevel = mtk_get_msglevel,
4508 .set_msglevel = mtk_set_msglevel,
4509 .nway_reset = mtk_nway_reset,
4510 .get_link = ethtool_op_get_link,
4511 .get_strings = mtk_get_strings,
4512 .get_sset_count = mtk_get_sset_count,
4513 .get_ethtool_stats = mtk_get_ethtool_stats,
4514 .get_pauseparam = mtk_get_pauseparam,
4515 .set_pauseparam = mtk_set_pauseparam,
4516 .get_rxnfc = mtk_get_rxnfc,
4517 .set_rxnfc = mtk_set_rxnfc,
4518 };
4519
4520 static const struct net_device_ops mtk_netdev_ops = {
4521 .ndo_uninit = mtk_uninit,
4522 .ndo_open = mtk_open,
4523 .ndo_stop = mtk_stop,
4524 .ndo_start_xmit = mtk_start_xmit,
4525 .ndo_set_mac_address = mtk_set_mac_address,
4526 .ndo_validate_addr = eth_validate_addr,
4527 .ndo_eth_ioctl = mtk_do_ioctl,
4528 .ndo_change_mtu = mtk_change_mtu,
4529 .ndo_tx_timeout = mtk_tx_timeout,
4530 .ndo_get_stats64 = mtk_get_stats64,
4531 .ndo_fix_features = mtk_fix_features,
4532 .ndo_set_features = mtk_set_features,
4533 #ifdef CONFIG_NET_POLL_CONTROLLER
4534 .ndo_poll_controller = mtk_poll_controller,
4535 #endif
4536 .ndo_setup_tc = mtk_eth_setup_tc,
4537 .ndo_bpf = mtk_xdp,
4538 .ndo_xdp_xmit = mtk_xdp_xmit,
4539 .ndo_select_queue = mtk_select_queue,
4540 };
4541
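/* Set up one "mediatek,eth-mac" child node: allocate the netdev, MAC
 * address and stats, create the phylink instance with the interface modes
 * this SoC/port supports, and wire up the netdev/ethtool ops and features.
 */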
4542 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4543 {
4544 const __be32 *_id = of_get_property(np, "reg", NULL);
4545 phy_interface_t phy_mode;
4546 struct phylink *phylink;
4547 struct mtk_mac *mac;
4548 int id, err;
4549 int txqs = 1;
4550 u32 val;
4551
4552 if (!_id) {
4553 dev_err(eth->dev, "missing mac id\n");
4554 return -EINVAL;
4555 }
4556
4557 id = be32_to_cpup(_id);
4558 if (id >= MTK_MAX_DEVS) {
4559 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4560 return -EINVAL;
4561 }
4562
4563 if (eth->netdev[id]) {
4564 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4565 return -EINVAL;
4566 }
4567
4568 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4569 txqs = MTK_QDMA_NUM_QUEUES;
4570
4571 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4572 if (!eth->netdev[id]) {
4573 dev_err(eth->dev, "alloc_etherdev failed\n");
4574 return -ENOMEM;
4575 }
4576 mac = netdev_priv(eth->netdev[id]);
4577 eth->mac[id] = mac;
4578 mac->id = id;
4579 mac->hw = eth;
4580 mac->of_node = np;
4581
4582 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4583 if (err == -EPROBE_DEFER)
4584 return err;
4585
4586 if (err) {
4587 /* If the mac address is invalid, use random mac address */
4588 eth_hw_addr_random(eth->netdev[id]);
4589 dev_err(eth->dev, "generated random MAC address %pM\n",
4590 eth->netdev[id]->dev_addr);
4591 }
4592
4593 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4594 mac->hwlro_ip_cnt = 0;
4595
4596 mac->hw_stats = devm_kzalloc(eth->dev,
4597 sizeof(*mac->hw_stats),
4598 GFP_KERNEL);
4599 if (!mac->hw_stats) {
4600 dev_err(eth->dev, "failed to allocate counter memory\n");
4601 err = -ENOMEM;
4602 goto free_netdev;
4603 }
4604 spin_lock_init(&mac->hw_stats->stats_lock);
4605 u64_stats_init(&mac->hw_stats->syncp);
4606
4607 if (mtk_is_netsys_v3_or_greater(eth))
4608 mac->hw_stats->reg_offset = id * 0x80;
4609 else
4610 mac->hw_stats->reg_offset = id * 0x40;
4611
4612 /* phylink create */
4613 err = of_get_phy_mode(np, &phy_mode);
4614 if (err) {
4615 dev_err(eth->dev, "incorrect phy-mode\n");
4616 goto free_netdev;
4617 }
4618
4619 /* mac config is not set */
4620 mac->interface = PHY_INTERFACE_MODE_NA;
4621 mac->speed = SPEED_UNKNOWN;
4622
4623 mac->phylink_config.dev = &eth->netdev[id]->dev;
4624 mac->phylink_config.type = PHYLINK_NETDEV;
4625 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4626 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4627
4628 /* MT7623 gmac0 is now missing its speed-specific PLL configuration
4629 * in its .mac_config method (since state->speed is not valid there).
4630 * Disable support for MII, GMII and RGMII.
4631 */
4632 if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4633 __set_bit(PHY_INTERFACE_MODE_MII,
4634 mac->phylink_config.supported_interfaces);
4635 __set_bit(PHY_INTERFACE_MODE_GMII,
4636 mac->phylink_config.supported_interfaces);
4637
4638 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4639 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4640 }
4641
4642 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4643 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4644 mac->phylink_config.supported_interfaces);
4645
4646 /* TRGMII is not permitted on MT7621 if using DDR2 */
4647 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4648 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4649 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4650 if (val & SYSCFG_DRAM_TYPE_DDR2)
4651 __clear_bit(PHY_INTERFACE_MODE_TRGMII,
4652 mac->phylink_config.supported_interfaces);
4653 }
4654
4655 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4656 __set_bit(PHY_INTERFACE_MODE_SGMII,
4657 mac->phylink_config.supported_interfaces);
4658 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4659 mac->phylink_config.supported_interfaces);
4660 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4661 mac->phylink_config.supported_interfaces);
4662 }
4663
4664 if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4665 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4666 id == MTK_GMAC1_ID) {
4667 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4668 MAC_SYM_PAUSE |
4669 MAC_10000FD;
4670 phy_interface_zero(mac->phylink_config.supported_interfaces);
4671 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
4672 mac->phylink_config.supported_interfaces);
4673 }
4674
4675 phylink = phylink_create(&mac->phylink_config,
4676 of_fwnode_handle(mac->of_node),
4677 phy_mode, &mtk_phylink_ops);
4678 if (IS_ERR(phylink)) {
4679 err = PTR_ERR(phylink);
4680 goto free_netdev;
4681 }
4682
4683 mac->phylink = phylink;
4684
4685 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4686 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4687 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4688 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4689
4690 eth->netdev[id]->hw_features = eth->soc->hw_features;
4691 if (eth->hwlro)
4692 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4693
4694 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4695 ~NETIF_F_HW_VLAN_CTAG_TX;
4696 eth->netdev[id]->features |= eth->soc->hw_features;
4697 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4698
4699 eth->netdev[id]->irq = eth->irq[0];
4700 eth->netdev[id]->dev.of_node = np;
4701
4702 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4703 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4704 else
4705 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4706
4707 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4708 mac->device_notifier.notifier_call = mtk_device_event;
4709 register_netdevice_notifier(&mac->device_notifier);
4710 }
4711
4712 if (mtk_page_pool_enabled(eth))
4713 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4714 NETDEV_XDP_ACT_REDIRECT |
4715 NETDEV_XDP_ACT_NDO_XMIT |
4716 NETDEV_XDP_ACT_NDO_XMIT_SG;
4717
4718 return 0;
4719
4720 free_netdev:
4721 free_netdev(eth->netdev[id]);
4722 return err;
4723 }
4724
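/* Switch the struct device used for DMA mappings (e.g. when the WED block
 * takes over): close every running netdev, swap dma_dev and reopen them so
 * the rings get re-allocated against the new device.
 */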
4725 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4726 {
4727 struct net_device *dev, *tmp;
4728 LIST_HEAD(dev_list);
4729 int i;
4730
4731 rtnl_lock();
4732
4733 for (i = 0; i < MTK_MAX_DEVS; i++) {
4734 dev = eth->netdev[i];
4735
4736 if (!dev || !(dev->flags & IFF_UP))
4737 continue;
4738
4739 list_add_tail(&dev->close_list, &dev_list);
4740 }
4741
4742 dev_close_many(&dev_list, false);
4743
4744 eth->dma_dev = dma_dev;
4745
4746 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4747 list_del_init(&dev->close_list);
4748 dev_open(dev, NULL);
4749 }
4750
4751 rtnl_unlock();
4752 }
4753
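/* Create a LynxI PCS instance for each "mediatek,sgmiisys" phandle found
 * in the device tree, honouring the optional "mediatek,pnswap" polarity
 * swap property.
 */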
4754 static int mtk_sgmii_init(struct mtk_eth *eth)
4755 {
4756 struct device_node *np;
4757 struct regmap *regmap;
4758 u32 flags;
4759 int i;
4760
4761 for (i = 0; i < MTK_MAX_DEVS; i++) {
4762 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4763 if (!np)
4764 break;
4765
4766 regmap = syscon_node_to_regmap(np);
4767 flags = 0;
4768 if (of_property_read_bool(np, "mediatek,pnswap"))
4769 flags |= MTK_SGMII_FLAG_PN_SWAP;
4770
4771 of_node_put(np);
4772
4773 if (IS_ERR(regmap))
4774 return PTR_ERR(regmap);
4775
4776 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4777 eth->soc->ana_rgc3,
4778 flags);
4779 }
4780
4781 return 0;
4782 }
4783
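/* Platform probe: map the register (and, where present, SRAM) resources,
 * look up the syscon regmaps, clocks and IRQs, run the initial hardware
 * init, create the per-port netdevs and PPE instances and start the DMA
 * hang monitor.
 */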
4784 static int mtk_probe(struct platform_device *pdev)
4785 {
4786 struct resource *res = NULL, *res_sram;
4787 struct device_node *mac_np;
4788 struct mtk_eth *eth;
4789 int err, i;
4790
4791 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4792 if (!eth)
4793 return -ENOMEM;
4794
4795 eth->soc = of_device_get_match_data(&pdev->dev);
4796
4797 eth->dev = &pdev->dev;
4798 eth->dma_dev = &pdev->dev;
4799 eth->base = devm_platform_ioremap_resource(pdev, 0);
4800 if (IS_ERR(eth->base))
4801 return PTR_ERR(eth->base);
4802
4803 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4804 eth->ip_align = NET_IP_ALIGN;
4805
4806 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4807 /* SRAM is actual memory and supports transparent access just like DRAM.
4808 * Hence we don't require the __iomem annotation and don't need to use
4809 * accessor functions to read from or write to SRAM.
4810 */
4811 if (mtk_is_netsys_v3_or_greater(eth)) {
4812 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4813 if (IS_ERR(eth->sram_base))
4814 return PTR_ERR(eth->sram_base);
4815 } else {
4816 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4817 }
4818 }
4819
4820 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
4821 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4822 if (!err)
4823 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4824
4825 if (err) {
4826 dev_err(&pdev->dev, "Wrong DMA config\n");
4827 return -EINVAL;
4828 }
4829 }
4830
4831 spin_lock_init(&eth->page_lock);
4832 spin_lock_init(&eth->tx_irq_lock);
4833 spin_lock_init(&eth->rx_irq_lock);
4834 spin_lock_init(&eth->dim_lock);
4835
4836 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4837 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4838 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4839
4840 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4841 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4842
4843 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4844 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4845 "mediatek,ethsys");
4846 if (IS_ERR(eth->ethsys)) {
4847 dev_err(&pdev->dev, "no ethsys regmap found\n");
4848 return PTR_ERR(eth->ethsys);
4849 }
4850 }
4851
4852 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4853 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4854 "mediatek,infracfg");
4855 if (IS_ERR(eth->infra)) {
4856 dev_err(&pdev->dev, "no infracfg regmap found\n");
4857 return PTR_ERR(eth->infra);
4858 }
4859 }
4860
4861 if (of_dma_is_coherent(pdev->dev.of_node)) {
4862 struct regmap *cci;
4863
4864 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4865 "cci-control-port");
4866 /* enable CPU/bus coherency */
4867 if (!IS_ERR(cci))
4868 regmap_write(cci, 0, 3);
4869 }
4870
4871 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4872 err = mtk_sgmii_init(eth);
4873
4874 if (err)
4875 return err;
4876 }
4877
4878 if (eth->soc->required_pctl) {
4879 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4880 "mediatek,pctl");
4881 if (IS_ERR(eth->pctl)) {
4882 dev_err(&pdev->dev, "no pctl regmap found\n");
4883 err = PTR_ERR(eth->pctl);
4884 goto err_destroy_sgmii;
4885 }
4886 }
4887
4888 if (mtk_is_netsys_v2_or_greater(eth)) {
4889 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4890 if (!res) {
4891 err = -EINVAL;
4892 goto err_destroy_sgmii;
4893 }
4894 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4895 if (mtk_is_netsys_v3_or_greater(eth)) {
4896 res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4897 if (!res_sram) {
4898 err = -EINVAL;
4899 goto err_destroy_sgmii;
4900 }
4901 eth->phy_scratch_ring = res_sram->start;
4902 } else {
4903 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4904 }
4905 }
4906 }
4907
4908 if (eth->soc->offload_version) {
4909 for (i = 0;; i++) {
4910 struct device_node *np;
4911 phys_addr_t wdma_phy;
4912 u32 wdma_base;
4913
4914 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4915 break;
4916
4917 np = of_parse_phandle(pdev->dev.of_node,
4918 "mediatek,wed", i);
4919 if (!np)
4920 break;
4921
4922 wdma_base = eth->soc->reg_map->wdma_base[i];
4923 wdma_phy = res ? res->start + wdma_base : 0;
4924 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4925 wdma_phy, i);
4926 }
4927 }
4928
4929 for (i = 0; i < 3; i++) {
4930 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4931 eth->irq[i] = eth->irq[0];
4932 else
4933 eth->irq[i] = platform_get_irq(pdev, i);
4934 if (eth->irq[i] < 0) {
4935 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4936 err = -ENXIO;
4937 goto err_wed_exit;
4938 }
4939 }
4940 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4941 eth->clks[i] = devm_clk_get(eth->dev,
4942 mtk_clks_source_name[i]);
4943 if (IS_ERR(eth->clks[i])) {
4944 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4945 err = -EPROBE_DEFER;
4946 goto err_wed_exit;
4947 }
4948 if (eth->soc->required_clks & BIT(i)) {
4949 dev_err(&pdev->dev, "clock %s not found\n",
4950 mtk_clks_source_name[i]);
4951 err = -EINVAL;
4952 goto err_wed_exit;
4953 }
4954 eth->clks[i] = NULL;
4955 }
4956 }
4957
4958 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4959 INIT_WORK(&eth->pending_work, mtk_pending_work);
4960
4961 err = mtk_hw_init(eth, false);
4962 if (err)
4963 goto err_wed_exit;
4964
4965 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4966
4967 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4968 if (!of_device_is_compatible(mac_np,
4969 "mediatek,eth-mac"))
4970 continue;
4971
4972 if (!of_device_is_available(mac_np))
4973 continue;
4974
4975 err = mtk_add_mac(eth, mac_np);
4976 if (err) {
4977 of_node_put(mac_np);
4978 goto err_deinit_hw;
4979 }
4980 }
4981
4982 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4983 err = devm_request_irq(eth->dev, eth->irq[0],
4984 mtk_handle_irq, 0,
4985 dev_name(eth->dev), eth);
4986 } else {
4987 err = devm_request_irq(eth->dev, eth->irq[1],
4988 mtk_handle_irq_tx, 0,
4989 dev_name(eth->dev), eth);
4990 if (err)
4991 goto err_free_dev;
4992
4993 err = devm_request_irq(eth->dev, eth->irq[2],
4994 mtk_handle_irq_rx, 0,
4995 dev_name(eth->dev), eth);
4996 }
4997 if (err)
4998 goto err_free_dev;
4999
5000 /* No MT7628/88 support yet */
5001 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5002 err = mtk_mdio_init(eth);
5003 if (err)
5004 goto err_free_dev;
5005 }
5006
5007 if (eth->soc->offload_version) {
5008 u8 ppe_num = eth->soc->ppe_num;
5009
5010 ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
5011 for (i = 0; i < ppe_num; i++) {
5012 u32 ppe_addr = eth->soc->reg_map->ppe_base;
5013
5014 ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
5015 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
5016
5017 if (!eth->ppe[i]) {
5018 err = -ENOMEM;
5019 goto err_deinit_ppe;
5020 }
5021 err = mtk_eth_offload_init(eth, i);
5022
5023 if (err)
5024 goto err_deinit_ppe;
5025 }
5026 }
5027
5028 for (i = 0; i < MTK_MAX_DEVS; i++) {
5029 if (!eth->netdev[i])
5030 continue;
5031
5032 err = register_netdev(eth->netdev[i]);
5033 if (err) {
5034 dev_err(eth->dev, "error bringing up device\n");
5035 goto err_deinit_ppe;
5036 } else
5037 netif_info(eth, probe, eth->netdev[i],
5038 "mediatek frame engine at 0x%08lx, irq %d\n",
5039 eth->netdev[i]->base_addr, eth->irq[0]);
5040 }
5041
5042 /* we run 2 devices on the same DMA ring so we need a dummy device
5043 * for NAPI to work
5044 */
5045 eth->dummy_dev = alloc_netdev_dummy(0);
5046 if (!eth->dummy_dev) {
5047 err = -ENOMEM;
5048 dev_err(eth->dev, "failed to allocate dummy device\n");
5049 goto err_unreg_netdev;
5050 }
5051 netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5052 netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5053
5054 platform_set_drvdata(pdev, eth);
5055 schedule_delayed_work(&eth->reset.monitor_work,
5056 MTK_DMA_MONITOR_TIMEOUT);
5057
5058 return 0;
5059
5060 err_unreg_netdev:
5061 mtk_unreg_dev(eth);
5062 err_deinit_ppe:
5063 mtk_ppe_deinit(eth);
5064 mtk_mdio_cleanup(eth);
5065 err_free_dev:
5066 mtk_free_dev(eth);
5067 err_deinit_hw:
5068 mtk_hw_deinit(eth);
5069 err_wed_exit:
5070 mtk_wed_exit();
5071 err_destroy_sgmii:
5072 mtk_sgmii_destroy(eth);
5073
5074 return err;
5075 }
5076
5077 static void mtk_remove(struct platform_device *pdev)
5078 {
5079 struct mtk_eth *eth = platform_get_drvdata(pdev);
5080 struct mtk_mac *mac;
5081 int i;
5082
5083 /* stop all devices to make sure that dma is properly shut down */
5084 for (i = 0; i < MTK_MAX_DEVS; i++) {
5085 if (!eth->netdev[i])
5086 continue;
5087 mtk_stop(eth->netdev[i]);
5088 mac = netdev_priv(eth->netdev[i]);
5089 phylink_disconnect_phy(mac->phylink);
5090 }
5091
5092 mtk_wed_exit();
5093 mtk_hw_deinit(eth);
5094
5095 netif_napi_del(&eth->tx_napi);
5096 netif_napi_del(&eth->rx_napi);
5097 mtk_cleanup(eth);
5098 free_netdev(eth->dummy_dev);
5099 mtk_mdio_cleanup(eth);
5100 }
5101
5102 static const struct mtk_soc_data mt2701_data = {
5103 .reg_map = &mtk_reg_map,
5104 .caps = MT7623_CAPS | MTK_HWLRO,
5105 .hw_features = MTK_HW_FEATURES,
5106 .required_clks = MT7623_CLKS_BITMAP,
5107 .required_pctl = true,
5108 .version = 1,
5109 .tx = {
5110 .desc_size = sizeof(struct mtk_tx_dma),
5111 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5112 .dma_len_offset = 16,
5113 .dma_size = MTK_DMA_SIZE(2K),
5114 .fq_dma_size = MTK_DMA_SIZE(2K),
5115 },
5116 .rx = {
5117 .desc_size = sizeof(struct mtk_rx_dma),
5118 .irq_done_mask = MTK_RX_DONE_INT,
5119 .dma_l4_valid = RX_DMA_L4_VALID,
5120 .dma_size = MTK_DMA_SIZE(2K),
5121 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5122 .dma_len_offset = 16,
5123 },
5124 };
5125
5126 static const struct mtk_soc_data mt7621_data = {
5127 .reg_map = &mtk_reg_map,
5128 .caps = MT7621_CAPS,
5129 .hw_features = MTK_HW_FEATURES,
5130 .required_clks = MT7621_CLKS_BITMAP,
5131 .required_pctl = false,
5132 .version = 1,
5133 .offload_version = 1,
5134 .ppe_num = 1,
5135 .hash_offset = 2,
5136 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5137 .tx = {
5138 .desc_size = sizeof(struct mtk_tx_dma),
5139 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5140 .dma_len_offset = 16,
5141 .dma_size = MTK_DMA_SIZE(2K),
5142 .fq_dma_size = MTK_DMA_SIZE(2K),
5143 },
5144 .rx = {
5145 .desc_size = sizeof(struct mtk_rx_dma),
5146 .irq_done_mask = MTK_RX_DONE_INT,
5147 .dma_l4_valid = RX_DMA_L4_VALID,
5148 .dma_size = MTK_DMA_SIZE(2K),
5149 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5150 .dma_len_offset = 16,
5151 },
5152 };
5153
5154 static const struct mtk_soc_data mt7622_data = {
5155 .reg_map = &mtk_reg_map,
5156 .ana_rgc3 = 0x2028,
5157 .caps = MT7622_CAPS | MTK_HWLRO,
5158 .hw_features = MTK_HW_FEATURES,
5159 .required_clks = MT7622_CLKS_BITMAP,
5160 .required_pctl = false,
5161 .version = 1,
5162 .offload_version = 2,
5163 .ppe_num = 1,
5164 .hash_offset = 2,
5165 .has_accounting = true,
5166 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5167 .tx = {
5168 .desc_size = sizeof(struct mtk_tx_dma),
5169 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5170 .dma_len_offset = 16,
5171 .dma_size = MTK_DMA_SIZE(2K),
5172 .fq_dma_size = MTK_DMA_SIZE(2K),
5173 },
5174 .rx = {
5175 .desc_size = sizeof(struct mtk_rx_dma),
5176 .irq_done_mask = MTK_RX_DONE_INT,
5177 .dma_l4_valid = RX_DMA_L4_VALID,
5178 .dma_size = MTK_DMA_SIZE(2K),
5179 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5180 .dma_len_offset = 16,
5181 },
5182 };
5183
5184 static const struct mtk_soc_data mt7623_data = {
5185 .reg_map = &mtk_reg_map,
5186 .caps = MT7623_CAPS | MTK_HWLRO,
5187 .hw_features = MTK_HW_FEATURES,
5188 .required_clks = MT7623_CLKS_BITMAP,
5189 .required_pctl = true,
5190 .version = 1,
5191 .offload_version = 1,
5192 .ppe_num = 1,
5193 .hash_offset = 2,
5194 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5195 .disable_pll_modes = true,
5196 .tx = {
5197 .desc_size = sizeof(struct mtk_tx_dma),
5198 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5199 .dma_len_offset = 16,
5200 .dma_size = MTK_DMA_SIZE(2K),
5201 .fq_dma_size = MTK_DMA_SIZE(2K),
5202 },
5203 .rx = {
5204 .desc_size = sizeof(struct mtk_rx_dma),
5205 .irq_done_mask = MTK_RX_DONE_INT,
5206 .dma_l4_valid = RX_DMA_L4_VALID,
5207 .dma_size = MTK_DMA_SIZE(2K),
5208 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5209 .dma_len_offset = 16,
5210 },
5211 };
5212
5213 static const struct mtk_soc_data mt7629_data = {
5214 .reg_map = &mtk_reg_map,
5215 .ana_rgc3 = 0x128,
5216 .caps = MT7629_CAPS | MTK_HWLRO,
5217 .hw_features = MTK_HW_FEATURES,
5218 .required_clks = MT7629_CLKS_BITMAP,
5219 .required_pctl = false,
5220 .has_accounting = true,
5221 .version = 1,
5222 .tx = {
5223 .desc_size = sizeof(struct mtk_tx_dma),
5224 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5225 .dma_len_offset = 16,
5226 .dma_size = MTK_DMA_SIZE(2K),
5227 .fq_dma_size = MTK_DMA_SIZE(2K),
5228 },
5229 .rx = {
5230 .desc_size = sizeof(struct mtk_rx_dma),
5231 .irq_done_mask = MTK_RX_DONE_INT,
5232 .dma_l4_valid = RX_DMA_L4_VALID,
5233 .dma_size = MTK_DMA_SIZE(2K),
5234 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5235 .dma_len_offset = 16,
5236 },
5237 };
5238
5239 static const struct mtk_soc_data mt7981_data = {
5240 .reg_map = &mt7986_reg_map,
5241 .ana_rgc3 = 0x128,
5242 .caps = MT7981_CAPS,
5243 .hw_features = MTK_HW_FEATURES,
5244 .required_clks = MT7981_CLKS_BITMAP,
5245 .required_pctl = false,
5246 .version = 2,
5247 .offload_version = 2,
5248 .ppe_num = 2,
5249 .hash_offset = 4,
5250 .has_accounting = true,
5251 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5252 .tx = {
5253 .desc_size = sizeof(struct mtk_tx_dma_v2),
5254 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5255 .dma_len_offset = 8,
5256 .dma_size = MTK_DMA_SIZE(2K),
5257 .fq_dma_size = MTK_DMA_SIZE(2K),
5258 },
5259 .rx = {
5260 .desc_size = sizeof(struct mtk_rx_dma),
5261 .irq_done_mask = MTK_RX_DONE_INT,
5262 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5263 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5264 .dma_len_offset = 16,
5265 .dma_size = MTK_DMA_SIZE(2K),
5266 },
5267 };
5268
5269 static const struct mtk_soc_data mt7986_data = {
5270 .reg_map = &mt7986_reg_map,
5271 .ana_rgc3 = 0x128,
5272 .caps = MT7986_CAPS,
5273 .hw_features = MTK_HW_FEATURES,
5274 .required_clks = MT7986_CLKS_BITMAP,
5275 .required_pctl = false,
5276 .version = 2,
5277 .offload_version = 2,
5278 .ppe_num = 2,
5279 .hash_offset = 4,
5280 .has_accounting = true,
5281 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5282 .tx = {
5283 .desc_size = sizeof(struct mtk_tx_dma_v2),
5284 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5285 .dma_len_offset = 8,
5286 .dma_size = MTK_DMA_SIZE(2K),
5287 .fq_dma_size = MTK_DMA_SIZE(2K),
5288 },
5289 .rx = {
5290 .desc_size = sizeof(struct mtk_rx_dma),
5291 .irq_done_mask = MTK_RX_DONE_INT,
5292 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5293 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5294 .dma_len_offset = 16,
5295 .dma_size = MTK_DMA_SIZE(2K),
5296 },
5297 };
5298
5299 static const struct mtk_soc_data mt7988_data = {
5300 .reg_map = &mt7988_reg_map,
5301 .ana_rgc3 = 0x128,
5302 .caps = MT7988_CAPS,
5303 .hw_features = MTK_HW_FEATURES,
5304 .required_clks = MT7988_CLKS_BITMAP,
5305 .required_pctl = false,
5306 .version = 3,
5307 .offload_version = 2,
5308 .ppe_num = 3,
5309 .hash_offset = 4,
5310 .has_accounting = true,
5311 .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5312 .tx = {
5313 .desc_size = sizeof(struct mtk_tx_dma_v2),
5314 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5315 .dma_len_offset = 8,
5316 .dma_size = MTK_DMA_SIZE(2K),
5317 .fq_dma_size = MTK_DMA_SIZE(4K),
5318 },
5319 .rx = {
5320 .desc_size = sizeof(struct mtk_rx_dma_v2),
5321 .irq_done_mask = MTK_RX_DONE_INT_V2,
5322 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5323 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5324 .dma_len_offset = 8,
5325 .dma_size = MTK_DMA_SIZE(2K),
5326 },
5327 };
5328
5329 static const struct mtk_soc_data rt5350_data = {
5330 .reg_map = &mt7628_reg_map,
5331 .caps = MT7628_CAPS,
5332 .hw_features = MTK_HW_FEATURES_MT7628,
5333 .required_clks = MT7628_CLKS_BITMAP,
5334 .required_pctl = false,
5335 .version = 1,
5336 .tx = {
5337 .desc_size = sizeof(struct mtk_tx_dma),
5338 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5339 .dma_len_offset = 16,
5340 .dma_size = MTK_DMA_SIZE(2K),
5341 },
5342 .rx = {
5343 .desc_size = sizeof(struct mtk_rx_dma),
5344 .irq_done_mask = MTK_RX_DONE_INT,
5345 .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5346 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5347 .dma_len_offset = 16,
5348 .dma_size = MTK_DMA_SIZE(2K),
5349 },
5350 };
5351
5352 const struct of_device_id of_mtk_match[] = {
5353 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5354 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5355 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5356 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5357 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5358 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5359 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5360 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5361 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5362 {},
5363 };
5364 MODULE_DEVICE_TABLE(of, of_mtk_match);
5365
5366 static struct platform_driver mtk_driver = {
5367 .probe = mtk_probe,
5368 .remove = mtk_remove,
5369 .driver = {
5370 .name = "mtk_soc_eth",
5371 .of_match_table = of_mtk_match,
5372 },
5373 };
5374
5375 module_platform_driver(mtk_driver);
5376
5377 MODULE_LICENSE("GPL");
5378 MODULE_AUTHOR("John Crispin <[email protected]>");
5379 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5380