1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <[email protected]>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54
55 /* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
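/* Worked example, assuming SMP_CACHE_BYTES == 64: STMMAC_ALIGN(1500) first
 * rounds up to the 64-byte cache line (1536) and then to a 16-byte boundary,
 * which leaves it at 1536.
 */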
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
64
65 /* Module parameters */
66 #define TX_TIMEO 5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112
113 #define STMMAC_DEFAULT_LPI_TIMER 1000
114 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, uint, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
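/* @x is an LPI timeout in microseconds, converted to an absolute jiffies
 * deadline for the EEE software timer.
 */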
118
119 /* By default the driver will use the ring mode to manage tx and rx descriptors,
120 * but allows the user to force chain mode instead of ring mode.
121 */
122 static unsigned int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
125
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 u32 rxmode, u32 chan);
139
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
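/* Convert a coalescing timeout given in microseconds to a ktime value. */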
147
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 int ret = 0;
151
152 if (enabled) {
153 ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 if (ret)
155 return ret;
156 ret = clk_prepare_enable(priv->plat->pclk);
157 if (ret) {
158 clk_disable_unprepare(priv->plat->stmmac_clk);
159 return ret;
160 }
161 if (priv->plat->clks_config) {
162 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 if (ret) {
164 clk_disable_unprepare(priv->plat->stmmac_clk);
165 clk_disable_unprepare(priv->plat->pclk);
166 return ret;
167 }
168 }
169 } else {
170 clk_disable_unprepare(priv->plat->stmmac_clk);
171 clk_disable_unprepare(priv->plat->pclk);
172 if (priv->plat->clks_config)
173 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 }
175
176 return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179
180 /**
181 * stmmac_verify_args - verify the driver parameters.
182 * Description: it checks the driver parameters and sets a default in case of
183 * errors.
184 */
185 static void stmmac_verify_args(void)
186 {
187 if (unlikely(watchdog < 0))
188 watchdog = TX_TIMEO;
189 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 buf_sz = DEFAULT_BUFSIZE;
191 if (unlikely(flow_ctrl > 1))
192 flow_ctrl = FLOW_AUTO;
193 else if (likely(flow_ctrl < 0))
194 flow_ctrl = FLOW_OFF;
195 if (unlikely((pause < 0) || (pause > 0xffff)))
196 pause = PAUSE_TIME;
197 }
198
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204 u32 queue;
205
206 for (queue = 0; queue < maxq; queue++) {
207 struct stmmac_channel *ch = &priv->channel[queue];
208
209 if (stmmac_xdp_is_enabled(priv) &&
210 test_bit(queue, priv->af_xdp_zc_qps)) {
211 napi_disable(&ch->rxtx_napi);
212 continue;
213 }
214
215 if (queue < rx_queues_cnt)
216 napi_disable(&ch->rx_napi);
217 if (queue < tx_queues_cnt)
218 napi_disable(&ch->tx_napi);
219 }
220 }
221
222 /**
223 * stmmac_disable_all_queues - Disable all queues
224 * @priv: driver private structure
225 */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229 struct stmmac_rx_queue *rx_q;
230 u32 queue;
231
232 /* synchronize_rcu() needed for pending XDP buffers to drain */
233 for (queue = 0; queue < rx_queues_cnt; queue++) {
234 rx_q = &priv->dma_conf.rx_queue[queue];
235 if (rx_q->xsk_pool) {
236 synchronize_rcu();
237 break;
238 }
239 }
240
241 __stmmac_disable_all_queues(priv);
242 }
243
244 /**
245 * stmmac_enable_all_queues - Enable all queues
246 * @priv: driver private structure
247 */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253 u32 queue;
254
255 for (queue = 0; queue < maxq; queue++) {
256 struct stmmac_channel *ch = &priv->channel[queue];
257
258 if (stmmac_xdp_is_enabled(priv) &&
259 test_bit(queue, priv->af_xdp_zc_qps)) {
260 napi_enable(&ch->rxtx_napi);
261 continue;
262 }
263
264 if (queue < rx_queues_cnt)
265 napi_enable(&ch->rx_napi);
266 if (queue < tx_queues_cnt)
267 napi_enable(&ch->tx_napi);
268 }
269 }
270
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273 if (!test_bit(STMMAC_DOWN, &priv->state) &&
274 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275 queue_work(priv->wq, &priv->service_task);
276 }
277
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280 netif_carrier_off(priv->dev);
281 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282 stmmac_service_event_schedule(priv);
283 }
284
285 /**
286 * stmmac_clk_csr_set - dynamically set the MDC clock
287 * @priv: driver private structure
288 * Description: this is to dynamically set the MDC clock according to the csr
289 * clock input.
290 * Note:
291 * If a specific clk_csr value is passed from the platform
292 * this means that the CSR Clock Range selection cannot be
293 * changed at run-time and it is fixed (as reported in the driver
294 * documentation). Otherwise, the driver will try to set the MDC
295 * clock dynamically according to the actual clock input.
296 */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299 unsigned long clk_rate;
300
301 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302
303 /* The platform-provided default clk_csr is assumed valid
304 * for all cases except the ones handled below.
305 * For rates above the IEEE 802.3 specified frequency range
306 * we cannot estimate the proper divider because the frequency
307 * of clk_csr_i is not known, so the default divider is left
308 * unchanged.
309 */
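/* For example, a 75 MHz clk_csr_i falls into the 60-100 MHz band below
 * and selects STMMAC_CSR_60_100M.
 */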
310 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311 if (clk_rate < CSR_F_35M)
312 priv->clk_csr = STMMAC_CSR_20_35M;
313 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314 priv->clk_csr = STMMAC_CSR_35_60M;
315 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316 priv->clk_csr = STMMAC_CSR_60_100M;
317 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318 priv->clk_csr = STMMAC_CSR_100_150M;
319 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320 priv->clk_csr = STMMAC_CSR_150_250M;
321 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322 priv->clk_csr = STMMAC_CSR_250_300M;
323 else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
324 priv->clk_csr = STMMAC_CSR_300_500M;
325 else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
326 priv->clk_csr = STMMAC_CSR_500_800M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
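	/* One descriptor is always left unused so that a full ring (cur_tx one
	 * behind dirty_tx) can be told apart from an empty one
	 * (cur_tx == dirty_tx); hence the "- 1" below.
	 */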
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372 return avail;
373 }
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
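	/* "Dirty" RX descriptors have been processed by the driver but not yet
	 * refilled with a fresh buffer and handed back to the DMA.
	 */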
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
392
393 static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
394 {
395 stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
396 }
397
398 static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
399 {
400 stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
401 }
402
403 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
404 {
405 u32 tx_cnt = priv->plat->tx_queues_to_use;
406 u32 queue;
407
408 /* check if all TX queues have the work finished */
409 for (queue = 0; queue < tx_cnt; queue++) {
410 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
411
412 if (tx_q->dirty_tx != tx_q->cur_tx)
413 return true; /* still unfinished work */
414 }
415
416 return false;
417 }
418
419 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
420 {
421 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
422 }
423
424 /**
425 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
426 * @priv: driver private structure
427 * Description: this function checks that the TX path is idle and, if so,
428 * enters LPI mode when EEE is enabled.
429 */
430 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
431 {
432 if (stmmac_eee_tx_busy(priv)) {
433 stmmac_restart_sw_lpi_timer(priv);
434 return;
435 }
436
437 /* Check and enter in LPI mode */
438 if (!priv->tx_path_in_lpi_mode)
439 stmmac_set_eee_mode(priv, priv->hw,
440 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
441 }
442
443 /**
444 * stmmac_stop_sw_lpi - stop transmitting LPI
445 * @priv: driver private structure
446 * Description: When using software-controlled LPI, stop transmitting LPI state.
447 */
448 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
449 {
450 stmmac_reset_eee_mode(priv, priv->hw);
451 del_timer_sync(&priv->eee_ctrl_timer);
452 priv->tx_path_in_lpi_mode = false;
453 }
454
455 /**
456 * stmmac_eee_ctrl_timer - EEE TX SW timer.
457 * @t: timer_list struct containing private info
458 * Description:
459 * if there is no data transfer and if we are not in LPI state,
460 * then the MAC Transmitter can be moved to the LPI state.
461 */
462 static void stmmac_eee_ctrl_timer(struct timer_list *t)
463 {
464 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
465
466 stmmac_try_to_start_sw_lpi(priv);
467 }
468
469 /**
470 * stmmac_eee_init - init EEE
471 * @priv: driver private structure
472 * @active: indicates whether EEE should be enabled.
473 * Description:
474 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
475 * can also manage EEE, this function enables the LPI state and starts the
476 * related timer.
477 */
478 static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
479 {
480 priv->eee_active = active;
481
482 /* Check if MAC core supports the EEE feature. */
483 if (!priv->dma_cap.eee) {
484 priv->eee_enabled = false;
485 return;
486 }
487
488 mutex_lock(&priv->lock);
489
490 /* Check if it needs to be deactivated */
491 if (!priv->eee_active) {
492 if (priv->eee_enabled) {
493 netdev_dbg(priv->dev, "disable EEE\n");
494 priv->eee_sw_timer_en = false;
495 stmmac_disable_hw_lpi_timer(priv);
496 del_timer_sync(&priv->eee_ctrl_timer);
497 stmmac_set_eee_timer(priv, priv->hw, 0,
498 STMMAC_DEFAULT_TWT_LS);
499 if (priv->hw->xpcs)
500 xpcs_config_eee(priv->hw->xpcs,
501 priv->plat->mult_fact_100ns,
502 false);
503 }
504 priv->eee_enabled = false;
505 mutex_unlock(&priv->lock);
506 return;
507 }
508
509 if (priv->eee_active && !priv->eee_enabled) {
510 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
511 STMMAC_DEFAULT_TWT_LS);
512 if (priv->hw->xpcs)
513 xpcs_config_eee(priv->hw->xpcs,
514 priv->plat->mult_fact_100ns,
515 true);
516 }
517
518 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
519 /* Use hardware LPI mode */
520 del_timer_sync(&priv->eee_ctrl_timer);
521 priv->tx_path_in_lpi_mode = false;
522 priv->eee_sw_timer_en = false;
523 stmmac_enable_hw_lpi_timer(priv);
524 } else {
525 /* Use software LPI mode */
526 priv->eee_sw_timer_en = true;
527 stmmac_disable_hw_lpi_timer(priv);
528 stmmac_restart_sw_lpi_timer(priv);
529 }
530
531 priv->eee_enabled = true;
532
533 mutex_unlock(&priv->lock);
534 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
535 }
536
537 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
538 * @priv: driver private structure
539 * @p : descriptor pointer
540 * @skb : the socket buffer
541 * Description :
542 * This function reads the timestamp from the descriptor, performs some
543 * sanity checks and then passes it to the stack.
544 */
545 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
546 struct dma_desc *p, struct sk_buff *skb)
547 {
548 struct skb_shared_hwtstamps shhwtstamp;
549 bool found = false;
550 u64 ns = 0;
551
552 if (!priv->hwts_tx_en)
553 return;
554
555 /* exit if skb doesn't support hw tstamp */
556 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
557 return;
558
559 /* check tx tstamp status */
560 if (stmmac_get_tx_timestamp_status(priv, p)) {
561 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
562 found = true;
563 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
564 found = true;
565 }
566
567 if (found) {
568 ns -= priv->plat->cdc_error_adj;
569
570 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
571 shhwtstamp.hwtstamp = ns_to_ktime(ns);
572
573 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
574 /* pass tstamp to stack */
575 skb_tstamp_tx(skb, &shhwtstamp);
576 }
577 }
578
579 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
580 * @priv: driver private structure
581 * @p : descriptor pointer
582 * @np : next descriptor pointer
583 * @skb : the socket buffer
584 * Description :
585 * This function reads the received packet's timestamp from the descriptor
586 * and passes it to the stack. It also performs some sanity checks.
587 */
588 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
589 struct dma_desc *np, struct sk_buff *skb)
590 {
591 struct skb_shared_hwtstamps *shhwtstamp = NULL;
592 struct dma_desc *desc = p;
593 u64 ns = 0;
594
595 if (!priv->hwts_rx_en)
596 return;
597 /* For GMAC4, the valid timestamp is from CTX next desc. */
598 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
599 desc = np;
600
601 /* Check if timestamp is available */
602 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
603 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
604
605 ns -= priv->plat->cdc_error_adj;
606
607 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
608 shhwtstamp = skb_hwtstamps(skb);
609 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
610 shhwtstamp->hwtstamp = ns_to_ktime(ns);
611 } else {
612 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
613 }
614 }
615
616 /**
617 * stmmac_hwtstamp_set - control hardware timestamping.
618 * @dev: device pointer.
619 * @ifr: An IOCTL-specific structure that can contain a pointer to
620 * a proprietary structure used to pass information to the driver.
621 * Description:
622 * This function configures the MAC to enable/disable both outgoing (TX)
623 * and incoming (RX) packet timestamping based on user input.
624 * Return Value:
625 * 0 on success and an appropriate -ve integer on failure.
626 */
627 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
628 {
629 struct stmmac_priv *priv = netdev_priv(dev);
630 struct hwtstamp_config config;
631 u32 ptp_v2 = 0;
632 u32 tstamp_all = 0;
633 u32 ptp_over_ipv4_udp = 0;
634 u32 ptp_over_ipv6_udp = 0;
635 u32 ptp_over_ethernet = 0;
636 u32 snap_type_sel = 0;
637 u32 ts_master_en = 0;
638 u32 ts_event_en = 0;
639
640 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
641 netdev_alert(priv->dev, "No support for HW time stamping\n");
642 priv->hwts_tx_en = 0;
643 priv->hwts_rx_en = 0;
644
645 return -EOPNOTSUPP;
646 }
647
648 if (copy_from_user(&config, ifr->ifr_data,
649 sizeof(config)))
650 return -EFAULT;
651
652 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
653 __func__, config.flags, config.tx_type, config.rx_filter);
654
655 if (config.tx_type != HWTSTAMP_TX_OFF &&
656 config.tx_type != HWTSTAMP_TX_ON)
657 return -ERANGE;
658
659 if (priv->adv_ts) {
660 switch (config.rx_filter) {
661 case HWTSTAMP_FILTER_NONE:
662 /* do not time stamp any incoming packet */
663 config.rx_filter = HWTSTAMP_FILTER_NONE;
664 break;
665
666 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
667 /* PTP v1, UDP, any kind of event packet */
668 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
669 /* 'xmac' hardware can support Sync, Pdelay_Req and
670 * Pdelay_resp by setting bit14 and bits17/16 to 01
671 * This leaves Delay_Req timestamps out.
672 * Enable all events *and* general purpose message
673 * timestamping
674 */
675 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
676 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 break;
679
680 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
681 /* PTP v1, UDP, Sync packet */
682 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
683 /* take time stamp for SYNC messages only */
684 ts_event_en = PTP_TCR_TSEVNTENA;
685
686 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 break;
689
690 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
691 /* PTP v1, UDP, Delay_req packet */
692 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
693 /* take time stamp for Delay_Req messages only */
694 ts_master_en = PTP_TCR_TSMSTRENA;
695 ts_event_en = PTP_TCR_TSEVNTENA;
696
697 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 break;
700
701 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
702 /* PTP v2, UDP, any kind of event packet */
703 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
704 ptp_v2 = PTP_TCR_TSVER2ENA;
705 /* take time stamp for all event messages */
706 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
707
708 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
709 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
710 break;
711
712 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
713 /* PTP v2, UDP, Sync packet */
714 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
715 ptp_v2 = PTP_TCR_TSVER2ENA;
716 /* take time stamp for SYNC messages only */
717 ts_event_en = PTP_TCR_TSEVNTENA;
718
719 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
720 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
721 break;
722
723 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
724 /* PTP v2, UDP, Delay_req packet */
725 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
726 ptp_v2 = PTP_TCR_TSVER2ENA;
727 /* take time stamp for Delay_Req messages only */
728 ts_master_en = PTP_TCR_TSMSTRENA;
729 ts_event_en = PTP_TCR_TSEVNTENA;
730
731 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
732 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
733 break;
734
735 case HWTSTAMP_FILTER_PTP_V2_EVENT:
736 /* PTP v2/802.1AS, any layer, any kind of event packet */
737 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
738 ptp_v2 = PTP_TCR_TSVER2ENA;
739 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
740 if (priv->synopsys_id < DWMAC_CORE_4_10)
741 ts_event_en = PTP_TCR_TSEVNTENA;
742 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
743 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
744 ptp_over_ethernet = PTP_TCR_TSIPENA;
745 break;
746
747 case HWTSTAMP_FILTER_PTP_V2_SYNC:
748 /* PTP v2/802.1AS, any layer, Sync packet */
749 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
750 ptp_v2 = PTP_TCR_TSVER2ENA;
751 /* take time stamp for SYNC messages only */
752 ts_event_en = PTP_TCR_TSEVNTENA;
753
754 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
755 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
756 ptp_over_ethernet = PTP_TCR_TSIPENA;
757 break;
758
759 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
760 /* PTP v2/802.1AS, any layer, Delay_req packet */
761 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
762 ptp_v2 = PTP_TCR_TSVER2ENA;
763 /* take time stamp for Delay_Req messages only */
764 ts_master_en = PTP_TCR_TSMSTRENA;
765 ts_event_en = PTP_TCR_TSEVNTENA;
766
767 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
768 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
769 ptp_over_ethernet = PTP_TCR_TSIPENA;
770 break;
771
772 case HWTSTAMP_FILTER_NTP_ALL:
773 case HWTSTAMP_FILTER_ALL:
774 /* time stamp any incoming packet */
775 config.rx_filter = HWTSTAMP_FILTER_ALL;
776 tstamp_all = PTP_TCR_TSENALL;
777 break;
778
779 default:
780 return -ERANGE;
781 }
782 } else {
783 switch (config.rx_filter) {
784 case HWTSTAMP_FILTER_NONE:
785 config.rx_filter = HWTSTAMP_FILTER_NONE;
786 break;
787 default:
788 /* PTP v1, UDP, any kind of event packet */
789 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
790 break;
791 }
792 }
793 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
794 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
795
796 priv->systime_flags = STMMAC_HWTS_ACTIVE;
797
798 if (priv->hwts_tx_en || priv->hwts_rx_en) {
799 priv->systime_flags |= tstamp_all | ptp_v2 |
800 ptp_over_ethernet | ptp_over_ipv6_udp |
801 ptp_over_ipv4_udp | ts_event_en |
802 ts_master_en | snap_type_sel;
803 }
804
805 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
806
807 memcpy(&priv->tstamp_config, &config, sizeof(config));
808
809 return copy_to_user(ifr->ifr_data, &config,
810 sizeof(config)) ? -EFAULT : 0;
811 }
812
813 /**
814 * stmmac_hwtstamp_get - read hardware timestamping.
815 * @dev: device pointer.
816 * @ifr: An IOCTL-specific structure that can contain a pointer to
817 * a proprietary structure used to pass information to the driver.
818 * Description:
819 * This function obtains the current hardware timestamping settings
820 * as requested.
821 */
822 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
823 {
824 struct stmmac_priv *priv = netdev_priv(dev);
825 struct hwtstamp_config *config = &priv->tstamp_config;
826
827 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
828 return -EOPNOTSUPP;
829
830 return copy_to_user(ifr->ifr_data, config,
831 sizeof(*config)) ? -EFAULT : 0;
832 }
833
834 /**
835 * stmmac_init_tstamp_counter - init hardware timestamping counter
836 * @priv: driver private structure
837 * @systime_flags: timestamping flags
838 * Description:
839 * Initialize hardware counter for packet timestamping.
840 * This is valid as long as the interface is open and not suspended.
841 * Will be rerun after resuming from suspend, in which case the timestamping
842 * flags updated by stmmac_hwtstamp_set() also need to be restored.
843 */
844 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
845 {
846 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
847 struct timespec64 now;
848 u32 sec_inc = 0;
849 u64 temp = 0;
850
851 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
852 return -EOPNOTSUPP;
853
854 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
855 priv->systime_flags = systime_flags;
856
857 /* program Sub Second Increment reg */
858 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
859 priv->plat->clk_ptp_rate,
860 xmac, &sec_inc);
861 temp = div_u64(1000000000ULL, sec_inc);
862
863 /* Store sub second increment for later use */
864 priv->sub_second_inc = sec_inc;
865
866 /* Calculate the default addend:
867 * addend = 2^32 * (10^9 / sec_inc) / clk_ptp_rate
868 * so that the 32-bit accumulator wraps, and the sub-second register
869 * advances by sec_inc ns, at a rate of 10^9 / sec_inc Hz.
870 */
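/* Hypothetical example: with clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns,
 * addend = 2^32 * (10^9 / 20) / 62.5e6 ~= 0xCCCCCCCC.
 */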
871 temp = (u64)(temp << 32);
872 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
873 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
874
875 /* initialize system time */
876 ktime_get_real_ts64(&now);
877
878 /* lower 32 bits of tv_sec are safe until y2106 */
879 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
880
881 return 0;
882 }
883 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
884
885 /**
886 * stmmac_init_ptp - init PTP
887 * @priv: driver private structure
888 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
889 * This is done by looking at the HW cap. register.
890 * This function also registers the ptp driver.
891 */
892 static int stmmac_init_ptp(struct stmmac_priv *priv)
893 {
894 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
895 int ret;
896
897 if (priv->plat->ptp_clk_freq_config)
898 priv->plat->ptp_clk_freq_config(priv);
899
900 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
901 if (ret)
902 return ret;
903
904 priv->adv_ts = 0;
905 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
906 if (xmac && priv->dma_cap.atime_stamp)
907 priv->adv_ts = 1;
908 /* Dwmac 3.x core with extend_desc can support adv_ts */
909 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
910 priv->adv_ts = 1;
911
912 if (priv->dma_cap.time_stamp)
913 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
914
915 if (priv->adv_ts)
916 netdev_info(priv->dev,
917 "IEEE 1588-2008 Advanced Timestamp supported\n");
918
919 priv->hwts_tx_en = 0;
920 priv->hwts_rx_en = 0;
921
922 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
923 stmmac_hwtstamp_correct_latency(priv, priv);
924
925 return 0;
926 }
927
928 static void stmmac_release_ptp(struct stmmac_priv *priv)
929 {
930 clk_disable_unprepare(priv->plat->clk_ptp_ref);
931 stmmac_ptp_unregister(priv);
932 }
933
934 /**
935 * stmmac_mac_flow_ctrl - Configure flow control in all queues
936 * @priv: driver private structure
937 * @duplex: duplex passed to the next function
938 * Description: It is used for configuring the flow control in all queues
939 */
940 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
941 {
942 u32 tx_cnt = priv->plat->tx_queues_to_use;
943
944 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
945 priv->pause, tx_cnt);
946 }
947
948 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
949 phy_interface_t interface)
950 {
951 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952
953 /* Refresh the MAC-specific capabilities */
954 stmmac_mac_update_caps(priv);
955
956 config->mac_capabilities = priv->hw->link.caps;
957
958 if (priv->plat->max_speed)
959 phylink_limit_mac_speed(config, priv->plat->max_speed);
960
961 return config->mac_capabilities;
962 }
963
964 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
965 phy_interface_t interface)
966 {
967 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
968 struct phylink_pcs *pcs;
969
970 if (priv->plat->select_pcs) {
971 pcs = priv->plat->select_pcs(priv, interface);
972 if (!IS_ERR(pcs))
973 return pcs;
974 }
975
976 return NULL;
977 }
978
979 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
980 const struct phylink_link_state *state)
981 {
982 /* Nothing to do, xpcs_config() handles everything */
983 }
984
985 static void stmmac_mac_link_down(struct phylink_config *config,
986 unsigned int mode, phy_interface_t interface)
987 {
988 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989
990 stmmac_mac_set(priv, priv->ioaddr, false);
991 if (priv->dma_cap.eee)
992 stmmac_set_eee_pls(priv, priv->hw, false);
993
994 if (stmmac_fpe_supported(priv))
995 stmmac_fpe_link_state_handle(priv, false);
996 }
997
998 static void stmmac_mac_link_up(struct phylink_config *config,
999 struct phy_device *phy,
1000 unsigned int mode, phy_interface_t interface,
1001 int speed, int duplex,
1002 bool tx_pause, bool rx_pause)
1003 {
1004 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1005 u32 old_ctrl, ctrl;
1006
1007 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1008 priv->plat->serdes_powerup)
1009 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1010
1011 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1012 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1013
1014 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1015 switch (speed) {
1016 case SPEED_10000:
1017 ctrl |= priv->hw->link.xgmii.speed10000;
1018 break;
1019 case SPEED_5000:
1020 ctrl |= priv->hw->link.xgmii.speed5000;
1021 break;
1022 case SPEED_2500:
1023 ctrl |= priv->hw->link.xgmii.speed2500;
1024 break;
1025 default:
1026 return;
1027 }
1028 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1029 switch (speed) {
1030 case SPEED_100000:
1031 ctrl |= priv->hw->link.xlgmii.speed100000;
1032 break;
1033 case SPEED_50000:
1034 ctrl |= priv->hw->link.xlgmii.speed50000;
1035 break;
1036 case SPEED_40000:
1037 ctrl |= priv->hw->link.xlgmii.speed40000;
1038 break;
1039 case SPEED_25000:
1040 ctrl |= priv->hw->link.xlgmii.speed25000;
1041 break;
1042 case SPEED_10000:
1043 ctrl |= priv->hw->link.xgmii.speed10000;
1044 break;
1045 case SPEED_2500:
1046 ctrl |= priv->hw->link.speed2500;
1047 break;
1048 case SPEED_1000:
1049 ctrl |= priv->hw->link.speed1000;
1050 break;
1051 default:
1052 return;
1053 }
1054 } else {
1055 switch (speed) {
1056 case SPEED_2500:
1057 ctrl |= priv->hw->link.speed2500;
1058 break;
1059 case SPEED_1000:
1060 ctrl |= priv->hw->link.speed1000;
1061 break;
1062 case SPEED_100:
1063 ctrl |= priv->hw->link.speed100;
1064 break;
1065 case SPEED_10:
1066 ctrl |= priv->hw->link.speed10;
1067 break;
1068 default:
1069 return;
1070 }
1071 }
1072
1073 priv->speed = speed;
1074
1075 if (priv->plat->fix_mac_speed)
1076 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1077
1078 if (!duplex)
1079 ctrl &= ~priv->hw->link.duplex;
1080 else
1081 ctrl |= priv->hw->link.duplex;
1082
1083 /* Flow Control operation */
1084 if (rx_pause && tx_pause)
1085 priv->flow_ctrl = FLOW_AUTO;
1086 else if (rx_pause && !tx_pause)
1087 priv->flow_ctrl = FLOW_RX;
1088 else if (!rx_pause && tx_pause)
1089 priv->flow_ctrl = FLOW_TX;
1090 else
1091 priv->flow_ctrl = FLOW_OFF;
1092
1093 stmmac_mac_flow_ctrl(priv, duplex);
1094
1095 if (ctrl != old_ctrl)
1096 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1097
1098 stmmac_mac_set(priv, priv->ioaddr, true);
1099 if (priv->dma_cap.eee)
1100 stmmac_set_eee_pls(priv, priv->hw, true);
1101
1102 if (stmmac_fpe_supported(priv))
1103 stmmac_fpe_link_state_handle(priv, true);
1104
1105 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1106 stmmac_hwtstamp_correct_latency(priv, priv);
1107 }
1108
1109 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1110 {
1111 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1112
1113 stmmac_eee_init(priv, false);
1114 }
1115
1116 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1117 bool tx_clk_stop)
1118 {
1119 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1120
1121 priv->tx_lpi_timer = timer;
1122 stmmac_eee_init(priv, true);
1123
1124 return 0;
1125 }
1126
1127 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1128 .mac_get_caps = stmmac_mac_get_caps,
1129 .mac_select_pcs = stmmac_mac_select_pcs,
1130 .mac_config = stmmac_mac_config,
1131 .mac_link_down = stmmac_mac_link_down,
1132 .mac_link_up = stmmac_mac_link_up,
1133 .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1134 .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1135 };
1136
1137 /**
1138 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1139 * @priv: driver private structure
1140 * Description: this is to verify if the HW supports the PCS.
1141 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1142 * configured for the TBI, RTBI, or SGMII PHY interface.
1143 */
1144 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1145 {
1146 int interface = priv->plat->mac_interface;
1147
1148 if (priv->dma_cap.pcs) {
1149 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1150 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1151 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1152 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1153 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1154 priv->hw->pcs = STMMAC_PCS_RGMII;
1155 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1156 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1157 priv->hw->pcs = STMMAC_PCS_SGMII;
1158 }
1159 }
1160 }
1161
1162 /**
1163 * stmmac_init_phy - PHY initialization
1164 * @dev: net device structure
1165 * Description: it initializes the driver's PHY state, and attaches the PHY
1166 * to the mac driver.
1167 * Return value:
1168 * 0 on success
1169 */
1170 static int stmmac_init_phy(struct net_device *dev)
1171 {
1172 struct stmmac_priv *priv = netdev_priv(dev);
1173 struct fwnode_handle *phy_fwnode;
1174 struct fwnode_handle *fwnode;
1175 int ret;
1176
1177 if (!phylink_expects_phy(priv->phylink))
1178 return 0;
1179
1180 fwnode = priv->plat->port_node;
1181 if (!fwnode)
1182 fwnode = dev_fwnode(priv->device);
1183
1184 if (fwnode)
1185 phy_fwnode = fwnode_get_phy_node(fwnode);
1186 else
1187 phy_fwnode = NULL;
1188
1189 /* Some DT bindings do not set up the PHY handle. Let's try to
1190 * manually parse it
1191 */
1192 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1193 int addr = priv->plat->phy_addr;
1194 struct phy_device *phydev;
1195
1196 if (addr < 0) {
1197 netdev_err(priv->dev, "no phy found\n");
1198 return -ENODEV;
1199 }
1200
1201 phydev = mdiobus_get_phy(priv->mii, addr);
1202 if (!phydev) {
1203 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1204 return -ENODEV;
1205 }
1206
1207 ret = phylink_connect_phy(priv->phylink, phydev);
1208 } else {
1209 fwnode_handle_put(phy_fwnode);
1210 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1211 }
1212
1213 if (ret == 0) {
1214 struct ethtool_keee eee;
1215
1216 /* Configure phylib's copy of the LPI timer. Normally,
1217 * phylink_config.lpi_timer_default would do this, but there is
1218 * a chance that userspace could change the eee_timer setting
1219 * via sysfs before the first open. Thus, preserve existing
1220 * behaviour.
1221 */
1222 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1223 eee.tx_lpi_timer = priv->tx_lpi_timer;
1224 phylink_ethtool_set_eee(priv->phylink, &eee);
1225 }
1226 }
1227
1228 if (!priv->plat->pmt) {
1229 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1230
1231 phylink_ethtool_get_wol(priv->phylink, &wol);
1232 device_set_wakeup_capable(priv->device, !!wol.supported);
1233 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1234 }
1235
1236 return ret;
1237 }
1238
1239 static int stmmac_phy_setup(struct stmmac_priv *priv)
1240 {
1241 struct stmmac_mdio_bus_data *mdio_bus_data;
1242 int mode = priv->plat->phy_interface;
1243 struct fwnode_handle *fwnode;
1244 struct phylink_pcs *pcs;
1245 struct phylink *phylink;
1246
1247 priv->phylink_config.dev = &priv->dev->dev;
1248 priv->phylink_config.type = PHYLINK_NETDEV;
1249 priv->phylink_config.mac_managed_pm = true;
1250
1251 /* Stmmac always requires an RX clock for hardware initialization */
1252 priv->phylink_config.mac_requires_rxc = true;
1253
1254 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1255 priv->phylink_config.eee_rx_clk_stop_enable = true;
1256
1257 mdio_bus_data = priv->plat->mdio_bus_data;
1258 if (mdio_bus_data)
1259 priv->phylink_config.default_an_inband =
1260 mdio_bus_data->default_an_inband;
1261
1262 /* Set the platform/firmware specified interface mode. Note, phylink
1263 * deals with the PHY interface mode, not the MAC interface mode.
1264 */
1265 __set_bit(mode, priv->phylink_config.supported_interfaces);
1266
1267 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1268 if (priv->hw->xpcs)
1269 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1270 else
1271 pcs = priv->hw->phylink_pcs;
1272
1273 if (pcs)
1274 phy_interface_or(priv->phylink_config.supported_interfaces,
1275 priv->phylink_config.supported_interfaces,
1276 pcs->supported_interfaces);
1277
1278 if (priv->dma_cap.eee) {
1279 /* Assume all supported interfaces also support LPI */
1280 memcpy(priv->phylink_config.lpi_interfaces,
1281 priv->phylink_config.supported_interfaces,
1282 sizeof(priv->phylink_config.lpi_interfaces));
1283
1284 /* All full duplex speeds above 100Mbps are supported */
1285 priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
1286 MAC_100FD;
1287 priv->phylink_config.lpi_timer_default = eee_timer * 1000;
1288 priv->phylink_config.eee_enabled_default = true;
1289 }
1290
1291 fwnode = priv->plat->port_node;
1292 if (!fwnode)
1293 fwnode = dev_fwnode(priv->device);
1294
1295 phylink = phylink_create(&priv->phylink_config, fwnode,
1296 mode, &stmmac_phylink_mac_ops);
1297 if (IS_ERR(phylink))
1298 return PTR_ERR(phylink);
1299
1300 priv->phylink = phylink;
1301 return 0;
1302 }
1303
1304 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1305 struct stmmac_dma_conf *dma_conf)
1306 {
1307 u32 rx_cnt = priv->plat->rx_queues_to_use;
1308 unsigned int desc_size;
1309 void *head_rx;
1310 u32 queue;
1311
1312 /* Display RX rings */
1313 for (queue = 0; queue < rx_cnt; queue++) {
1314 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1315
1316 pr_info("\tRX Queue %u rings\n", queue);
1317
1318 if (priv->extend_desc) {
1319 head_rx = (void *)rx_q->dma_erx;
1320 desc_size = sizeof(struct dma_extended_desc);
1321 } else {
1322 head_rx = (void *)rx_q->dma_rx;
1323 desc_size = sizeof(struct dma_desc);
1324 }
1325
1326 /* Display RX ring */
1327 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1328 rx_q->dma_rx_phy, desc_size);
1329 }
1330 }
1331
1332 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1333 struct stmmac_dma_conf *dma_conf)
1334 {
1335 u32 tx_cnt = priv->plat->tx_queues_to_use;
1336 unsigned int desc_size;
1337 void *head_tx;
1338 u32 queue;
1339
1340 /* Display TX rings */
1341 for (queue = 0; queue < tx_cnt; queue++) {
1342 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1343
1344 pr_info("\tTX Queue %d rings\n", queue);
1345
1346 if (priv->extend_desc) {
1347 head_tx = (void *)tx_q->dma_etx;
1348 desc_size = sizeof(struct dma_extended_desc);
1349 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1350 head_tx = (void *)tx_q->dma_entx;
1351 desc_size = sizeof(struct dma_edesc);
1352 } else {
1353 head_tx = (void *)tx_q->dma_tx;
1354 desc_size = sizeof(struct dma_desc);
1355 }
1356
1357 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1358 tx_q->dma_tx_phy, desc_size);
1359 }
1360 }
1361
1362 static void stmmac_display_rings(struct stmmac_priv *priv,
1363 struct stmmac_dma_conf *dma_conf)
1364 {
1365 /* Display RX ring */
1366 stmmac_display_rx_rings(priv, dma_conf);
1367
1368 /* Display TX ring */
1369 stmmac_display_tx_rings(priv, dma_conf);
1370 }
1371
1372 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1373 {
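	/* XDP programs expect XDP_PACKET_HEADROOM in front of the frame;
	 * otherwise reserve the usual NET_SKB_PAD for the network stack.
	 */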
1374 if (stmmac_xdp_is_enabled(priv))
1375 return XDP_PACKET_HEADROOM;
1376
1377 return NET_SKB_PAD;
1378 }
1379
1380 static int stmmac_set_bfsize(int mtu, int bufsize)
1381 {
1382 int ret = bufsize;
1383
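	/* Pick the smallest supported DMA buffer size that can hold an
	 * MTU-sized frame; standard MTUs keep DEFAULT_BUFSIZE.
	 */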
1384 if (mtu >= BUF_SIZE_8KiB)
1385 ret = BUF_SIZE_16KiB;
1386 else if (mtu >= BUF_SIZE_4KiB)
1387 ret = BUF_SIZE_8KiB;
1388 else if (mtu >= BUF_SIZE_2KiB)
1389 ret = BUF_SIZE_4KiB;
1390 else if (mtu > DEFAULT_BUFSIZE)
1391 ret = BUF_SIZE_2KiB;
1392 else
1393 ret = DEFAULT_BUFSIZE;
1394
1395 return ret;
1396 }
1397
1398 /**
1399 * stmmac_clear_rx_descriptors - clear RX descriptors
1400 * @priv: driver private structure
1401 * @dma_conf: structure to take the dma data
1402 * @queue: RX queue index
1403 * Description: this function is called to clear the RX descriptors
1404 * whether basic or extended descriptors are used.
1405 */
1406 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1407 struct stmmac_dma_conf *dma_conf,
1408 u32 queue)
1409 {
1410 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1411 int i;
1412
1413 /* Clear the RX descriptors */
1414 for (i = 0; i < dma_conf->dma_rx_size; i++)
1415 if (priv->extend_desc)
1416 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1417 priv->use_riwt, priv->mode,
1418 (i == dma_conf->dma_rx_size - 1),
1419 dma_conf->dma_buf_sz);
1420 else
1421 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1422 priv->use_riwt, priv->mode,
1423 (i == dma_conf->dma_rx_size - 1),
1424 dma_conf->dma_buf_sz);
1425 }
1426
1427 /**
1428 * stmmac_clear_tx_descriptors - clear tx descriptors
1429 * @priv: driver private structure
1430 * @dma_conf: structure to take the dma data
1431 * @queue: TX queue index.
1432 * Description: this function is called to clear the TX descriptors
1433 * whether basic or extended descriptors are used.
1434 */
1435 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1436 struct stmmac_dma_conf *dma_conf,
1437 u32 queue)
1438 {
1439 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1440 int i;
1441
1442 /* Clear the TX descriptors */
1443 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1444 int last = (i == (dma_conf->dma_tx_size - 1));
1445 struct dma_desc *p;
1446
1447 if (priv->extend_desc)
1448 p = &tx_q->dma_etx[i].basic;
1449 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1450 p = &tx_q->dma_entx[i].basic;
1451 else
1452 p = &tx_q->dma_tx[i];
1453
1454 stmmac_init_tx_desc(priv, p, priv->mode, last);
1455 }
1456 }
1457
1458 /**
1459 * stmmac_clear_descriptors - clear descriptors
1460 * @priv: driver private structure
1461 * @dma_conf: structure to take the dma data
1462 * Description: this function is called to clear the TX and RX descriptors
1463 * whether basic or extended descriptors are used.
1464 */
1465 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1466 struct stmmac_dma_conf *dma_conf)
1467 {
1468 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1469 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1470 u32 queue;
1471
1472 /* Clear the RX descriptors */
1473 for (queue = 0; queue < rx_queue_cnt; queue++)
1474 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1475
1476 /* Clear the TX descriptors */
1477 for (queue = 0; queue < tx_queue_cnt; queue++)
1478 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1479 }
1480
1481 /**
1482 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1483 * @priv: driver private structure
1484 * @dma_conf: structure to take the dma data
1485 * @p: descriptor pointer
1486 * @i: descriptor index
1487 * @flags: gfp flag
1488 * @queue: RX queue index
1489 * Description: this function is called to allocate a receive buffer, perform
1490 * the DMA mapping and init the descriptor.
1491 */
1492 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1493 struct stmmac_dma_conf *dma_conf,
1494 struct dma_desc *p,
1495 int i, gfp_t flags, u32 queue)
1496 {
1497 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1498 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1499 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1500
1501 if (priv->dma_cap.host_dma_width <= 32)
1502 gfp |= GFP_DMA32;
1503
1504 if (!buf->page) {
1505 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1506 if (!buf->page)
1507 return -ENOMEM;
1508 buf->page_offset = stmmac_rx_offset(priv);
1509 }
1510
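	/* With Split Header (SPH) enabled, the MAC writes the packet header
	 * into the first buffer and the payload into this second page.
	 */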
1511 if (priv->sph && !buf->sec_page) {
1512 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1513 if (!buf->sec_page)
1514 return -ENOMEM;
1515
1516 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1517 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1518 } else {
1519 buf->sec_page = NULL;
1520 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1521 }
1522
1523 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1524
1525 stmmac_set_desc_addr(priv, p, buf->addr);
1526 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1527 stmmac_init_desc3(priv, p);
1528
1529 return 0;
1530 }
1531
1532 /**
1533 * stmmac_free_rx_buffer - free RX dma buffers
1534 * @priv: private structure
1535 * @rx_q: RX queue
1536 * @i: buffer index.
1537 */
1538 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1539 struct stmmac_rx_queue *rx_q,
1540 int i)
1541 {
1542 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1543
1544 if (buf->page)
1545 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1546 buf->page = NULL;
1547
1548 if (buf->sec_page)
1549 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1550 buf->sec_page = NULL;
1551 }
1552
1553 /**
1554 * stmmac_free_tx_buffer - free TX dma buffers
1555 * @priv: private structure
1556 * @dma_conf: structure to take the dma data
1557 * @queue: TX queue index
1558 * @i: buffer index.
1559 */
1560 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1561 struct stmmac_dma_conf *dma_conf,
1562 u32 queue, int i)
1563 {
1564 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1565
1566 if (tx_q->tx_skbuff_dma[i].buf &&
1567 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1568 if (tx_q->tx_skbuff_dma[i].map_as_page)
1569 dma_unmap_page(priv->device,
1570 tx_q->tx_skbuff_dma[i].buf,
1571 tx_q->tx_skbuff_dma[i].len,
1572 DMA_TO_DEVICE);
1573 else
1574 dma_unmap_single(priv->device,
1575 tx_q->tx_skbuff_dma[i].buf,
1576 tx_q->tx_skbuff_dma[i].len,
1577 DMA_TO_DEVICE);
1578 }
1579
1580 if (tx_q->xdpf[i] &&
1581 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1582 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1583 xdp_return_frame(tx_q->xdpf[i]);
1584 tx_q->xdpf[i] = NULL;
1585 }
1586
1587 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1588 tx_q->xsk_frames_done++;
1589
1590 if (tx_q->tx_skbuff[i] &&
1591 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1592 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1593 tx_q->tx_skbuff[i] = NULL;
1594 }
1595
1596 tx_q->tx_skbuff_dma[i].buf = 0;
1597 tx_q->tx_skbuff_dma[i].map_as_page = false;
1598 }
1599
1600 /**
1601 * dma_free_rx_skbufs - free RX dma buffers
1602 * @priv: private structure
1603 * @dma_conf: structure to take the dma data
1604 * @queue: RX queue index
1605 */
1606 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1607 struct stmmac_dma_conf *dma_conf,
1608 u32 queue)
1609 {
1610 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 int i;
1612
1613 for (i = 0; i < dma_conf->dma_rx_size; i++)
1614 stmmac_free_rx_buffer(priv, rx_q, i);
1615 }
1616
1617 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1618 struct stmmac_dma_conf *dma_conf,
1619 u32 queue, gfp_t flags)
1620 {
1621 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1622 int i;
1623
1624 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1625 struct dma_desc *p;
1626 int ret;
1627
1628 if (priv->extend_desc)
1629 p = &((rx_q->dma_erx + i)->basic);
1630 else
1631 p = rx_q->dma_rx + i;
1632
1633 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1634 queue);
1635 if (ret)
1636 return ret;
1637
1638 rx_q->buf_alloc_num++;
1639 }
1640
1641 return 0;
1642 }
1643
1644 /**
1645 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1646 * @priv: private structure
1647 * @dma_conf: structure to take the dma data
1648 * @queue: RX queue index
1649 */
1650 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1651 struct stmmac_dma_conf *dma_conf,
1652 u32 queue)
1653 {
1654 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1655 int i;
1656
1657 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1658 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1659
1660 if (!buf->xdp)
1661 continue;
1662
1663 xsk_buff_free(buf->xdp);
1664 buf->xdp = NULL;
1665 }
1666 }
1667
1668 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1669 struct stmmac_dma_conf *dma_conf,
1670 u32 queue)
1671 {
1672 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1673 int i;
1674
1675 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1676 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1677 * use this macro to make sure there are no size violations.
1678 */
1679 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1680
1681 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1682 struct stmmac_rx_buffer *buf;
1683 dma_addr_t dma_addr;
1684 struct dma_desc *p;
1685
1686 if (priv->extend_desc)
1687 p = (struct dma_desc *)(rx_q->dma_erx + i);
1688 else
1689 p = rx_q->dma_rx + i;
1690
1691 buf = &rx_q->buf_pool[i];
1692
1693 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1694 if (!buf->xdp)
1695 return -ENOMEM;
1696
1697 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1698 stmmac_set_desc_addr(priv, p, dma_addr);
1699 rx_q->buf_alloc_num++;
1700 }
1701
1702 return 0;
1703 }
1704
1705 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1706 {
1707 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1708 return NULL;
1709
1710 return xsk_get_pool_from_qid(priv->dev, queue);
1711 }
1712
1713 /**
1714 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1715 * @priv: driver private structure
1716 * @dma_conf: structure to take the dma data
1717 * @queue: RX queue index
1718 * @flags: gfp flag.
1719 * Description: this function initializes the DMA RX descriptors
1720 * and allocates the socket buffers. It supports the chained and ring
1721 * modes.
1722 */
1723 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1724 struct stmmac_dma_conf *dma_conf,
1725 u32 queue, gfp_t flags)
1726 {
1727 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1728 int ret;
1729
1730 netif_dbg(priv, probe, priv->dev,
1731 "(%s) dma_rx_phy=0x%08x\n", __func__,
1732 (u32)rx_q->dma_rx_phy);
1733
1734 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1735
1736 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1737
1738 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1739
1740 if (rx_q->xsk_pool) {
1741 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1742 MEM_TYPE_XSK_BUFF_POOL,
1743 NULL));
1744 netdev_info(priv->dev,
1745 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1746 rx_q->queue_index);
1747 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1748 } else {
1749 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1750 MEM_TYPE_PAGE_POOL,
1751 rx_q->page_pool));
1752 netdev_info(priv->dev,
1753 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1754 rx_q->queue_index);
1755 }
1756
1757 if (rx_q->xsk_pool) {
1758 /* RX XDP ZC buffer pool may not be populated, e.g.
1759 * xdpsock TX-only.
1760 */
1761 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1762 } else {
1763 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1764 if (ret < 0)
1765 return -ENOMEM;
1766 }
1767
1768 /* Setup the chained descriptor addresses */
1769 if (priv->mode == STMMAC_CHAIN_MODE) {
1770 if (priv->extend_desc)
1771 stmmac_mode_init(priv, rx_q->dma_erx,
1772 rx_q->dma_rx_phy,
1773 dma_conf->dma_rx_size, 1);
1774 else
1775 stmmac_mode_init(priv, rx_q->dma_rx,
1776 rx_q->dma_rx_phy,
1777 dma_conf->dma_rx_size, 0);
1778 }
1779
1780 return 0;
1781 }
1782
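/**
 * init_dma_rx_desc_rings - init the RX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: it initializes every RX queue and, on failure, releases the
 * buffers of the queues initialized so far.
 */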
1783 static int init_dma_rx_desc_rings(struct net_device *dev,
1784 struct stmmac_dma_conf *dma_conf,
1785 gfp_t flags)
1786 {
1787 struct stmmac_priv *priv = netdev_priv(dev);
1788 u32 rx_count = priv->plat->rx_queues_to_use;
1789 int queue;
1790 int ret;
1791
1792 /* RX INITIALIZATION */
1793 netif_dbg(priv, probe, priv->dev,
1794 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1795
1796 for (queue = 0; queue < rx_count; queue++) {
1797 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1798 if (ret)
1799 goto err_init_rx_buffers;
1800 }
1801
1802 return 0;
1803
1804 err_init_rx_buffers:
1805 while (queue >= 0) {
1806 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1807
1808 if (rx_q->xsk_pool)
1809 dma_free_rx_xskbufs(priv, dma_conf, queue);
1810 else
1811 dma_free_rx_skbufs(priv, dma_conf, queue);
1812
1813 rx_q->buf_alloc_num = 0;
1814 rx_q->xsk_pool = NULL;
1815
1816 queue--;
1817 }
1818
1819 return ret;
1820 }
1821
1822 /**
1823 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1824 * @priv: driver private structure
1825 * @dma_conf: structure to take the dma data
1826 * @queue: TX queue index
1827 * Description: this function initializes the DMA TX descriptors
1828 * and allocates the socket buffers. It supports the chained and ring
1829 * modes.
1830 */
1831 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1832 struct stmmac_dma_conf *dma_conf,
1833 u32 queue)
1834 {
1835 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1836 int i;
1837
1838 netif_dbg(priv, probe, priv->dev,
1839 "(%s) dma_tx_phy=0x%08x\n", __func__,
1840 (u32)tx_q->dma_tx_phy);
1841
1842 /* Setup the chained descriptor addresses */
1843 if (priv->mode == STMMAC_CHAIN_MODE) {
1844 if (priv->extend_desc)
1845 stmmac_mode_init(priv, tx_q->dma_etx,
1846 tx_q->dma_tx_phy,
1847 dma_conf->dma_tx_size, 1);
1848 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1849 stmmac_mode_init(priv, tx_q->dma_tx,
1850 tx_q->dma_tx_phy,
1851 dma_conf->dma_tx_size, 0);
1852 }
1853
1854 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1855
1856 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1857 struct dma_desc *p;
1858
1859 if (priv->extend_desc)
1860 p = &((tx_q->dma_etx + i)->basic);
1861 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1862 p = &((tx_q->dma_entx + i)->basic);
1863 else
1864 p = tx_q->dma_tx + i;
1865
1866 stmmac_clear_desc(priv, p);
1867
1868 tx_q->tx_skbuff_dma[i].buf = 0;
1869 tx_q->tx_skbuff_dma[i].map_as_page = false;
1870 tx_q->tx_skbuff_dma[i].len = 0;
1871 tx_q->tx_skbuff_dma[i].last_segment = false;
1872 tx_q->tx_skbuff[i] = NULL;
1873 }
1874
1875 return 0;
1876 }
1877
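/**
 * init_dma_tx_desc_rings - init the TX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: it initializes the TX descriptors of every queue.
 */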
1878 static int init_dma_tx_desc_rings(struct net_device *dev,
1879 struct stmmac_dma_conf *dma_conf)
1880 {
1881 struct stmmac_priv *priv = netdev_priv(dev);
1882 u32 tx_queue_cnt;
1883 u32 queue;
1884
1885 tx_queue_cnt = priv->plat->tx_queues_to_use;
1886
1887 for (queue = 0; queue < tx_queue_cnt; queue++)
1888 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1889
1890 return 0;
1891 }
1892
1893 /**
1894 * init_dma_desc_rings - init the RX/TX descriptor rings
1895 * @dev: net device structure
1896 * @dma_conf: structure to take the dma data
1897 * @flags: gfp flag.
1898 * Description: this function initializes the DMA RX/TX descriptors
1899 * and allocates the socket buffers. It supports the chained and ring
1900 * modes.
1901 */
1902 static int init_dma_desc_rings(struct net_device *dev,
1903 struct stmmac_dma_conf *dma_conf,
1904 gfp_t flags)
1905 {
1906 struct stmmac_priv *priv = netdev_priv(dev);
1907 int ret;
1908
1909 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1910 if (ret)
1911 return ret;
1912
1913 ret = init_dma_tx_desc_rings(dev, dma_conf);
1914
1915 stmmac_clear_descriptors(priv, dma_conf);
1916
1917 if (netif_msg_hw(priv))
1918 stmmac_display_rings(priv, dma_conf);
1919
1920 return ret;
1921 }
1922
1923 /**
1924 * dma_free_tx_skbufs - free TX dma buffers
1925 * @priv: private structure
1926 * @dma_conf: structure to take the dma data
1927 * @queue: TX queue index
1928 */
1929 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1930 struct stmmac_dma_conf *dma_conf,
1931 u32 queue)
1932 {
1933 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1934 int i;
1935
1936 tx_q->xsk_frames_done = 0;
1937
1938 for (i = 0; i < dma_conf->dma_tx_size; i++)
1939 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1940
1941 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1942 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1943 tx_q->xsk_frames_done = 0;
1944 tx_q->xsk_pool = NULL;
1945 }
1946 }
1947
1948 /**
1949 * stmmac_free_tx_skbufs - free TX skb buffers
1950 * @priv: private structure
1951 */
1952 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1953 {
1954 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1955 u32 queue;
1956
1957 for (queue = 0; queue < tx_queue_cnt; queue++)
1958 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1959 }
1960
1961 /**
1962 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1963 * @priv: private structure
1964 * @dma_conf: structure to take the dma data
1965 * @queue: RX queue index
1966 */
1967 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1968 struct stmmac_dma_conf *dma_conf,
1969 u32 queue)
1970 {
1971 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1972
1973 /* Release the DMA RX socket buffers */
1974 if (rx_q->xsk_pool)
1975 dma_free_rx_xskbufs(priv, dma_conf, queue);
1976 else
1977 dma_free_rx_skbufs(priv, dma_conf, queue);
1978
1979 rx_q->buf_alloc_num = 0;
1980 rx_q->xsk_pool = NULL;
1981
1982 /* Free DMA regions of consistent memory previously allocated */
1983 if (!priv->extend_desc)
1984 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1985 sizeof(struct dma_desc),
1986 rx_q->dma_rx, rx_q->dma_rx_phy);
1987 else
1988 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1989 sizeof(struct dma_extended_desc),
1990 rx_q->dma_erx, rx_q->dma_rx_phy);
1991
1992 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1993 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1994
1995 kfree(rx_q->buf_pool);
1996 if (rx_q->page_pool)
1997 page_pool_destroy(rx_q->page_pool);
1998 }
1999
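/**
 * free_dma_rx_desc_resources - free RX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */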
2000 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2001 struct stmmac_dma_conf *dma_conf)
2002 {
2003 u32 rx_count = priv->plat->rx_queues_to_use;
2004 u32 queue;
2005
2006 /* Free RX queue resources */
2007 for (queue = 0; queue < rx_count; queue++)
2008 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2009 }
2010
2011 /**
2012 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2013 * @priv: private structure
2014 * @dma_conf: structure to take the dma data
2015 * @queue: TX queue index
2016 */
2017 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2018 struct stmmac_dma_conf *dma_conf,
2019 u32 queue)
2020 {
2021 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2022 size_t size;
2023 void *addr;
2024
2025 /* Release the DMA TX socket buffers */
2026 dma_free_tx_skbufs(priv, dma_conf, queue);
2027
2028 if (priv->extend_desc) {
2029 size = sizeof(struct dma_extended_desc);
2030 addr = tx_q->dma_etx;
2031 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2032 size = sizeof(struct dma_edesc);
2033 addr = tx_q->dma_entx;
2034 } else {
2035 size = sizeof(struct dma_desc);
2036 addr = tx_q->dma_tx;
2037 }
2038
2039 size *= dma_conf->dma_tx_size;
2040
2041 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2042
2043 kfree(tx_q->tx_skbuff_dma);
2044 kfree(tx_q->tx_skbuff);
2045 }
2046
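/**
 * free_dma_tx_desc_resources - free TX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */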
2047 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2048 struct stmmac_dma_conf *dma_conf)
2049 {
2050 u32 tx_count = priv->plat->tx_queues_to_use;
2051 u32 queue;
2052
2053 /* Free TX queue resources */
2054 for (queue = 0; queue < tx_count; queue++)
2055 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2056 }
2057
2058 /**
2059 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2060 * @priv: private structure
2061 * @dma_conf: structure to take the dma data
2062 * @queue: RX queue index
2063 * Description: according to which descriptor can be used (extended or basic)
2064 * this function allocates the resources for the RX path. It pre-allocates
2065 * the RX buffers (page pool) in order to allow the zero-copy mechanism.
2067 */
2068 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2069 struct stmmac_dma_conf *dma_conf,
2070 u32 queue)
2071 {
2072 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2073 struct stmmac_channel *ch = &priv->channel[queue];
2074 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2075 struct page_pool_params pp_params = { 0 };
2076 unsigned int dma_buf_sz_pad, num_pages;
2077 unsigned int napi_id;
2078 int ret;
2079
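	/* Size the page pool pages so that the RX headroom, the DMA buffer
	 * and the skb_shared_info tailroom fit into a power-of-two number
	 * of pages.
	 */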
2080 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2081 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2082 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2083
2084 rx_q->queue_index = queue;
2085 rx_q->priv_data = priv;
2086 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2087
2088 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2089 pp_params.pool_size = dma_conf->dma_rx_size;
2090 pp_params.order = order_base_2(num_pages);
2091 pp_params.nid = dev_to_node(priv->device);
2092 pp_params.dev = priv->device;
2093 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2094 pp_params.offset = stmmac_rx_offset(priv);
2095 pp_params.max_len = dma_conf->dma_buf_sz;
2096
2097 if (priv->sph) {
2098 pp_params.offset = 0;
2099 pp_params.max_len += stmmac_rx_offset(priv);
2100 }
2101
2102 rx_q->page_pool = page_pool_create(&pp_params);
2103 if (IS_ERR(rx_q->page_pool)) {
2104 ret = PTR_ERR(rx_q->page_pool);
2105 rx_q->page_pool = NULL;
2106 return ret;
2107 }
2108
2109 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2110 sizeof(*rx_q->buf_pool),
2111 GFP_KERNEL);
2112 if (!rx_q->buf_pool)
2113 return -ENOMEM;
2114
2115 if (priv->extend_desc) {
2116 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2117 dma_conf->dma_rx_size *
2118 sizeof(struct dma_extended_desc),
2119 &rx_q->dma_rx_phy,
2120 GFP_KERNEL);
2121 if (!rx_q->dma_erx)
2122 return -ENOMEM;
2123
2124 } else {
2125 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2126 dma_conf->dma_rx_size *
2127 sizeof(struct dma_desc),
2128 &rx_q->dma_rx_phy,
2129 GFP_KERNEL);
2130 if (!rx_q->dma_rx)
2131 return -ENOMEM;
2132 }
2133
2134 if (stmmac_xdp_is_enabled(priv) &&
2135 test_bit(queue, priv->af_xdp_zc_qps))
2136 napi_id = ch->rxtx_napi.napi_id;
2137 else
2138 napi_id = ch->rx_napi.napi_id;
2139
2140 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2141 rx_q->queue_index,
2142 napi_id);
2143 if (ret) {
2144 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2145 return -EINVAL;
2146 }
2147
2148 return 0;
2149 }
2150
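/**
 * alloc_dma_rx_desc_resources - alloc RX resources (all queues).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the resources of every RX queue and frees
 * everything already allocated on failure.
 */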
2151 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2152 struct stmmac_dma_conf *dma_conf)
2153 {
2154 u32 rx_count = priv->plat->rx_queues_to_use;
2155 u32 queue;
2156 int ret;
2157
2158 /* RX queues buffers and DMA */
2159 for (queue = 0; queue < rx_count; queue++) {
2160 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2161 if (ret)
2162 goto err_dma;
2163 }
2164
2165 return 0;
2166
2167 err_dma:
2168 free_dma_rx_desc_resources(priv, dma_conf);
2169
2170 return ret;
2171 }
2172
2173 /**
2174 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2175 * @priv: private structure
2176 * @dma_conf: structure to take the dma data
2177 * @queue: TX queue index
2178 * Description: according to which descriptor can be used (extended or basic)
2179 * this function allocates the resources for the TX path: the descriptor
2180 * ring and the per-descriptor tx_skbuff/tx_skbuff_dma bookkeeping arrays.
2182 */
2183 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2184 struct stmmac_dma_conf *dma_conf,
2185 u32 queue)
2186 {
2187 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2188 size_t size;
2189 void *addr;
2190
2191 tx_q->queue_index = queue;
2192 tx_q->priv_data = priv;
2193
2194 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2195 sizeof(*tx_q->tx_skbuff_dma),
2196 GFP_KERNEL);
2197 if (!tx_q->tx_skbuff_dma)
2198 return -ENOMEM;
2199
2200 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2201 sizeof(struct sk_buff *),
2202 GFP_KERNEL);
2203 if (!tx_q->tx_skbuff)
2204 return -ENOMEM;
2205
2206 if (priv->extend_desc)
2207 size = sizeof(struct dma_extended_desc);
2208 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2209 size = sizeof(struct dma_edesc);
2210 else
2211 size = sizeof(struct dma_desc);
2212
2213 size *= dma_conf->dma_tx_size;
2214
2215 addr = dma_alloc_coherent(priv->device, size,
2216 &tx_q->dma_tx_phy, GFP_KERNEL);
2217 if (!addr)
2218 return -ENOMEM;
2219
2220 if (priv->extend_desc)
2221 tx_q->dma_etx = addr;
2222 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2223 tx_q->dma_entx = addr;
2224 else
2225 tx_q->dma_tx = addr;
2226
2227 return 0;
2228 }
2229
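/**
 * alloc_dma_tx_desc_resources - alloc TX resources (all queues).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the resources of every TX queue and frees
 * everything already allocated on failure.
 */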
2230 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2231 struct stmmac_dma_conf *dma_conf)
2232 {
2233 u32 tx_count = priv->plat->tx_queues_to_use;
2234 u32 queue;
2235 int ret;
2236
2237 /* TX queues buffers and DMA */
2238 for (queue = 0; queue < tx_count; queue++) {
2239 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2240 if (ret)
2241 goto err_dma;
2242 }
2243
2244 return 0;
2245
2246 err_dma:
2247 free_dma_tx_desc_resources(priv, dma_conf);
2248 return ret;
2249 }
2250
2251 /**
2252 * alloc_dma_desc_resources - alloc TX/RX resources.
2253 * @priv: private structure
2254 * @dma_conf: structure to take the dma data
2255 * Description: according to which descriptor can be used (extended or basic)
2256 * this function allocates the resources for the TX and RX paths. In case of
2257 * reception, for example, it pre-allocates the RX socket buffers in order to
2258 * allow the zero-copy mechanism.
2259 */
2260 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2261 struct stmmac_dma_conf *dma_conf)
2262 {
2263 /* RX Allocation */
2264 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2265
2266 if (ret)
2267 return ret;
2268
2269 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2270
2271 return ret;
2272 }
2273
2274 /**
2275 * free_dma_desc_resources - free dma desc resources
2276 * @priv: private structure
2277 * @dma_conf: structure to take the dma data
2278 */
2279 static void free_dma_desc_resources(struct stmmac_priv *priv,
2280 struct stmmac_dma_conf *dma_conf)
2281 {
2282 /* Release the DMA TX socket buffers */
2283 free_dma_tx_desc_resources(priv, dma_conf);
2284
2285 /* Release the DMA RX socket buffers later
2286 * to ensure all pending XDP_TX buffers are returned.
2287 */
2288 free_dma_rx_desc_resources(priv, dma_conf);
2289 }
2290
2291 /**
2292 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2293 * @priv: driver private structure
2294 * Description: It is used for enabling the rx queues in the MAC
2295 */
2296 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2297 {
2298 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2299 int queue;
2300 u8 mode;
2301
2302 for (queue = 0; queue < rx_queues_count; queue++) {
2303 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2304 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2305 }
2306 }
2307
2308 /**
2309 * stmmac_start_rx_dma - start RX DMA channel
2310 * @priv: driver private structure
2311 * @chan: RX channel index
2312 * Description:
2313 * This starts an RX DMA channel
2314 */
2315 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2316 {
2317 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2318 stmmac_start_rx(priv, priv->ioaddr, chan);
2319 }
2320
2321 /**
2322 * stmmac_start_tx_dma - start TX DMA channel
2323 * @priv: driver private structure
2324 * @chan: TX channel index
2325 * Description:
2326 * This starts a TX DMA channel
2327 */
2328 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2329 {
2330 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2331 stmmac_start_tx(priv, priv->ioaddr, chan);
2332 }
2333
2334 /**
2335 * stmmac_stop_rx_dma - stop RX DMA channel
2336 * @priv: driver private structure
2337 * @chan: RX channel index
2338 * Description:
2339 * This stops an RX DMA channel
2340 */
2341 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2342 {
2343 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2344 stmmac_stop_rx(priv, priv->ioaddr, chan);
2345 }
2346
2347 /**
2348 * stmmac_stop_tx_dma - stop TX DMA channel
2349 * @priv: driver private structure
2350 * @chan: TX channel index
2351 * Description:
2352 * This stops a TX DMA channel
2353 */
2354 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2355 {
2356 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2357 stmmac_stop_tx(priv, priv->ioaddr, chan);
2358 }
2359
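/**
 * stmmac_enable_all_dma_irq - enable RX/TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: it unmasks the RX and TX DMA interrupts of every channel.
 */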
2360 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2361 {
2362 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2363 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2364 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2365 u32 chan;
2366
2367 for (chan = 0; chan < dma_csr_ch; chan++) {
2368 struct stmmac_channel *ch = &priv->channel[chan];
2369 unsigned long flags;
2370
2371 spin_lock_irqsave(&ch->lock, flags);
2372 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2373 spin_unlock_irqrestore(&ch->lock, flags);
2374 }
2375 }
2376
2377 /**
2378 * stmmac_start_all_dma - start all RX and TX DMA channels
2379 * @priv: driver private structure
2380 * Description:
2381 * This starts all the RX and TX DMA channels
2382 */
2383 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2384 {
2385 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2386 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2387 u32 chan = 0;
2388
2389 for (chan = 0; chan < rx_channels_count; chan++)
2390 stmmac_start_rx_dma(priv, chan);
2391
2392 for (chan = 0; chan < tx_channels_count; chan++)
2393 stmmac_start_tx_dma(priv, chan);
2394 }
2395
2396 /**
2397 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2398 * @priv: driver private structure
2399 * Description:
2400 * This stops the RX and TX DMA channels
2401 */
2402 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2403 {
2404 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2405 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2406 u32 chan = 0;
2407
2408 for (chan = 0; chan < rx_channels_count; chan++)
2409 stmmac_stop_rx_dma(priv, chan);
2410
2411 for (chan = 0; chan < tx_channels_count; chan++)
2412 stmmac_stop_tx_dma(priv, chan);
2413 }
2414
2415 /**
2416 * stmmac_dma_operation_mode - HW DMA operation mode
2417 * @priv: driver private structure
2418 * Description: it is used for configuring the DMA operation mode register in
2419 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2420 */
2421 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2422 {
2423 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2424 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2425 int rxfifosz = priv->plat->rx_fifo_size;
2426 int txfifosz = priv->plat->tx_fifo_size;
2427 u32 txmode = 0;
2428 u32 rxmode = 0;
2429 u32 chan = 0;
2430 u8 qmode = 0;
2431
2432 if (rxfifosz == 0)
2433 rxfifosz = priv->dma_cap.rx_fifo_size;
2434 if (txfifosz == 0)
2435 txfifosz = priv->dma_cap.tx_fifo_size;
2436
2437 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2438 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2439 rxfifosz /= rx_channels_count;
2440 txfifosz /= tx_channels_count;
2441 }
2442
2443 if (priv->plat->force_thresh_dma_mode) {
2444 txmode = tc;
2445 rxmode = tc;
2446 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2447 /*
2448 * In case of GMAC, SF mode can be enabled
2449 * to perform the TX COE in HW. This depends on:
2450 * 1) TX COE being actually supported
2451 * 2) there being no bugged Jumbo frame support
2452 *    that requires not inserting the csum in the TDES.
2453 */
2454 txmode = SF_DMA_MODE;
2455 rxmode = SF_DMA_MODE;
2456 priv->xstats.threshold = SF_DMA_MODE;
2457 } else {
2458 txmode = tc;
2459 rxmode = SF_DMA_MODE;
2460 }
2461
2462 /* configure all channels */
2463 for (chan = 0; chan < rx_channels_count; chan++) {
2464 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2465 u32 buf_size;
2466
2467 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2468
2469 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2470 rxfifosz, qmode);
2471
2472 if (rx_q->xsk_pool) {
2473 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2474 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2475 buf_size,
2476 chan);
2477 } else {
2478 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2479 priv->dma_conf.dma_buf_sz,
2480 chan);
2481 }
2482 }
2483
2484 for (chan = 0; chan < tx_channels_count; chan++) {
2485 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2486
2487 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2488 txfifosz, qmode);
2489 }
2490 }
2491
2492 static void stmmac_xsk_request_timestamp(void *_priv)
2493 {
2494 struct stmmac_metadata_request *meta_req = _priv;
2495
2496 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2497 *meta_req->set_ic = true;
2498 }
2499
2500 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2501 {
2502 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2503 struct stmmac_priv *priv = tx_compl->priv;
2504 struct dma_desc *desc = tx_compl->desc;
2505 bool found = false;
2506 u64 ns = 0;
2507
2508 if (!priv->hwts_tx_en)
2509 return 0;
2510
2511 /* check tx tstamp status */
2512 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2513 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2514 found = true;
2515 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2516 found = true;
2517 }
2518
2519 if (found) {
2520 ns -= priv->plat->cdc_error_adj;
2521 return ns_to_ktime(ns);
2522 }
2523
2524 return 0;
2525 }
2526
2527 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2528 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2529 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2530 };
2531
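/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of XSK descriptors to submit
 * Description: it peeks descriptors from the XSK pool and submits them to
 * the TX DMA channel that is shared with the slow path.
 * Return: true if the budget was not exhausted and no XSK descriptors are
 * left pending, false otherwise.
 */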
2532 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2533 {
2534 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2535 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2536 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2537 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2538 unsigned int entry = tx_q->cur_tx;
2539 struct dma_desc *tx_desc = NULL;
2540 struct xdp_desc xdp_desc;
2541 bool work_done = true;
2542 u32 tx_set_ic_bit = 0;
2543
2544 /* Avoids TX time-out as we are sharing with slow path */
2545 txq_trans_cond_update(nq);
2546
2547 budget = min(budget, stmmac_tx_avail(priv, queue));
2548
2549 while (budget-- > 0) {
2550 struct stmmac_metadata_request meta_req;
2551 struct xsk_tx_metadata *meta = NULL;
2552 dma_addr_t dma_addr;
2553 bool set_ic;
2554
2555 /* We are sharing with the slow path and stop XSK TX desc submission
2556 * when the available TX ring space is less than the threshold.
2557 */
2558 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2559 !netif_carrier_ok(priv->dev)) {
2560 work_done = false;
2561 break;
2562 }
2563
2564 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2565 break;
2566
2567 if (priv->est && priv->est->enable &&
2568 priv->est->max_sdu[queue] &&
2569 xdp_desc.len > priv->est->max_sdu[queue]) {
2570 priv->xstats.max_sdu_txq_drop[queue]++;
2571 continue;
2572 }
2573
2574 if (likely(priv->extend_desc))
2575 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2576 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2577 tx_desc = &tx_q->dma_entx[entry].basic;
2578 else
2579 tx_desc = tx_q->dma_tx + entry;
2580
2581 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2582 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2583 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2584
2585 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2586
2587 /* To return XDP buffer to XSK pool, we simply call
2588 * xsk_tx_completed(), so we don't need to fill up
2589 * 'buf' and 'xdpf'.
2590 */
2591 tx_q->tx_skbuff_dma[entry].buf = 0;
2592 tx_q->xdpf[entry] = NULL;
2593
2594 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2595 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2596 tx_q->tx_skbuff_dma[entry].last_segment = true;
2597 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2598
2599 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2600
2601 tx_q->tx_count_frames++;
2602
2603 if (!priv->tx_coal_frames[queue])
2604 set_ic = false;
2605 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2606 set_ic = true;
2607 else
2608 set_ic = false;
2609
2610 meta_req.priv = priv;
2611 meta_req.tx_desc = tx_desc;
2612 meta_req.set_ic = &set_ic;
2613 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2614 &meta_req);
2615 if (set_ic) {
2616 tx_q->tx_count_frames = 0;
2617 stmmac_set_tx_ic(priv, tx_desc);
2618 tx_set_ic_bit++;
2619 }
2620
2621 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2622 true, priv->mode, true, true,
2623 xdp_desc.len);
2624
2625 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2626
2627 xsk_tx_metadata_to_compl(meta,
2628 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2629
2630 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2631 entry = tx_q->cur_tx;
2632 }
2633 u64_stats_update_begin(&txq_stats->napi_syncp);
2634 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2635 u64_stats_update_end(&txq_stats->napi_syncp);
2636
2637 if (tx_desc) {
2638 stmmac_flush_tx_descriptors(priv, queue);
2639 xsk_tx_release(pool);
2640 }
2641
2642 /* Return true if both of the following conditions are met:
2643 * a) TX Budget is still available
2644 * b) work_done = true when XSK TX desc peek is empty (no more
2645 * pending XSK TX for transmission)
2646 */
2647 return !!budget && work_done;
2648 }
2649
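/**
 * stmmac_bump_dma_threshold - bump up the TX DMA threshold of a channel
 * @priv: driver private structure
 * @chan: channel index
 * Description: when not in Store-And-Forward mode and the threshold is at
 * most 256, it raises the threshold by 64 and reprograms the DMA operation
 * mode of the channel.
 */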
2650 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2651 {
2652 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2653 tc += 64;
2654
2655 if (priv->plat->force_thresh_dma_mode)
2656 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2657 else
2658 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2659 chan);
2660
2661 priv->xstats.threshold = tc;
2662 }
2663 }
2664
2665 /**
2666 * stmmac_tx_clean - to manage the transmission completion
2667 * @priv: driver private structure
2668 * @budget: napi budget limiting this functions packet handling
2669 * @queue: TX queue index
2670 * @pending_packets: signal to arm the TX coal timer
2671 * Description: it reclaims the transmit resources after transmission completes.
2672 * If some packets still need to be handled, due to TX coalescing, set
2673 * pending_packets to true to make NAPI arm the TX coal timer.
2674 */
2675 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2676 bool *pending_packets)
2677 {
2678 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2679 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2680 unsigned int bytes_compl = 0, pkts_compl = 0;
2681 unsigned int entry, xmits = 0, count = 0;
2682 u32 tx_packets = 0, tx_errors = 0;
2683
2684 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2685
2686 tx_q->xsk_frames_done = 0;
2687
2688 entry = tx_q->dirty_tx;
2689
2690 /* Try to clean all TX complete frames in 1 shot */
2691 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2692 struct xdp_frame *xdpf;
2693 struct sk_buff *skb;
2694 struct dma_desc *p;
2695 int status;
2696
2697 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2698 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2699 xdpf = tx_q->xdpf[entry];
2700 skb = NULL;
2701 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2702 xdpf = NULL;
2703 skb = tx_q->tx_skbuff[entry];
2704 } else {
2705 xdpf = NULL;
2706 skb = NULL;
2707 }
2708
2709 if (priv->extend_desc)
2710 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2711 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2712 p = &tx_q->dma_entx[entry].basic;
2713 else
2714 p = tx_q->dma_tx + entry;
2715
2716 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2717 /* Check if the descriptor is owned by the DMA */
2718 if (unlikely(status & tx_dma_own))
2719 break;
2720
2721 count++;
2722
2723 /* Make sure descriptor fields are read after reading
2724 * the own bit.
2725 */
2726 dma_rmb();
2727
2728 /* Just consider the last segment and ...*/
2729 if (likely(!(status & tx_not_ls))) {
2730 /* ... verify the status error condition */
2731 if (unlikely(status & tx_err)) {
2732 tx_errors++;
2733 if (unlikely(status & tx_err_bump_tc))
2734 stmmac_bump_dma_threshold(priv, queue);
2735 } else {
2736 tx_packets++;
2737 }
2738 if (skb) {
2739 stmmac_get_tx_hwtstamp(priv, p, skb);
2740 } else if (tx_q->xsk_pool &&
2741 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2742 struct stmmac_xsk_tx_complete tx_compl = {
2743 .priv = priv,
2744 .desc = p,
2745 };
2746
2747 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2748 &stmmac_xsk_tx_metadata_ops,
2749 &tx_compl);
2750 }
2751 }
2752
2753 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2754 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2755 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2756 dma_unmap_page(priv->device,
2757 tx_q->tx_skbuff_dma[entry].buf,
2758 tx_q->tx_skbuff_dma[entry].len,
2759 DMA_TO_DEVICE);
2760 else
2761 dma_unmap_single(priv->device,
2762 tx_q->tx_skbuff_dma[entry].buf,
2763 tx_q->tx_skbuff_dma[entry].len,
2764 DMA_TO_DEVICE);
2765 tx_q->tx_skbuff_dma[entry].buf = 0;
2766 tx_q->tx_skbuff_dma[entry].len = 0;
2767 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2768 }
2769
2770 stmmac_clean_desc3(priv, tx_q, p);
2771
2772 tx_q->tx_skbuff_dma[entry].last_segment = false;
2773 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2774
2775 if (xdpf &&
2776 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2777 xdp_return_frame_rx_napi(xdpf);
2778 tx_q->xdpf[entry] = NULL;
2779 }
2780
2781 if (xdpf &&
2782 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2783 xdp_return_frame(xdpf);
2784 tx_q->xdpf[entry] = NULL;
2785 }
2786
2787 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2788 tx_q->xsk_frames_done++;
2789
2790 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2791 if (likely(skb)) {
2792 pkts_compl++;
2793 bytes_compl += skb->len;
2794 dev_consume_skb_any(skb);
2795 tx_q->tx_skbuff[entry] = NULL;
2796 }
2797 }
2798
2799 stmmac_release_tx_desc(priv, p, priv->mode);
2800
2801 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2802 }
2803 tx_q->dirty_tx = entry;
2804
2805 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2806 pkts_compl, bytes_compl);
2807
2808 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2809 queue))) &&
2810 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2811
2812 netif_dbg(priv, tx_done, priv->dev,
2813 "%s: restart transmit\n", __func__);
2814 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2815 }
2816
2817 if (tx_q->xsk_pool) {
2818 bool work_done;
2819
2820 if (tx_q->xsk_frames_done)
2821 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2822
2823 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2824 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2825
2826 /* For XSK TX, we try to send as many as possible.
2827 * If XSK work done (XSK TX desc empty and budget still
2828 * available), return "budget - 1" to reenable TX IRQ.
2829 * Else, return "budget" to make NAPI continue polling.
2830 */
2831 work_done = stmmac_xdp_xmit_zc(priv, queue,
2832 STMMAC_XSK_TX_BUDGET_MAX);
2833 if (work_done)
2834 xmits = budget - 1;
2835 else
2836 xmits = budget;
2837 }
2838
2839 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2840 stmmac_restart_sw_lpi_timer(priv);
2841
2842 /* We still have pending packets, let's call for a new scheduling */
2843 if (tx_q->dirty_tx != tx_q->cur_tx)
2844 *pending_packets = true;
2845
2846 u64_stats_update_begin(&txq_stats->napi_syncp);
2847 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2848 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2849 u64_stats_inc(&txq_stats->napi.tx_clean);
2850 u64_stats_update_end(&txq_stats->napi_syncp);
2851
2852 priv->xstats.tx_errors += tx_errors;
2853
2854 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2855
2856 /* Combine decisions from TX clean and XSK TX */
2857 return max(count, xmits);
2858 }
2859
2860 /**
2861 * stmmac_tx_err - to manage the tx error
2862 * @priv: driver private structure
2863 * @chan: channel index
2864 * Description: it cleans the descriptors and restarts the transmission
2865 * in case of transmission errors.
2866 */
2867 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2868 {
2869 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2870
2871 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2872
2873 stmmac_stop_tx_dma(priv, chan);
2874 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2875 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2876 stmmac_reset_tx_queue(priv, chan);
2877 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2878 tx_q->dma_tx_phy, chan);
2879 stmmac_start_tx_dma(priv, chan);
2880
2881 priv->xstats.tx_errors++;
2882 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2883 }
2884
2885 /**
2886 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2887 * @priv: driver private structure
2888 * @txmode: TX operating mode
2889 * @rxmode: RX operating mode
2890 * @chan: channel index
2891 * Description: it is used for configuring the DMA operation mode at
2892 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2893 * mode.
2894 */
2895 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2896 u32 rxmode, u32 chan)
2897 {
2898 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2899 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2900 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2901 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2902 int rxfifosz = priv->plat->rx_fifo_size;
2903 int txfifosz = priv->plat->tx_fifo_size;
2904
2905 if (rxfifosz == 0)
2906 rxfifosz = priv->dma_cap.rx_fifo_size;
2907 if (txfifosz == 0)
2908 txfifosz = priv->dma_cap.tx_fifo_size;
2909
2910 /* Adjust for real per queue fifo size */
2911 rxfifosz /= rx_channels_count;
2912 txfifosz /= tx_channels_count;
2913
2914 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2915 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2916 }
2917
2918 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2919 {
2920 int ret;
2921
2922 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2923 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2924 if (ret && (ret != -EINVAL)) {
2925 stmmac_global_err(priv);
2926 return true;
2927 }
2928
2929 return false;
2930 }
2931
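/**
 * stmmac_napi_check - check DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction to check (RX, TX or both)
 * Description: it reads the DMA interrupt status of the channel and, when
 * RX and/or TX work is pending, masks the corresponding DMA interrupt and
 * schedules the matching NAPI instance.
 * Return: the DMA interrupt status.
 */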
2932 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2933 {
2934 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2935 &priv->xstats, chan, dir);
2936 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2937 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2938 struct stmmac_channel *ch = &priv->channel[chan];
2939 struct napi_struct *rx_napi;
2940 struct napi_struct *tx_napi;
2941 unsigned long flags;
2942
2943 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2944 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2945
2946 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2947 if (napi_schedule_prep(rx_napi)) {
2948 spin_lock_irqsave(&ch->lock, flags);
2949 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2950 spin_unlock_irqrestore(&ch->lock, flags);
2951 __napi_schedule(rx_napi);
2952 }
2953 }
2954
2955 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2956 if (napi_schedule_prep(tx_napi)) {
2957 spin_lock_irqsave(&ch->lock, flags);
2958 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2959 spin_unlock_irqrestore(&ch->lock, flags);
2960 __napi_schedule(tx_napi);
2961 }
2962 }
2963
2964 return status;
2965 }
2966
2967 /**
2968 * stmmac_dma_interrupt - DMA ISR
2969 * @priv: driver private structure
2970 * Description: this is the DMA ISR. It is called by the main ISR.
2971 * It calls the dwmac dma routine and schedules the poll method in case
2972 * some work can be done.
2973 */
2974 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2975 {
2976 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2977 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2978 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2979 tx_channel_count : rx_channel_count;
2980 u32 chan;
2981 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2982
2983 /* Make sure we never check beyond our status buffer. */
2984 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2985 channels_to_check = ARRAY_SIZE(status);
2986
2987 for (chan = 0; chan < channels_to_check; chan++)
2988 status[chan] = stmmac_napi_check(priv, chan,
2989 DMA_DIR_RXTX);
2990
2991 for (chan = 0; chan < tx_channel_count; chan++) {
2992 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2993 /* Try to bump up the dma threshold on this failure */
2994 stmmac_bump_dma_threshold(priv, chan);
2995 } else if (unlikely(status[chan] == tx_hard_error)) {
2996 stmmac_tx_err(priv, chan);
2997 }
2998 }
2999 }
3000
3001 /**
3002 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
3003 * @priv: driver private structure
3004 * Description: this masks the MMC irq; the counters are managed in SW.
3005 */
3006 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3007 {
3008 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3009 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3010
3011 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3012
3013 if (priv->dma_cap.rmon) {
3014 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3015 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3016 } else
3017 netdev_info(priv->dev, "No MAC Management Counters available\n");
3018 }
3019
3020 /**
3021 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3022 * @priv: driver private structure
3023 * Description:
3024 * newer GMAC chip generations have a register to indicate the
3025 * presence of the optional features/functions.
3026 * This can also be used to override the values passed through the
3027 * platform, which is necessary for old MAC10/100 and GMAC chips.
3028 */
3029 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3030 {
3031 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3032 }
3033
3034 /**
3035 * stmmac_check_ether_addr - check if the MAC addr is valid
3036 * @priv: driver private structure
3037 * Description:
3038 * it verifies whether the MAC address is valid; if it is not, the address
3039 * is read from the HW or, on failure, a random MAC address is generated
3040 */
3041 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3042 {
3043 u8 addr[ETH_ALEN];
3044
3045 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3046 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3047 if (is_valid_ether_addr(addr))
3048 eth_hw_addr_set(priv->dev, addr);
3049 else
3050 eth_hw_addr_random(priv->dev);
3051 dev_info(priv->device, "device MAC address %pM\n",
3052 priv->dev->dev_addr);
3053 }
3054 }
3055
3056 /**
3057 * stmmac_init_dma_engine - DMA init.
3058 * @priv: driver private structure
3059 * Description:
3060 * It inits the DMA invoking the specific MAC/GMAC callback.
3061 * Some DMA parameters can be passed from the platform;
3062 * in case these are not passed, a default is kept for the MAC or GMAC.
3063 */
3064 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3065 {
3066 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3067 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3068 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3069 struct stmmac_rx_queue *rx_q;
3070 struct stmmac_tx_queue *tx_q;
3071 u32 chan = 0;
3072 int ret = 0;
3073
3074 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3075 dev_err(priv->device, "Invalid DMA configuration\n");
3076 return -EINVAL;
3077 }
3078
3079 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3080 priv->plat->dma_cfg->atds = 1;
3081
3082 ret = stmmac_reset(priv, priv->ioaddr);
3083 if (ret) {
3084 dev_err(priv->device, "Failed to reset the dma\n");
3085 return ret;
3086 }
3087
3088 /* DMA Configuration */
3089 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3090
3091 if (priv->plat->axi)
3092 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3093
3094 /* DMA CSR Channel configuration */
3095 for (chan = 0; chan < dma_csr_ch; chan++) {
3096 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3097 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3098 }
3099
3100 /* DMA RX Channel Configuration */
3101 for (chan = 0; chan < rx_channels_count; chan++) {
3102 rx_q = &priv->dma_conf.rx_queue[chan];
3103
3104 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3105 rx_q->dma_rx_phy, chan);
3106
3107 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3108 (rx_q->buf_alloc_num *
3109 sizeof(struct dma_desc));
3110 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3111 rx_q->rx_tail_addr, chan);
3112 }
3113
3114 /* DMA TX Channel Configuration */
3115 for (chan = 0; chan < tx_channels_count; chan++) {
3116 tx_q = &priv->dma_conf.tx_queue[chan];
3117
3118 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3119 tx_q->dma_tx_phy, chan);
3120
3121 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3122 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3123 tx_q->tx_tail_addr, chan);
3124 }
3125
3126 return ret;
3127 }
3128
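/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it arms the TX coalescing hrtimer unless the corresponding
 * NAPI instance is already scheduled, in which case any pending timer is
 * cancelled.
 */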
3129 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3130 {
3131 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3132 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3133 struct stmmac_channel *ch;
3134 struct napi_struct *napi;
3135
3136 if (!tx_coal_timer)
3137 return;
3138
3139 ch = &priv->channel[tx_q->queue_index];
3140 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3141
3142 /* Arm timer only if napi is not already scheduled.
3143 * If napi is scheduled, try to cancel any pending timer; it will be
3144 * armed again on the next scheduled napi.
3145 */
3146 if (unlikely(!napi_is_scheduled(napi)))
3147 hrtimer_start(&tx_q->txtimer,
3148 STMMAC_COAL_TIMER(tx_coal_timer),
3149 HRTIMER_MODE_REL);
3150 else
3151 hrtimer_try_to_cancel(&tx_q->txtimer);
3152 }
3153
3154 /**
3155 * stmmac_tx_timer - mitigation sw timer for tx.
3156 * @t: pointer to the TX queue hrtimer
3157 * Description:
3158 * This is the timer handler to directly invoke the stmmac_tx_clean.
3159 */
3160 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3161 {
3162 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3163 struct stmmac_priv *priv = tx_q->priv_data;
3164 struct stmmac_channel *ch;
3165 struct napi_struct *napi;
3166
3167 ch = &priv->channel[tx_q->queue_index];
3168 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3169
3170 if (likely(napi_schedule_prep(napi))) {
3171 unsigned long flags;
3172
3173 spin_lock_irqsave(&ch->lock, flags);
3174 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3175 spin_unlock_irqrestore(&ch->lock, flags);
3176 __napi_schedule(napi);
3177 }
3178
3179 return HRTIMER_NORESTART;
3180 }
3181
3182 /**
3183 * stmmac_init_coalesce - init mitigation options.
3184 * @priv: driver private structure
3185 * Description:
3186 * This inits the coalesce parameters: i.e. timer rate,
3187 * timer handler and default threshold used for enabling the
3188 * interrupt on completion bit.
3189 */
3190 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3191 {
3192 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3193 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3194 u32 chan;
3195
3196 for (chan = 0; chan < tx_channel_count; chan++) {
3197 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3198
3199 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3200 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3201
3202 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3203 tx_q->txtimer.function = stmmac_tx_timer;
3204 }
3205
3206 for (chan = 0; chan < rx_channel_count; chan++)
3207 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3208 }
3209
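/**
 * stmmac_set_rings_length - program the RX/TX ring lengths in the HW
 * @priv: driver private structure
 */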
3210 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3211 {
3212 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3213 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3214 u32 chan;
3215
3216 /* set TX ring length */
3217 for (chan = 0; chan < tx_channels_count; chan++)
3218 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3219 (priv->dma_conf.dma_tx_size - 1), chan);
3220
3221 /* set RX ring length */
3222 for (chan = 0; chan < rx_channels_count; chan++)
3223 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3224 (priv->dma_conf.dma_rx_size - 1), chan);
3225 }
3226
3227 /**
3228 * stmmac_set_tx_queue_weight - Set TX queue weight
3229 * @priv: driver private structure
3230 * Description: It is used for setting the TX queue weights
3231 */
3232 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3233 {
3234 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3235 u32 weight;
3236 u32 queue;
3237
3238 for (queue = 0; queue < tx_queues_count; queue++) {
3239 weight = priv->plat->tx_queues_cfg[queue].weight;
3240 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3241 }
3242 }
3243
3244 /**
3245 * stmmac_configure_cbs - Configure CBS in TX queue
3246 * @priv: driver private structure
3247 * Description: It is used for configuring CBS in AVB TX queues
3248 */
3249 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3250 {
3251 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3252 u32 mode_to_use;
3253 u32 queue;
3254
3255 /* queue 0 is reserved for legacy traffic */
3256 for (queue = 1; queue < tx_queues_count; queue++) {
3257 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3258 if (mode_to_use == MTL_QUEUE_DCB)
3259 continue;
3260
3261 stmmac_config_cbs(priv, priv->hw,
3262 priv->plat->tx_queues_cfg[queue].send_slope,
3263 priv->plat->tx_queues_cfg[queue].idle_slope,
3264 priv->plat->tx_queues_cfg[queue].high_credit,
3265 priv->plat->tx_queues_cfg[queue].low_credit,
3266 queue);
3267 }
3268 }
3269
3270 /**
3271 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3272 * @priv: driver private structure
3273 * Description: It is used for mapping RX queues to RX dma channels
3274 */
3275 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3276 {
3277 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3278 u32 queue;
3279 u32 chan;
3280
3281 for (queue = 0; queue < rx_queues_count; queue++) {
3282 chan = priv->plat->rx_queues_cfg[queue].chan;
3283 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3284 }
3285 }
3286
3287 /**
3288 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3289 * @priv: driver private structure
3290 * Description: It is used for configuring the RX Queue Priority
3291 */
3292 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3293 {
3294 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3295 u32 queue;
3296 u32 prio;
3297
3298 for (queue = 0; queue < rx_queues_count; queue++) {
3299 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3300 continue;
3301
3302 prio = priv->plat->rx_queues_cfg[queue].prio;
3303 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3304 }
3305 }
3306
3307 /**
3308 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3309 * @priv: driver private structure
3310 * Description: It is used for configuring the TX Queue Priority
3311 */
3312 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3313 {
3314 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3315 u32 queue;
3316 u32 prio;
3317
3318 for (queue = 0; queue < tx_queues_count; queue++) {
3319 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3320 continue;
3321
3322 prio = priv->plat->tx_queues_cfg[queue].prio;
3323 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3324 }
3325 }
3326
3327 /**
3328 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3329 * @priv: driver private structure
3330 * Description: It is used for configuring the RX queue routing
3331 */
3332 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3333 {
3334 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3335 u32 queue;
3336 u8 packet;
3337
3338 for (queue = 0; queue < rx_queues_count; queue++) {
3339 /* no specific packet type routing specified for the queue */
3340 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3341 continue;
3342
3343 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3344 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3345 }
3346 }
3347
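/**
 * stmmac_mac_config_rss - configure Receive Side Scaling
 * @priv: driver private structure
 * Description: it enables RSS only when supported by the HW, enabled by
 * the platform and NETIF_F_RXHASH is set, then programs the RSS
 * configuration for the RX queues in use.
 */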
3348 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3349 {
3350 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3351 priv->rss.enable = false;
3352 return;
3353 }
3354
3355 if (priv->dev->features & NETIF_F_RXHASH)
3356 priv->rss.enable = true;
3357 else
3358 priv->rss.enable = false;
3359
3360 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3361 priv->plat->rx_queues_to_use);
3362 }
3363
3364 /**
3365 * stmmac_mtl_configuration - Configure MTL
3366 * @priv: driver private structure
3367 * Description: It is used for configuring MTL
3368 */
3369 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3370 {
3371 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3372 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3373
3374 if (tx_queues_count > 1)
3375 stmmac_set_tx_queue_weight(priv);
3376
3377 /* Configure MTL RX algorithms */
3378 if (rx_queues_count > 1)
3379 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3380 priv->plat->rx_sched_algorithm);
3381
3382 /* Configure MTL TX algorithms */
3383 if (tx_queues_count > 1)
3384 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3385 priv->plat->tx_sched_algorithm);
3386
3387 /* Configure CBS in AVB TX queues */
3388 if (tx_queues_count > 1)
3389 stmmac_configure_cbs(priv);
3390
3391 /* Map RX MTL to DMA channels */
3392 stmmac_rx_queue_dma_chan_map(priv);
3393
3394 /* Enable MAC RX Queues */
3395 stmmac_mac_enable_rx_queues(priv);
3396
3397 /* Set RX priorities */
3398 if (rx_queues_count > 1)
3399 stmmac_mac_config_rx_queues_prio(priv);
3400
3401 /* Set TX priorities */
3402 if (tx_queues_count > 1)
3403 stmmac_mac_config_tx_queues_prio(priv);
3404
3405 /* Set RX routing */
3406 if (rx_queues_count > 1)
3407 stmmac_mac_config_rx_queues_routing(priv);
3408
3409 /* Receive Side Scaling */
3410 if (rx_queues_count > 1)
3411 stmmac_mac_config_rss(priv);
3412 }
3413
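/**
 * stmmac_safety_feat_configuration - Configure Safety Features
 * @priv: driver private structure
 * Description: It enables the HW safety features when the Automotive
 * Safety Package (ASP) capability is advertised by the core.
 */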
3414 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3415 {
3416 if (priv->dma_cap.asp) {
3417 netdev_info(priv->dev, "Enabling Safety Features\n");
3418 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3419 priv->plat->safety_feat_cfg);
3420 } else {
3421 netdev_info(priv->dev, "No Safety Features support found\n");
3422 }
3423 }
3424
3425 /**
3426 * stmmac_hw_setup - setup mac in a usable state.
3427 * @dev : pointer to the device structure.
3428 * @ptp_register: register PTP if set
3429 * Description:
3430 * this is the main function to set up the HW in a usable state: the
3431 * DMA engine is reset, the core registers are configured (e.g. AXI,
3432 * Checksum features, timers) and the DMA is ready to start receiving
3433 * and transmitting.
3434 * Return value:
3435 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3436 * file on failure.
3437 */
3438 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3439 {
3440 struct stmmac_priv *priv = netdev_priv(dev);
3441 u32 rx_cnt = priv->plat->rx_queues_to_use;
3442 u32 tx_cnt = priv->plat->tx_queues_to_use;
3443 bool sph_en;
3444 u32 chan;
3445 int ret;
3446
3447 /* Make sure RX clock is enabled */
3448 if (priv->hw->phylink_pcs)
3449 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3450
3451 /* DMA initialization and SW reset */
3452 ret = stmmac_init_dma_engine(priv);
3453 if (ret < 0) {
3454 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3455 __func__);
3456 return ret;
3457 }
3458
3459 /* Copy the MAC addr into the HW */
3460 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3461
3462 /* PS and related bits will be programmed according to the speed */
3463 if (priv->hw->pcs) {
3464 int speed = priv->plat->mac_port_sel_speed;
3465
3466 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3467 (speed == SPEED_1000)) {
3468 priv->hw->ps = speed;
3469 } else {
3470 dev_warn(priv->device, "invalid port speed\n");
3471 priv->hw->ps = 0;
3472 }
3473 }
3474
3475 /* Initialize the MAC Core */
3476 stmmac_core_init(priv, priv->hw, dev);
3477
3478 /* Initialize MTL */
3479 stmmac_mtl_configuration(priv);
3480
3481 /* Initialize Safety Features */
3482 stmmac_safety_feat_configuration(priv);
3483
3484 ret = stmmac_rx_ipc(priv, priv->hw);
3485 if (!ret) {
3486 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3487 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3488 priv->hw->rx_csum = 0;
3489 }
3490
3491 /* Enable the MAC Rx/Tx */
3492 stmmac_mac_set(priv, priv->ioaddr, true);
3493
3494 /* Set the HW DMA mode and the COE */
3495 stmmac_dma_operation_mode(priv);
3496
3497 stmmac_mmc_setup(priv);
3498
3499 if (ptp_register) {
3500 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3501 if (ret < 0)
3502 netdev_warn(priv->dev,
3503 "failed to enable PTP reference clock: %pe\n",
3504 ERR_PTR(ret));
3505 }
3506
3507 ret = stmmac_init_ptp(priv);
3508 if (ret == -EOPNOTSUPP)
3509 netdev_info(priv->dev, "PTP not supported by HW\n");
3510 else if (ret)
3511 netdev_warn(priv->dev, "PTP init failed\n");
3512 else if (ptp_register)
3513 stmmac_ptp_register(priv);
3514
3515 if (priv->use_riwt) {
3516 u32 queue;
3517
3518 for (queue = 0; queue < rx_cnt; queue++) {
3519 if (!priv->rx_riwt[queue])
3520 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3521
3522 stmmac_rx_watchdog(priv, priv->ioaddr,
3523 priv->rx_riwt[queue], queue);
3524 }
3525 }
3526
3527 if (priv->hw->pcs)
3528 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3529
3530 /* set TX and RX rings length */
3531 stmmac_set_rings_length(priv);
3532
3533 /* Enable TSO */
3534 if (priv->tso) {
3535 for (chan = 0; chan < tx_cnt; chan++) {
3536 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3537
3538 /* TSO and TBS cannot co-exist */
3539 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3540 continue;
3541
3542 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3543 }
3544 }
3545
3546 /* Enable Split Header */
3547 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3548 for (chan = 0; chan < rx_cnt; chan++)
3549 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3550
3551
3552 /* VLAN Tag Insertion */
3553 if (priv->dma_cap.vlins)
3554 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3555
3556 /* TBS */
3557 for (chan = 0; chan < tx_cnt; chan++) {
3558 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3559 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3560
3561 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3562 }
3563
3564 /* Configure real RX and TX queues */
3565 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3566 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3567
3568 /* Start the ball rolling... */
3569 stmmac_start_all_dma(priv);
3570
3571 stmmac_set_hw_vlan_mode(priv, priv->hw);
3572
3573 return 0;
3574 }
3575
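/**
 * stmmac_hw_teardown - undo the HW setup
 * @dev: pointer to the device structure
 * Description: It disables the PTP reference clock enabled in
 * stmmac_hw_setup().
 */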
3576 static void stmmac_hw_teardown(struct net_device *dev)
3577 {
3578 struct stmmac_priv *priv = netdev_priv(dev);
3579
3580 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3581 }
3582
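/**
 * stmmac_free_irq - free the requested IRQ lines
 * @dev: pointer to the device structure
 * @irq_err: stage of the IRQ request sequence that failed (or REQ_IRQ_ERR_ALL)
 * @irq_idx: number of per-queue (TX or RX) IRQs already requested
 * Description: It frees the IRQ lines in the reverse order of the request
 * path; the switch cases fall through so REQ_IRQ_ERR_ALL releases everything.
 */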
3583 static void stmmac_free_irq(struct net_device *dev,
3584 enum request_irq_err irq_err, int irq_idx)
3585 {
3586 struct stmmac_priv *priv = netdev_priv(dev);
3587 int j;
3588
3589 switch (irq_err) {
3590 case REQ_IRQ_ERR_ALL:
3591 irq_idx = priv->plat->tx_queues_to_use;
3592 fallthrough;
3593 case REQ_IRQ_ERR_TX:
3594 for (j = irq_idx - 1; j >= 0; j--) {
3595 if (priv->tx_irq[j] > 0) {
3596 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3597 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3598 }
3599 }
3600 irq_idx = priv->plat->rx_queues_to_use;
3601 fallthrough;
3602 case REQ_IRQ_ERR_RX:
3603 for (j = irq_idx - 1; j >= 0; j--) {
3604 if (priv->rx_irq[j] > 0) {
3605 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3606 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3607 }
3608 }
3609
3610 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3611 free_irq(priv->sfty_ue_irq, dev);
3612 fallthrough;
3613 case REQ_IRQ_ERR_SFTY_UE:
3614 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3615 free_irq(priv->sfty_ce_irq, dev);
3616 fallthrough;
3617 case REQ_IRQ_ERR_SFTY_CE:
3618 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3619 free_irq(priv->lpi_irq, dev);
3620 fallthrough;
3621 case REQ_IRQ_ERR_LPI:
3622 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3623 free_irq(priv->wol_irq, dev);
3624 fallthrough;
3625 case REQ_IRQ_ERR_SFTY:
3626 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3627 free_irq(priv->sfty_irq, dev);
3628 fallthrough;
3629 case REQ_IRQ_ERR_WOL:
3630 free_irq(dev->irq, dev);
3631 fallthrough;
3632 case REQ_IRQ_ERR_MAC:
3633 case REQ_IRQ_ERR_NO:
3634 /* If MAC IRQ request error, no more IRQ to free */
3635 break;
3636 }
3637 }
3638
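/**
 * stmmac_request_irq_multi_msi - request the per-vector MSI IRQ lines
 * @dev: pointer to the device structure
 * Description: It requests the MAC, WoL, LPI, safety and per-queue RX/TX
 * interrupt lines and sets a CPU affinity hint for each queue vector.
 */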
3639 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3640 {
3641 struct stmmac_priv *priv = netdev_priv(dev);
3642 enum request_irq_err irq_err;
3643 int irq_idx = 0;
3644 char *int_name;
3645 int ret;
3646 int i;
3647
3648 /* For common interrupt */
3649 int_name = priv->int_name_mac;
3650 sprintf(int_name, "%s:%s", dev->name, "mac");
3651 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3652 0, int_name, dev);
3653 if (unlikely(ret < 0)) {
3654 netdev_err(priv->dev,
3655 "%s: alloc mac MSI %d (error: %d)\n",
3656 __func__, dev->irq, ret);
3657 irq_err = REQ_IRQ_ERR_MAC;
3658 goto irq_error;
3659 }
3660
3661 /* Request the Wake IRQ in case another line
3662 * is used for WoL
3663 */
3664 priv->wol_irq_disabled = true;
3665 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3666 int_name = priv->int_name_wol;
3667 sprintf(int_name, "%s:%s", dev->name, "wol");
3668 ret = request_irq(priv->wol_irq,
3669 stmmac_mac_interrupt,
3670 0, int_name, dev);
3671 if (unlikely(ret < 0)) {
3672 netdev_err(priv->dev,
3673 "%s: alloc wol MSI %d (error: %d)\n",
3674 __func__, priv->wol_irq, ret);
3675 irq_err = REQ_IRQ_ERR_WOL;
3676 goto irq_error;
3677 }
3678 }
3679
3680 /* Request the LPI IRQ in case another line
3681 * is used for LPI
3682 */
3683 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3684 int_name = priv->int_name_lpi;
3685 sprintf(int_name, "%s:%s", dev->name, "lpi");
3686 ret = request_irq(priv->lpi_irq,
3687 stmmac_mac_interrupt,
3688 0, int_name, dev);
3689 if (unlikely(ret < 0)) {
3690 netdev_err(priv->dev,
3691 "%s: alloc lpi MSI %d (error: %d)\n",
3692 __func__, priv->lpi_irq, ret);
3693 irq_err = REQ_IRQ_ERR_LPI;
3694 goto irq_error;
3695 }
3696 }
3697
3698 /* Request the common Safety Feature Correctable/Uncorrectable
3699 * Error line in case another line is used
3700 */
3701 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3702 int_name = priv->int_name_sfty;
3703 sprintf(int_name, "%s:%s", dev->name, "safety");
3704 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3705 0, int_name, dev);
3706 if (unlikely(ret < 0)) {
3707 netdev_err(priv->dev,
3708 "%s: alloc sfty MSI %d (error: %d)\n",
3709 __func__, priv->sfty_irq, ret);
3710 irq_err = REQ_IRQ_ERR_SFTY;
3711 goto irq_error;
3712 }
3713 }
3714
3715 /* Request the Safety Feature Correctable Error line in
3716 * case another line is used
3717 */
3718 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3719 int_name = priv->int_name_sfty_ce;
3720 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3721 ret = request_irq(priv->sfty_ce_irq,
3722 stmmac_safety_interrupt,
3723 0, int_name, dev);
3724 if (unlikely(ret < 0)) {
3725 netdev_err(priv->dev,
3726 "%s: alloc sfty ce MSI %d (error: %d)\n",
3727 __func__, priv->sfty_ce_irq, ret);
3728 irq_err = REQ_IRQ_ERR_SFTY_CE;
3729 goto irq_error;
3730 }
3731 }
3732
3733 /* Request the Safety Feature Uncorrectable Error line in
3734 * case another line is used
3735 */
3736 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3737 int_name = priv->int_name_sfty_ue;
3738 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3739 ret = request_irq(priv->sfty_ue_irq,
3740 stmmac_safety_interrupt,
3741 0, int_name, dev);
3742 if (unlikely(ret < 0)) {
3743 netdev_err(priv->dev,
3744 "%s: alloc sfty ue MSI %d (error: %d)\n",
3745 __func__, priv->sfty_ue_irq, ret);
3746 irq_err = REQ_IRQ_ERR_SFTY_UE;
3747 goto irq_error;
3748 }
3749 }
3750
3751 /* Request Rx MSI irq */
3752 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3753 if (i >= MTL_MAX_RX_QUEUES)
3754 break;
3755 if (priv->rx_irq[i] == 0)
3756 continue;
3757
3758 int_name = priv->int_name_rx_irq[i];
3759 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3760 ret = request_irq(priv->rx_irq[i],
3761 stmmac_msi_intr_rx,
3762 0, int_name, &priv->dma_conf.rx_queue[i]);
3763 if (unlikely(ret < 0)) {
3764 netdev_err(priv->dev,
3765 "%s: alloc rx-%d MSI %d (error: %d)\n",
3766 __func__, i, priv->rx_irq[i], ret);
3767 irq_err = REQ_IRQ_ERR_RX;
3768 irq_idx = i;
3769 goto irq_error;
3770 }
3771 irq_set_affinity_hint(priv->rx_irq[i],
3772 cpumask_of(i % num_online_cpus()));
3773 }
3774
3775 /* Request Tx MSI irq */
3776 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3777 if (i >= MTL_MAX_TX_QUEUES)
3778 break;
3779 if (priv->tx_irq[i] == 0)
3780 continue;
3781
3782 int_name = priv->int_name_tx_irq[i];
3783 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3784 ret = request_irq(priv->tx_irq[i],
3785 stmmac_msi_intr_tx,
3786 0, int_name, &priv->dma_conf.tx_queue[i]);
3787 if (unlikely(ret < 0)) {
3788 netdev_err(priv->dev,
3789 "%s: alloc tx-%d MSI %d (error: %d)\n",
3790 __func__, i, priv->tx_irq[i], ret);
3791 irq_err = REQ_IRQ_ERR_TX;
3792 irq_idx = i;
3793 goto irq_error;
3794 }
3795 irq_set_affinity_hint(priv->tx_irq[i],
3796 cpumask_of(i % num_online_cpus()));
3797 }
3798
3799 return 0;
3800
3801 irq_error:
3802 stmmac_free_irq(dev, irq_err, irq_idx);
3803 return ret;
3804 }
3805
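/**
 * stmmac_request_irq_single - request the shared IRQ lines
 * @dev: pointer to the device structure
 * Description: It requests the main, WoL, LPI and safety interrupt lines
 * when per-queue MSI vectors are not available.
 */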
3806 static int stmmac_request_irq_single(struct net_device *dev)
3807 {
3808 struct stmmac_priv *priv = netdev_priv(dev);
3809 enum request_irq_err irq_err;
3810 int ret;
3811
3812 ret = request_irq(dev->irq, stmmac_interrupt,
3813 IRQF_SHARED, dev->name, dev);
3814 if (unlikely(ret < 0)) {
3815 netdev_err(priv->dev,
3816 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3817 __func__, dev->irq, ret);
3818 irq_err = REQ_IRQ_ERR_MAC;
3819 goto irq_error;
3820 }
3821
3822 /* Request the Wake IRQ in case another line
3823 * is used for WoL
3824 */
3825 priv->wol_irq_disabled = true;
3826 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3827 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3828 IRQF_SHARED, dev->name, dev);
3829 if (unlikely(ret < 0)) {
3830 netdev_err(priv->dev,
3831 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3832 __func__, priv->wol_irq, ret);
3833 irq_err = REQ_IRQ_ERR_WOL;
3834 goto irq_error;
3835 }
3836 }
3837
3838 /* Request the IRQ lines */
3839 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3840 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3841 IRQF_SHARED, dev->name, dev);
3842 if (unlikely(ret < 0)) {
3843 netdev_err(priv->dev,
3844 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3845 __func__, priv->lpi_irq, ret);
3846 irq_err = REQ_IRQ_ERR_LPI;
3847 goto irq_error;
3848 }
3849 }
3850
3851 /* Request the common Safety Feature Correctable/Uncorrectable
3852 * Error line in case another line is used
3853 */
3854 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3855 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3856 IRQF_SHARED, dev->name, dev);
3857 if (unlikely(ret < 0)) {
3858 netdev_err(priv->dev,
3859 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3860 __func__, priv->sfty_irq, ret);
3861 irq_err = REQ_IRQ_ERR_SFTY;
3862 goto irq_error;
3863 }
3864 }
3865
3866 return 0;
3867
3868 irq_error:
3869 stmmac_free_irq(dev, irq_err, 0);
3870 return ret;
3871 }
3872
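/**
 * stmmac_request_irq - request the device IRQ lines
 * @dev: pointer to the device structure
 * Description: It selects the multi-MSI or the single/shared IRQ request
 * path depending on the platform flags.
 */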
3873 static int stmmac_request_irq(struct net_device *dev)
3874 {
3875 struct stmmac_priv *priv = netdev_priv(dev);
3876 int ret;
3877
3878 /* Request the IRQ lines */
3879 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3880 ret = stmmac_request_irq_multi_msi(dev);
3881 else
3882 ret = stmmac_request_irq_single(dev);
3883
3884 return ret;
3885 }
3886
3887 /**
3888 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3889 * @priv: driver private structure
3890 * @mtu: MTU to setup the dma queue and buf with
3891 * Description: Allocate and generate a dma_conf based on the provided MTU.
3892 * Allocate the Tx/Rx DMA queue and init them.
3893 * Return value:
3894 * the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3895 */
3896 static struct stmmac_dma_conf *
3897 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3898 {
3899 struct stmmac_dma_conf *dma_conf;
3900 int chan, bfsize, ret;
3901
3902 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3903 if (!dma_conf) {
3904 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3905 __func__);
3906 return ERR_PTR(-ENOMEM);
3907 }
3908
3909 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3910 if (bfsize < 0)
3911 bfsize = 0;
3912
3913 if (bfsize < BUF_SIZE_16KiB)
3914 bfsize = stmmac_set_bfsize(mtu, 0);
3915
3916 dma_conf->dma_buf_sz = bfsize;
3917 /* Choose the tx/rx size from the one already defined in the
3918 * priv struct (if defined)
3919 */
3920 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3921 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3922
3923 if (!dma_conf->dma_tx_size)
3924 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3925 if (!dma_conf->dma_rx_size)
3926 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3927
3928 /* Earlier check for TBS */
3929 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3930 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3931 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3932
3933 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3934 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3935 }
3936
3937 ret = alloc_dma_desc_resources(priv, dma_conf);
3938 if (ret < 0) {
3939 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3940 __func__);
3941 goto alloc_error;
3942 }
3943
3944 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3945 if (ret < 0) {
3946 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3947 __func__);
3948 goto init_error;
3949 }
3950
3951 return dma_conf;
3952
3953 init_error:
3954 free_dma_desc_resources(priv, dma_conf);
3955 alloc_error:
3956 kfree(dma_conf);
3957 return ERR_PTR(ret);
3958 }
3959
3960 /**
3961 * __stmmac_open - open entry point of the driver
3962 * @dev : pointer to the device structure.
3963 * @dma_conf : structure to take the dma data
3964 * Description:
3965 * This function is the open entry point of the driver.
3966 * Return value:
3967 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3968 * file on failure.
3969 */
3970 static int __stmmac_open(struct net_device *dev,
3971 struct stmmac_dma_conf *dma_conf)
3972 {
3973 struct stmmac_priv *priv = netdev_priv(dev);
3974 int mode = priv->plat->phy_interface;
3975 u32 chan;
3976 int ret;
3977
3978 /* Initialise the tx lpi timer, converting from msec to usec */
3979 if (!priv->tx_lpi_timer)
3980 priv->tx_lpi_timer = eee_timer * 1000;
3981
3982 ret = pm_runtime_resume_and_get(priv->device);
3983 if (ret < 0)
3984 return ret;
3985
3986 if ((!priv->hw->xpcs ||
3987 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3988 ret = stmmac_init_phy(dev);
3989 if (ret) {
3990 netdev_err(priv->dev,
3991 "%s: Cannot attach to PHY (error: %d)\n",
3992 __func__, ret);
3993 goto init_phy_error;
3994 }
3995 }
3996
3997 buf_sz = dma_conf->dma_buf_sz;
3998 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3999 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4000 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4001 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4002
4003 stmmac_reset_queues_param(priv);
4004
4005 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4006 priv->plat->serdes_powerup) {
4007 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4008 if (ret < 0) {
4009 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4010 __func__);
4011 goto init_error;
4012 }
4013 }
4014
4015 ret = stmmac_hw_setup(dev, true);
4016 if (ret < 0) {
4017 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4018 goto init_error;
4019 }
4020
4021 stmmac_init_coalesce(priv);
4022
4023 phylink_start(priv->phylink);
4024 /* We may have called phylink_speed_down before */
4025 phylink_speed_up(priv->phylink);
4026
4027 ret = stmmac_request_irq(dev);
4028 if (ret)
4029 goto irq_error;
4030
4031 stmmac_enable_all_queues(priv);
4032 netif_tx_start_all_queues(priv->dev);
4033 stmmac_enable_all_dma_irq(priv);
4034
4035 return 0;
4036
4037 irq_error:
4038 phylink_stop(priv->phylink);
4039
4040 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4041 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4042
4043 stmmac_hw_teardown(dev);
4044 init_error:
4045 phylink_disconnect_phy(priv->phylink);
4046 init_phy_error:
4047 pm_runtime_put(priv->device);
4048 return ret;
4049 }
4050
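/**
 * stmmac_open - open entry point of the driver
 * @dev: pointer to the device structure
 * Description: It allocates a dma_conf for the current MTU, brings the
 * interface up through __stmmac_open() and frees the local dma_conf
 * container (its content is copied into the private structure on success).
 */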
4051 static int stmmac_open(struct net_device *dev)
4052 {
4053 struct stmmac_priv *priv = netdev_priv(dev);
4054 struct stmmac_dma_conf *dma_conf;
4055 int ret;
4056
4057 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4058 if (IS_ERR(dma_conf))
4059 return PTR_ERR(dma_conf);
4060
4061 ret = __stmmac_open(dev, dma_conf);
4062 if (ret)
4063 free_dma_desc_resources(priv, dma_conf);
4064
4065 kfree(dma_conf);
4066 return ret;
4067 }
4068
4069 /**
4070 * stmmac_release - close entry point of the driver
4071 * @dev : device pointer.
4072 * Description:
4073 * This is the stop entry point of the driver.
4074 */
4075 static int stmmac_release(struct net_device *dev)
4076 {
4077 struct stmmac_priv *priv = netdev_priv(dev);
4078 u32 chan;
4079
4080 if (device_may_wakeup(priv->device))
4081 phylink_speed_down(priv->phylink, false);
4082 /* Stop and disconnect the PHY */
4083 phylink_stop(priv->phylink);
4084 phylink_disconnect_phy(priv->phylink);
4085
4086 stmmac_disable_all_queues(priv);
4087
4088 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4089 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4090
4091 netif_tx_disable(dev);
4092
4093 /* Free the IRQ lines */
4094 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4095
4096 /* Stop TX/RX DMA and clear the descriptors */
4097 stmmac_stop_all_dma(priv);
4098
4099 /* Release and free the Rx/Tx resources */
4100 free_dma_desc_resources(priv, &priv->dma_conf);
4101
4102 /* Disable the MAC Rx/Tx */
4103 stmmac_mac_set(priv, priv->ioaddr, false);
4104
4105 /* Powerdown Serdes if there is */
4106 if (priv->plat->serdes_powerdown)
4107 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4108
4109 stmmac_release_ptp(priv);
4110
4111 if (stmmac_fpe_supported(priv))
4112 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4113
4114 pm_runtime_put(priv->device);
4115
4116 return 0;
4117 }
4118
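/**
 * stmmac_vlan_insert - insert the VLAN tag using a TX descriptor
 * @priv: driver private structure
 * @skb: socket buffer carrying the VLAN tag
 * @tx_q: TX queue to use
 * Description: It prepares a VLAN insertion descriptor when the HW supports
 * it; returns true if a descriptor was consumed for the tag.
 */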
4119 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4120 struct stmmac_tx_queue *tx_q)
4121 {
4122 u16 tag = 0x0, inner_tag = 0x0;
4123 u32 inner_type = 0x0;
4124 struct dma_desc *p;
4125
4126 if (!priv->dma_cap.vlins)
4127 return false;
4128 if (!skb_vlan_tag_present(skb))
4129 return false;
4130 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4131 inner_tag = skb_vlan_tag_get(skb);
4132 inner_type = STMMAC_VLAN_INSERT;
4133 }
4134
4135 tag = skb_vlan_tag_get(skb);
4136
4137 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4138 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4139 else
4140 p = &tx_q->dma_tx[tx_q->cur_tx];
4141
4142 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4143 return false;
4144
4145 stmmac_set_tx_owner(priv, p);
4146 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4147 return true;
4148 }
4149
4150 /**
4151 * stmmac_tso_allocator - prepare the TX descriptors for a TSO buffer
4152 * @priv: driver private structure
4153 * @des: buffer start address
4154 * @total_len: total length to fill in descriptors
4155 * @last_segment: condition for the last descriptor
4156 * @queue: TX queue index
4157 * Description:
4158 * This function fills a descriptor and requests new descriptors according
4159 * to the buffer length to fill.
4160 */
4161 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4162 int total_len, bool last_segment, u32 queue)
4163 {
4164 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4165 struct dma_desc *desc;
4166 u32 buff_size;
4167 int tmp_len;
4168
4169 tmp_len = total_len;
4170
4171 while (tmp_len > 0) {
4172 dma_addr_t curr_addr;
4173
4174 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4175 priv->dma_conf.dma_tx_size);
4176 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4177
4178 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4179 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4180 else
4181 desc = &tx_q->dma_tx[tx_q->cur_tx];
4182
4183 curr_addr = des + (total_len - tmp_len);
4184 stmmac_set_desc_addr(priv, desc, curr_addr);
4185 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4186 TSO_MAX_BUFF_SIZE : tmp_len;
4187
4188 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4189 0, 1,
4190 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4191 0, 0);
4192
4193 tmp_len -= TSO_MAX_BUFF_SIZE;
4194 }
4195 }
4196
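/**
 * stmmac_flush_tx_descriptors - make the new TX descriptors visible to DMA
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: It issues a write barrier and updates the TX tail pointer so
 * the DMA engine can process the freshly prepared descriptors.
 */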
4197 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4198 {
4199 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4200 int desc_size;
4201
4202 if (likely(priv->extend_desc))
4203 desc_size = sizeof(struct dma_extended_desc);
4204 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4205 desc_size = sizeof(struct dma_edesc);
4206 else
4207 desc_size = sizeof(struct dma_desc);
4208
4209 /* The own bit must be the latest setting done when preparing the
4210 * descriptor, and then a barrier is needed to make sure that
4211 * all is coherent before granting the DMA engine.
4212 */
4213 wmb();
4214
4215 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4216 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4217 }
4218
4219 /**
4220 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4221 * @skb : the socket buffer
4222 * @dev : device pointer
4223 * Description: this is the transmit function that is called on TSO frames
4224 * (support available on GMAC4 and newer chips).
4225 * Diagram below show the ring programming in case of TSO frames:
4226 *
4227 * First Descriptor
4228 * --------
4229 * | DES0 |---> buffer1 = L2/L3/L4 header
4230 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4231 * | | width is 32-bit, but we never use it.
4232 * | | Also can be used as the most-significant 8-bits or 16-bits of
4233 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4234 * | | or 48-bit, and we always use it.
4235 * | DES2 |---> buffer1 len
4236 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4237 * --------
4238 * --------
4239 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4240 * | DES1 |---> same as the First Descriptor
4241 * | DES2 |---> buffer1 len
4242 * | DES3 |
4243 * --------
4244 * |
4245 * ...
4246 * |
4247 * --------
4248 * | DES0 |---> buffer1 = Split TCP Payload
4249 * | DES1 |---> same as the First Descriptor
4250 * | DES2 |---> buffer1 len
4251 * | DES3 |
4252 * --------
4253 *
4254 * mss is fixed when TSO is enabled, so the TDES3 ctx field does not need to be programmed per frame.
4255 */
4256 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4257 {
4258 struct dma_desc *desc, *first, *mss_desc = NULL;
4259 struct stmmac_priv *priv = netdev_priv(dev);
4260 unsigned int first_entry, tx_packets;
4261 struct stmmac_txq_stats *txq_stats;
4262 struct stmmac_tx_queue *tx_q;
4263 u32 pay_len, mss, queue;
4264 int i, first_tx, nfrags;
4265 u8 proto_hdr_len, hdr;
4266 dma_addr_t des;
4267 bool set_ic;
4268
4269 /* Always insert the VLAN tag into the SKB payload for TSO frames.
4270 *
4271 * Never insert the VLAN tag by HW, since segments split by the
4272 * TSO engine would be un-tagged by mistake.
4273 */
4274 if (skb_vlan_tag_present(skb)) {
4275 skb = __vlan_hwaccel_push_inside(skb);
4276 if (unlikely(!skb)) {
4277 priv->xstats.tx_dropped++;
4278 return NETDEV_TX_OK;
4279 }
4280 }
4281
4282 nfrags = skb_shinfo(skb)->nr_frags;
4283 queue = skb_get_queue_mapping(skb);
4284
4285 tx_q = &priv->dma_conf.tx_queue[queue];
4286 txq_stats = &priv->xstats.txq_stats[queue];
4287 first_tx = tx_q->cur_tx;
4288
4289 /* Compute header lengths */
4290 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4291 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4292 hdr = sizeof(struct udphdr);
4293 } else {
4294 proto_hdr_len = skb_tcp_all_headers(skb);
4295 hdr = tcp_hdrlen(skb);
4296 }
4297
4298 /* Desc availability based on threshold should be enough safe */
4299 if (unlikely(stmmac_tx_avail(priv, queue) <
4300 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4301 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4302 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4303 queue));
4304 /* This is a hard error, log it. */
4305 netdev_err(priv->dev,
4306 "%s: Tx Ring full when queue awake\n",
4307 __func__);
4308 }
4309 return NETDEV_TX_BUSY;
4310 }
4311
4312 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4313
4314 mss = skb_shinfo(skb)->gso_size;
4315
4316 /* set new MSS value if needed */
4317 if (mss != tx_q->mss) {
4318 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4319 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4320 else
4321 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4322
4323 stmmac_set_mss(priv, mss_desc, mss);
4324 tx_q->mss = mss;
4325 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4326 priv->dma_conf.dma_tx_size);
4327 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4328 }
4329
4330 if (netif_msg_tx_queued(priv)) {
4331 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4332 __func__, hdr, proto_hdr_len, pay_len, mss);
4333 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4334 skb->data_len);
4335 }
4336
4337 first_entry = tx_q->cur_tx;
4338 WARN_ON(tx_q->tx_skbuff[first_entry]);
4339
4340 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4341 desc = &tx_q->dma_entx[first_entry].basic;
4342 else
4343 desc = &tx_q->dma_tx[first_entry];
4344 first = desc;
4345
4346 /* first descriptor: fill Headers on Buf1 */
4347 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4348 DMA_TO_DEVICE);
4349 if (dma_mapping_error(priv->device, des))
4350 goto dma_map_err;
4351
4352 stmmac_set_desc_addr(priv, first, des);
4353 stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4354 (nfrags == 0), queue);
4355
4356 /* In case two or more DMA transmit descriptors are allocated for this
4357 * non-paged SKB data, the DMA buffer address should be saved to
4358 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4359 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4360 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4361 * since the tail areas of the DMA buffer can be accessed by DMA engine
4362 * sooner or later.
4363 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4364 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4365 * this DMA buffer right after the DMA engine completely finishes the
4366 * full buffer transmission.
4367 */
4368 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4369 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4370 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4371 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4372
4373 /* Prepare fragments */
4374 for (i = 0; i < nfrags; i++) {
4375 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4376
4377 des = skb_frag_dma_map(priv->device, frag, 0,
4378 skb_frag_size(frag),
4379 DMA_TO_DEVICE);
4380 if (dma_mapping_error(priv->device, des))
4381 goto dma_map_err;
4382
4383 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4384 (i == nfrags - 1), queue);
4385
4386 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4387 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4388 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4389 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4390 }
4391
4392 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4393
4394 /* Only the last descriptor gets to point to the skb. */
4395 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4396 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4397
4398 /* Manage tx mitigation */
4399 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4400 tx_q->tx_count_frames += tx_packets;
4401
4402 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4403 set_ic = true;
4404 else if (!priv->tx_coal_frames[queue])
4405 set_ic = false;
4406 else if (tx_packets > priv->tx_coal_frames[queue])
4407 set_ic = true;
4408 else if ((tx_q->tx_count_frames %
4409 priv->tx_coal_frames[queue]) < tx_packets)
4410 set_ic = true;
4411 else
4412 set_ic = false;
4413
4414 if (set_ic) {
4415 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4416 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4417 else
4418 desc = &tx_q->dma_tx[tx_q->cur_tx];
4419
4420 tx_q->tx_count_frames = 0;
4421 stmmac_set_tx_ic(priv, desc);
4422 }
4423
4424 /* We've used all descriptors we need for this skb, however,
4425 * advance cur_tx so that it references a fresh descriptor.
4426 * ndo_start_xmit will fill this descriptor the next time it's
4427 * called and stmmac_tx_clean may clean up to this descriptor.
4428 */
4429 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4430
4431 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4432 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4433 __func__);
4434 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4435 }
4436
4437 u64_stats_update_begin(&txq_stats->q_syncp);
4438 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4439 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4440 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4441 if (set_ic)
4442 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4443 u64_stats_update_end(&txq_stats->q_syncp);
4444
4445 if (priv->sarc_type)
4446 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4447
4448 skb_tx_timestamp(skb);
4449
4450 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4451 priv->hwts_tx_en)) {
4452 /* declare that device is doing timestamping */
4453 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4454 stmmac_enable_tx_timestamp(priv, first);
4455 }
4456
4457 /* Complete the first descriptor before granting the DMA */
4458 stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4459 tx_q->tx_skbuff_dma[first_entry].last_segment,
4460 hdr / 4, (skb->len - proto_hdr_len));
4461
4462 /* If context desc is used to change MSS */
4463 if (mss_desc) {
4464 /* Make sure that first descriptor has been completely
4465 * written, including its own bit. This is because MSS is
4466 * actually before first descriptor, so we need to make
4467 * sure that MSS's own bit is the last thing written.
4468 */
4469 dma_wmb();
4470 stmmac_set_tx_owner(priv, mss_desc);
4471 }
4472
4473 if (netif_msg_pktdata(priv)) {
4474 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4475 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4476 tx_q->cur_tx, first, nfrags);
4477 pr_info(">>> frame to be transmitted: ");
4478 print_pkt(skb->data, skb_headlen(skb));
4479 }
4480
4481 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4482
4483 stmmac_flush_tx_descriptors(priv, queue);
4484 stmmac_tx_timer_arm(priv, queue);
4485
4486 return NETDEV_TX_OK;
4487
4488 dma_map_err:
4489 dev_err(priv->device, "Tx dma map failed\n");
4490 dev_kfree_skb(skb);
4491 priv->xstats.tx_dropped++;
4492 return NETDEV_TX_OK;
4493 }
4494
4495 /**
4496 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4497 * @skb: socket buffer to check
4498 *
4499 * Check if a packet has an ethertype that will trigger the IP header checks
4500 * and IP/TCP checksum engine of the stmmac core.
4501 *
4502 * Return: true if the ethertype can trigger the checksum engine, false
4503 * otherwise
4504 */
4505 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4506 {
4507 int depth = 0;
4508 __be16 proto;
4509
4510 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4511 &depth);
4512
4513 return (depth <= ETH_HLEN) &&
4514 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4515 }
4516
4517 /**
4518 * stmmac_xmit - Tx entry point of the driver
4519 * @skb : the socket buffer
4520 * @dev : device pointer
4521 * Description : this is the tx entry point of the driver.
4522 * It programs the chain or the ring and supports oversized frames
4523 * and SG feature.
4524 */
4525 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4526 {
4527 unsigned int first_entry, tx_packets, enh_desc;
4528 struct stmmac_priv *priv = netdev_priv(dev);
4529 unsigned int nopaged_len = skb_headlen(skb);
4530 int i, csum_insertion = 0, is_jumbo = 0;
4531 u32 queue = skb_get_queue_mapping(skb);
4532 int nfrags = skb_shinfo(skb)->nr_frags;
4533 int gso = skb_shinfo(skb)->gso_type;
4534 struct stmmac_txq_stats *txq_stats;
4535 struct dma_edesc *tbs_desc = NULL;
4536 struct dma_desc *desc, *first;
4537 struct stmmac_tx_queue *tx_q;
4538 bool has_vlan, set_ic;
4539 int entry, first_tx;
4540 dma_addr_t des;
4541
4542 tx_q = &priv->dma_conf.tx_queue[queue];
4543 txq_stats = &priv->xstats.txq_stats[queue];
4544 first_tx = tx_q->cur_tx;
4545
4546 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4547 stmmac_stop_sw_lpi(priv);
4548
4549 /* Manage oversized TCP frames for GMAC4 device */
4550 if (skb_is_gso(skb) && priv->tso) {
4551 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4552 return stmmac_tso_xmit(skb, dev);
4553 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4554 return stmmac_tso_xmit(skb, dev);
4555 }
4556
4557 if (priv->est && priv->est->enable &&
4558 priv->est->max_sdu[queue] &&
4559 skb->len > priv->est->max_sdu[queue]) {
4560 priv->xstats.max_sdu_txq_drop[queue]++;
4561 goto max_sdu_err;
4562 }
4563
4564 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4565 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4566 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4567 queue));
4568 /* This is a hard error, log it. */
4569 netdev_err(priv->dev,
4570 "%s: Tx Ring full when queue awake\n",
4571 __func__);
4572 }
4573 return NETDEV_TX_BUSY;
4574 }
4575
4576 /* Check if VLAN can be inserted by HW */
4577 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4578
4579 entry = tx_q->cur_tx;
4580 first_entry = entry;
4581 WARN_ON(tx_q->tx_skbuff[first_entry]);
4582
4583 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4584 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4585 * queues. In that case, checksum offloading for those queues that don't
4586 * support tx coe needs to fall back to software checksum calculation.
4587 *
4588 * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
4589 * also have to be checksummed in software.
4590 */
4591 if (csum_insertion &&
4592 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4593 !stmmac_has_ip_ethertype(skb))) {
4594 if (unlikely(skb_checksum_help(skb)))
4595 goto dma_map_err;
4596 csum_insertion = !csum_insertion;
4597 }
4598
4599 if (likely(priv->extend_desc))
4600 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4601 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4602 desc = &tx_q->dma_entx[entry].basic;
4603 else
4604 desc = tx_q->dma_tx + entry;
4605
4606 first = desc;
4607
4608 if (has_vlan)
4609 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4610
4611 enh_desc = priv->plat->enh_desc;
4612 /* To program the descriptors according to the size of the frame */
4613 if (enh_desc)
4614 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4615
4616 if (unlikely(is_jumbo)) {
4617 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4618 if (unlikely(entry < 0) && (entry != -EINVAL))
4619 goto dma_map_err;
4620 }
4621
4622 for (i = 0; i < nfrags; i++) {
4623 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4624 int len = skb_frag_size(frag);
4625 bool last_segment = (i == (nfrags - 1));
4626
4627 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4628 WARN_ON(tx_q->tx_skbuff[entry]);
4629
4630 if (likely(priv->extend_desc))
4631 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4632 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4633 desc = &tx_q->dma_entx[entry].basic;
4634 else
4635 desc = tx_q->dma_tx + entry;
4636
4637 des = skb_frag_dma_map(priv->device, frag, 0, len,
4638 DMA_TO_DEVICE);
4639 if (dma_mapping_error(priv->device, des))
4640 goto dma_map_err; /* should reuse desc w/o issues */
4641
4642 tx_q->tx_skbuff_dma[entry].buf = des;
4643
4644 stmmac_set_desc_addr(priv, desc, des);
4645
4646 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4647 tx_q->tx_skbuff_dma[entry].len = len;
4648 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4649 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4650
4651 /* Prepare the descriptor and set the own bit too */
4652 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4653 priv->mode, 1, last_segment, skb->len);
4654 }
4655
4656 /* Only the last descriptor gets to point to the skb. */
4657 tx_q->tx_skbuff[entry] = skb;
4658 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4659
4660 /* According to the coalesce parameter the IC bit for the latest
4661 * segment is reset and the timer re-started to clean the tx status.
4662 * This approach takes care of the fragments: desc is the first
4663 * element in case of no SG.
4664 */
4665 tx_packets = (entry + 1) - first_tx;
4666 tx_q->tx_count_frames += tx_packets;
4667
4668 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4669 set_ic = true;
4670 else if (!priv->tx_coal_frames[queue])
4671 set_ic = false;
4672 else if (tx_packets > priv->tx_coal_frames[queue])
4673 set_ic = true;
4674 else if ((tx_q->tx_count_frames %
4675 priv->tx_coal_frames[queue]) < tx_packets)
4676 set_ic = true;
4677 else
4678 set_ic = false;
4679
4680 if (set_ic) {
4681 if (likely(priv->extend_desc))
4682 desc = &tx_q->dma_etx[entry].basic;
4683 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4684 desc = &tx_q->dma_entx[entry].basic;
4685 else
4686 desc = &tx_q->dma_tx[entry];
4687
4688 tx_q->tx_count_frames = 0;
4689 stmmac_set_tx_ic(priv, desc);
4690 }
4691
4692 /* We've used all descriptors we need for this skb, however,
4693 * advance cur_tx so that it references a fresh descriptor.
4694 * ndo_start_xmit will fill this descriptor the next time it's
4695 * called and stmmac_tx_clean may clean up to this descriptor.
4696 */
4697 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4698 tx_q->cur_tx = entry;
4699
4700 if (netif_msg_pktdata(priv)) {
4701 netdev_dbg(priv->dev,
4702 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4703 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4704 entry, first, nfrags);
4705
4706 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4707 print_pkt(skb->data, skb->len);
4708 }
4709
4710 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4711 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4712 __func__);
4713 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4714 }
4715
4716 u64_stats_update_begin(&txq_stats->q_syncp);
4717 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4718 if (set_ic)
4719 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4720 u64_stats_update_end(&txq_stats->q_syncp);
4721
4722 if (priv->sarc_type)
4723 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4724
4725 skb_tx_timestamp(skb);
4726
4727 /* Ready to fill the first descriptor and set the OWN bit w/o any
4728 * problems because all the descriptors are actually ready to be
4729 * passed to the DMA engine.
4730 */
4731 if (likely(!is_jumbo)) {
4732 bool last_segment = (nfrags == 0);
4733
4734 des = dma_map_single(priv->device, skb->data,
4735 nopaged_len, DMA_TO_DEVICE);
4736 if (dma_mapping_error(priv->device, des))
4737 goto dma_map_err;
4738
4739 tx_q->tx_skbuff_dma[first_entry].buf = des;
4740 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4741 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4742
4743 stmmac_set_desc_addr(priv, first, des);
4744
4745 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4746 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4747
4748 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4749 priv->hwts_tx_en)) {
4750 /* declare that device is doing timestamping */
4751 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4752 stmmac_enable_tx_timestamp(priv, first);
4753 }
4754
4755 /* Prepare the first descriptor setting the OWN bit too */
4756 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4757 csum_insertion, priv->mode, 0, last_segment,
4758 skb->len);
4759 }
4760
4761 if (tx_q->tbs & STMMAC_TBS_EN) {
4762 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4763
4764 tbs_desc = &tx_q->dma_entx[first_entry];
4765 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4766 }
4767
4768 stmmac_set_tx_owner(priv, first);
4769
4770 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4771
4772 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4773
4774 stmmac_flush_tx_descriptors(priv, queue);
4775 stmmac_tx_timer_arm(priv, queue);
4776
4777 return NETDEV_TX_OK;
4778
4779 dma_map_err:
4780 netdev_err(priv->dev, "Tx DMA map failed\n");
4781 max_sdu_err:
4782 dev_kfree_skb(skb);
4783 priv->xstats.tx_dropped++;
4784 return NETDEV_TX_OK;
4785 }
4786
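/**
 * stmmac_rx_vlan - driver-level RX VLAN tag stripping
 * @dev: device pointer
 * @skb: received socket buffer
 * Description: It pops the 802.1Q/802.1AD tag from the packet data and puts
 * it into the skb when the corresponding RX VLAN offload is enabled.
 */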
4787 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4788 {
4789 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4790 __be16 vlan_proto = veth->h_vlan_proto;
4791 u16 vlanid;
4792
4793 if ((vlan_proto == htons(ETH_P_8021Q) &&
4794 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4795 (vlan_proto == htons(ETH_P_8021AD) &&
4796 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4797 /* pop the vlan tag */
4798 vlanid = ntohs(veth->h_vlan_TCI);
4799 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4800 skb_pull(skb, VLAN_HLEN);
4801 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4802 }
4803 }
4804
4805 /**
4806 * stmmac_rx_refill - refill used skb preallocated buffers
4807 * @priv: driver private structure
4808 * @queue: RX queue index
4809 * Description: this is to refill the used RX buffers for the reception
4810 * process that is based on zero-copy.
4811 */
4812 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4813 {
4814 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4815 int dirty = stmmac_rx_dirty(priv, queue);
4816 unsigned int entry = rx_q->dirty_rx;
4817 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4818
4819 if (priv->dma_cap.host_dma_width <= 32)
4820 gfp |= GFP_DMA32;
4821
4822 while (dirty-- > 0) {
4823 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4824 struct dma_desc *p;
4825 bool use_rx_wd;
4826
4827 if (priv->extend_desc)
4828 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4829 else
4830 p = rx_q->dma_rx + entry;
4831
4832 if (!buf->page) {
4833 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4834 if (!buf->page)
4835 break;
4836 }
4837
4838 if (priv->sph && !buf->sec_page) {
4839 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4840 if (!buf->sec_page)
4841 break;
4842
4843 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4844 }
4845
4846 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4847
4848 stmmac_set_desc_addr(priv, p, buf->addr);
4849 if (priv->sph)
4850 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4851 else
4852 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4853 stmmac_refill_desc3(priv, rx_q, p);
4854
4855 rx_q->rx_count_frames++;
4856 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4857 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4858 rx_q->rx_count_frames = 0;
4859
4860 use_rx_wd = !priv->rx_coal_frames[queue];
4861 use_rx_wd |= rx_q->rx_count_frames > 0;
4862 if (!priv->use_riwt)
4863 use_rx_wd = false;
4864
4865 dma_wmb();
4866 stmmac_set_rx_owner(priv, p, use_rx_wd);
4867
4868 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4869 }
4870 rx_q->dirty_rx = entry;
4871 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4872 (rx_q->dirty_rx * sizeof(struct dma_desc));
4873 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4874 }
4875
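/**
 * stmmac_rx_buf1_len - length of the data held by buffer 1
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX descriptor status
 * @len: length accumulated so far for the current frame
 * Description: It returns the number of bytes carried by the first buffer,
 * handling the split header and last descriptor cases.
 */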
4876 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4877 struct dma_desc *p,
4878 int status, unsigned int len)
4879 {
4880 unsigned int plen = 0, hlen = 0;
4881 int coe = priv->hw->rx_csum;
4882
4883 /* Not first descriptor, buffer is always zero */
4884 if (priv->sph && len)
4885 return 0;
4886
4887 /* First descriptor, get split header length */
4888 stmmac_get_rx_header_len(priv, p, &hlen);
4889 if (priv->sph && hlen) {
4890 priv->xstats.rx_split_hdr_pkt_n++;
4891 return hlen;
4892 }
4893
4894 /* First descriptor, not last descriptor and not split header */
4895 if (status & rx_not_ls)
4896 return priv->dma_conf.dma_buf_sz;
4897
4898 plen = stmmac_get_rx_frame_len(priv, p, coe);
4899
4900 /* First descriptor and last descriptor and not split header */
4901 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4902 }
4903
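/**
 * stmmac_rx_buf2_len - length of the data held by buffer 2
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX descriptor status
 * @len: length already accounted to buffer 1
 * Description: It returns the number of bytes carried by the second buffer,
 * which is only used when Split Header is enabled.
 */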
4904 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4905 struct dma_desc *p,
4906 int status, unsigned int len)
4907 {
4908 int coe = priv->hw->rx_csum;
4909 unsigned int plen = 0;
4910
4911 /* Not split header, buffer is not available */
4912 if (!priv->sph)
4913 return 0;
4914
4915 /* Not last descriptor */
4916 if (status & rx_not_ls)
4917 return priv->dma_conf.dma_buf_sz;
4918
4919 plen = stmmac_get_rx_frame_len(priv, p, coe);
4920
4921 /* Last descriptor */
4922 return plen - len;
4923 }
4924
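/**
 * stmmac_xdp_xmit_xdpf - transmit an XDP frame on a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * @xdpf: XDP frame to transmit
 * @dma_map: true for ndo_xdp_xmit (frame must be DMA mapped), false for XDP_TX
 * Description: It fills a TX descriptor for the XDP frame and kicks the DMA.
 * Returns STMMAC_XDP_TX on success or STMMAC_XDP_CONSUMED on failure.
 */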
4925 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4926 struct xdp_frame *xdpf, bool dma_map)
4927 {
4928 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4929 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4930 unsigned int entry = tx_q->cur_tx;
4931 struct dma_desc *tx_desc;
4932 dma_addr_t dma_addr;
4933 bool set_ic;
4934
4935 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4936 return STMMAC_XDP_CONSUMED;
4937
4938 if (priv->est && priv->est->enable &&
4939 priv->est->max_sdu[queue] &&
4940 xdpf->len > priv->est->max_sdu[queue]) {
4941 priv->xstats.max_sdu_txq_drop[queue]++;
4942 return STMMAC_XDP_CONSUMED;
4943 }
4944
4945 if (likely(priv->extend_desc))
4946 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4947 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4948 tx_desc = &tx_q->dma_entx[entry].basic;
4949 else
4950 tx_desc = tx_q->dma_tx + entry;
4951
4952 if (dma_map) {
4953 dma_addr = dma_map_single(priv->device, xdpf->data,
4954 xdpf->len, DMA_TO_DEVICE);
4955 if (dma_mapping_error(priv->device, dma_addr))
4956 return STMMAC_XDP_CONSUMED;
4957
4958 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4959 } else {
4960 struct page *page = virt_to_page(xdpf->data);
4961
4962 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4963 xdpf->headroom;
4964 dma_sync_single_for_device(priv->device, dma_addr,
4965 xdpf->len, DMA_BIDIRECTIONAL);
4966
4967 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4968 }
4969
4970 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4971 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4972 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4973 tx_q->tx_skbuff_dma[entry].last_segment = true;
4974 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4975
4976 tx_q->xdpf[entry] = xdpf;
4977
4978 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4979
4980 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4981 true, priv->mode, true, true,
4982 xdpf->len);
4983
4984 tx_q->tx_count_frames++;
4985
4986 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4987 set_ic = true;
4988 else
4989 set_ic = false;
4990
4991 if (set_ic) {
4992 tx_q->tx_count_frames = 0;
4993 stmmac_set_tx_ic(priv, tx_desc);
4994 u64_stats_update_begin(&txq_stats->q_syncp);
4995 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4996 u64_stats_update_end(&txq_stats->q_syncp);
4997 }
4998
4999 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5000
5001 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5002 tx_q->cur_tx = entry;
5003
5004 return STMMAC_XDP_TX;
5005 }
5006
5007 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5008 int cpu)
5009 {
5010 int index = cpu;
5011
5012 if (unlikely(index < 0))
5013 index = 0;
5014
5015 while (index >= priv->plat->tx_queues_to_use)
5016 index -= priv->plat->tx_queues_to_use;
5017
5018 return index;
5019 }
5020
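/**
 * stmmac_xdp_xmit_back - transmit back a frame for the XDP_TX action
 * @priv: driver private structure
 * @xdp: XDP buffer to transmit
 * Description: It converts the buffer to an XDP frame and sends it on the
 * TX queue mapped to the current CPU, sharing the lock with the slow path.
 */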
5021 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5022 struct xdp_buff *xdp)
5023 {
5024 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5025 int cpu = smp_processor_id();
5026 struct netdev_queue *nq;
5027 int queue;
5028 int res;
5029
5030 if (unlikely(!xdpf))
5031 return STMMAC_XDP_CONSUMED;
5032
5033 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5034 nq = netdev_get_tx_queue(priv->dev, queue);
5035
5036 __netif_tx_lock(nq, cpu);
5037 /* Avoids TX time-out as we are sharing with slow path */
5038 txq_trans_cond_update(nq);
5039
5040 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5041 if (res == STMMAC_XDP_TX)
5042 stmmac_flush_tx_descriptors(priv, queue);
5043
5044 __netif_tx_unlock(nq);
5045
5046 return res;
5047 }
5048
5049 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5050 struct bpf_prog *prog,
5051 struct xdp_buff *xdp)
5052 {
5053 u32 act;
5054 int res;
5055
5056 act = bpf_prog_run_xdp(prog, xdp);
5057 switch (act) {
5058 case XDP_PASS:
5059 res = STMMAC_XDP_PASS;
5060 break;
5061 case XDP_TX:
5062 res = stmmac_xdp_xmit_back(priv, xdp);
5063 break;
5064 case XDP_REDIRECT:
5065 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5066 res = STMMAC_XDP_CONSUMED;
5067 else
5068 res = STMMAC_XDP_REDIRECT;
5069 break;
5070 default:
5071 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5072 fallthrough;
5073 case XDP_ABORTED:
5074 trace_xdp_exception(priv->dev, prog, act);
5075 fallthrough;
5076 case XDP_DROP:
5077 res = STMMAC_XDP_CONSUMED;
5078 break;
5079 }
5080
5081 return res;
5082 }
5083
5084 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5085 struct xdp_buff *xdp)
5086 {
5087 struct bpf_prog *prog;
5088 int res;
5089
5090 prog = READ_ONCE(priv->xdp_prog);
5091 if (!prog) {
5092 res = STMMAC_XDP_PASS;
5093 goto out;
5094 }
5095
5096 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5097 out:
5098 return ERR_PTR(-res);
5099 }
5100
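/**
 * stmmac_finalize_xdp_rx - finalize the XDP actions of an RX NAPI cycle
 * @priv: driver private structure
 * @xdp_status: OR'ed result of the XDP verdicts seen in this cycle
 * Description: It arms the TX timer if frames were queued via XDP_TX and
 * flushes the redirect maps if any frame was redirected.
 */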
5101 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5102 int xdp_status)
5103 {
5104 int cpu = smp_processor_id();
5105 int queue;
5106
5107 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5108
5109 if (xdp_status & STMMAC_XDP_TX)
5110 stmmac_tx_timer_arm(priv, queue);
5111
5112 if (xdp_status & STMMAC_XDP_REDIRECT)
5113 xdp_do_flush();
5114 }
5115
5116 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5117 struct xdp_buff *xdp)
5118 {
5119 unsigned int metasize = xdp->data - xdp->data_meta;
5120 unsigned int datasize = xdp->data_end - xdp->data;
5121 struct sk_buff *skb;
5122
5123 skb = napi_alloc_skb(&ch->rxtx_napi,
5124 xdp->data_end - xdp->data_hard_start);
5125 if (unlikely(!skb))
5126 return NULL;
5127
5128 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5129 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5130 if (metasize)
5131 skb_metadata_set(skb, metasize);
5132
5133 return skb;
5134 }
5135
5136 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5137 struct dma_desc *p, struct dma_desc *np,
5138 struct xdp_buff *xdp)
5139 {
5140 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5141 struct stmmac_channel *ch = &priv->channel[queue];
5142 unsigned int len = xdp->data_end - xdp->data;
5143 enum pkt_hash_types hash_type;
5144 int coe = priv->hw->rx_csum;
5145 struct sk_buff *skb;
5146 u32 hash;
5147
5148 skb = stmmac_construct_skb_zc(ch, xdp);
5149 if (!skb) {
5150 priv->xstats.rx_dropped++;
5151 return;
5152 }
5153
5154 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5155 if (priv->hw->hw_vlan_en)
5156 /* MAC level stripping. */
5157 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5158 else
5159 /* Driver level stripping. */
5160 stmmac_rx_vlan(priv->dev, skb);
5161 skb->protocol = eth_type_trans(skb, priv->dev);
5162
5163 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5164 skb_checksum_none_assert(skb);
5165 else
5166 skb->ip_summed = CHECKSUM_UNNECESSARY;
5167
5168 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5169 skb_set_hash(skb, hash, hash_type);
5170
5171 skb_record_rx_queue(skb, queue);
5172 napi_gro_receive(&ch->rxtx_napi, skb);
5173
5174 u64_stats_update_begin(&rxq_stats->napi_syncp);
5175 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5176 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5177 u64_stats_update_end(&rxq_stats->napi_syncp);
5178 }
5179
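/* Refill up to @budget dirty RX descriptors with buffers taken from the XSK
 * pool and update the RX tail pointer. Returns false if the pool ran out of
 * buffers before the refill completed.
 */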
5180 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5181 {
5182 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5183 unsigned int entry = rx_q->dirty_rx;
5184 struct dma_desc *rx_desc = NULL;
5185 bool ret = true;
5186
5187 budget = min(budget, stmmac_rx_dirty(priv, queue));
5188
5189 while (budget-- > 0 && entry != rx_q->cur_rx) {
5190 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5191 dma_addr_t dma_addr;
5192 bool use_rx_wd;
5193
5194 if (!buf->xdp) {
5195 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5196 if (!buf->xdp) {
5197 ret = false;
5198 break;
5199 }
5200 }
5201
5202 if (priv->extend_desc)
5203 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5204 else
5205 rx_desc = rx_q->dma_rx + entry;
5206
5207 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5208 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5209 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5210 stmmac_refill_desc3(priv, rx_q, rx_desc);
5211
5212 rx_q->rx_count_frames++;
5213 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5214 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5215 rx_q->rx_count_frames = 0;
5216
5217 use_rx_wd = !priv->rx_coal_frames[queue];
5218 use_rx_wd |= rx_q->rx_count_frames > 0;
5219 if (!priv->use_riwt)
5220 use_rx_wd = false;
5221
5222 dma_wmb();
5223 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5224
5225 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5226 }
5227
5228 if (rx_desc) {
5229 rx_q->dirty_rx = entry;
5230 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5231 (rx_q->dirty_rx * sizeof(struct dma_desc));
5232 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5233 }
5234
5235 return ret;
5236 }
5237
5238 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5239 {
5240 /* In the XDP zero copy data path, the xdp field in struct xdp_buff_xsk is
5241 * used to represent the incoming packet, whereas the cb field in the same
5242 * structure is used to store driver-specific info. Thus, struct stmmac_xdp_buff
5243 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5244 */
5245 return (struct stmmac_xdp_buff *)xdp;
5246 }
5247
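/**
 * stmmac_rx_zc - manage the receive process for AF_XDP zero-copy buffers
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description: called from the rx/tx NAPI poll method when an XSK pool is
 * bound to the queue. Each frame is run through the XDP program and either
 * dropped, transmitted, redirected or copied into an skb for the stack.
 */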
5248 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5249 {
5250 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5251 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5252 unsigned int count = 0, error = 0, len = 0;
5253 int dirty = stmmac_rx_dirty(priv, queue);
5254 unsigned int next_entry = rx_q->cur_rx;
5255 u32 rx_errors = 0, rx_dropped = 0;
5256 unsigned int desc_size;
5257 struct bpf_prog *prog;
5258 bool failure = false;
5259 int xdp_status = 0;
5260 int status = 0;
5261
5262 if (netif_msg_rx_status(priv)) {
5263 void *rx_head;
5264
5265 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5266 if (priv->extend_desc) {
5267 rx_head = (void *)rx_q->dma_erx;
5268 desc_size = sizeof(struct dma_extended_desc);
5269 } else {
5270 rx_head = (void *)rx_q->dma_rx;
5271 desc_size = sizeof(struct dma_desc);
5272 }
5273
5274 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5275 rx_q->dma_rx_phy, desc_size);
5276 }
5277 while (count < limit) {
5278 struct stmmac_rx_buffer *buf;
5279 struct stmmac_xdp_buff *ctx;
5280 unsigned int buf1_len = 0;
5281 struct dma_desc *np, *p;
5282 int entry;
5283 int res;
5284
5285 if (!count && rx_q->state_saved) {
5286 error = rx_q->state.error;
5287 len = rx_q->state.len;
5288 } else {
5289 rx_q->state_saved = false;
5290 error = 0;
5291 len = 0;
5292 }
5293
5294 if (count >= limit)
5295 break;
5296
5297 read_again:
5298 buf1_len = 0;
5299 entry = next_entry;
5300 buf = &rx_q->buf_pool[entry];
5301
5302 if (dirty >= STMMAC_RX_FILL_BATCH) {
5303 failure = failure ||
5304 !stmmac_rx_refill_zc(priv, queue, dirty);
5305 dirty = 0;
5306 }
5307
5308 if (priv->extend_desc)
5309 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5310 else
5311 p = rx_q->dma_rx + entry;
5312
5313 /* read the status of the incoming frame */
5314 status = stmmac_rx_status(priv, &priv->xstats, p);
5315 /* check if managed by the DMA otherwise go ahead */
5316 if (unlikely(status & dma_own))
5317 break;
5318
5319 /* Prefetch the next RX descriptor */
5320 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5321 priv->dma_conf.dma_rx_size);
5322 next_entry = rx_q->cur_rx;
5323
5324 if (priv->extend_desc)
5325 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5326 else
5327 np = rx_q->dma_rx + next_entry;
5328
5329 prefetch(np);
5330
5331 /* Ensure a valid XSK buffer before proceeding */
5332 if (!buf->xdp)
5333 break;
5334
5335 if (priv->extend_desc)
5336 stmmac_rx_extended_status(priv, &priv->xstats,
5337 rx_q->dma_erx + entry);
5338 if (unlikely(status == discard_frame)) {
5339 xsk_buff_free(buf->xdp);
5340 buf->xdp = NULL;
5341 dirty++;
5342 error = 1;
5343 if (!priv->hwts_rx_en)
5344 rx_errors++;
5345 }
5346
5347 if (unlikely(error && (status & rx_not_ls)))
5348 goto read_again;
5349 if (unlikely(error)) {
5350 count++;
5351 continue;
5352 }
5353
5354 /* XSK pool expects RX frames to be mapped 1:1 to XSK buffers */
5355 if (likely(status & rx_not_ls)) {
5356 xsk_buff_free(buf->xdp);
5357 buf->xdp = NULL;
5358 dirty++;
5359 count++;
5360 goto read_again;
5361 }
5362
5363 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5364 ctx->priv = priv;
5365 ctx->desc = p;
5366 ctx->ndesc = np;
5367
5368 /* XDP ZC frames only support the primary buffer for now */
5369 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5370 len += buf1_len;
5371
5372 /* ACS is disabled; strip manually. */
5373 if (likely(!(status & rx_not_ls))) {
5374 buf1_len -= ETH_FCS_LEN;
5375 len -= ETH_FCS_LEN;
5376 }
5377
5378 /* RX buffer is good and fits into an XSK pool buffer */
5379 buf->xdp->data_end = buf->xdp->data + buf1_len;
5380 xsk_buff_dma_sync_for_cpu(buf->xdp);
5381
5382 prog = READ_ONCE(priv->xdp_prog);
5383 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5384
5385 switch (res) {
5386 case STMMAC_XDP_PASS:
5387 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5388 xsk_buff_free(buf->xdp);
5389 break;
5390 case STMMAC_XDP_CONSUMED:
5391 xsk_buff_free(buf->xdp);
5392 rx_dropped++;
5393 break;
5394 case STMMAC_XDP_TX:
5395 case STMMAC_XDP_REDIRECT:
5396 xdp_status |= res;
5397 break;
5398 }
5399
5400 buf->xdp = NULL;
5401 dirty++;
5402 count++;
5403 }
5404
5405 if (status & rx_not_ls) {
5406 rx_q->state_saved = true;
5407 rx_q->state.error = error;
5408 rx_q->state.len = len;
5409 }
5410
5411 stmmac_finalize_xdp_rx(priv, xdp_status);
5412
5413 u64_stats_update_begin(&rxq_stats->napi_syncp);
5414 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5415 u64_stats_update_end(&rxq_stats->napi_syncp);
5416
5417 priv->xstats.rx_dropped += rx_dropped;
5418 priv->xstats.rx_errors += rx_errors;
5419
5420 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5421 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5422 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5423 else
5424 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5425
5426 return (int)count;
5427 }
5428
5429 return failure ? limit : (int)count;
5430 }
5431
5432 /**
5433 * stmmac_rx - manage the receive process
5434 * @priv: driver private structure
5435 * @limit: napi budget
5436 * @queue: RX queue index.
5437 * Description : this is the function called by the napi poll method.
5438 * It gets all the frames inside the ring.
5439 */
5440 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5441 {
5442 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5443 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5444 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5445 struct stmmac_channel *ch = &priv->channel[queue];
5446 unsigned int count = 0, error = 0, len = 0;
5447 int status = 0, coe = priv->hw->rx_csum;
5448 unsigned int next_entry = rx_q->cur_rx;
5449 enum dma_data_direction dma_dir;
5450 unsigned int desc_size;
5451 struct sk_buff *skb = NULL;
5452 struct stmmac_xdp_buff ctx;
5453 int xdp_status = 0;
5454 int buf_sz;
5455
5456 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5457 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5458 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5459
5460 if (netif_msg_rx_status(priv)) {
5461 void *rx_head;
5462
5463 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5464 if (priv->extend_desc) {
5465 rx_head = (void *)rx_q->dma_erx;
5466 desc_size = sizeof(struct dma_extended_desc);
5467 } else {
5468 rx_head = (void *)rx_q->dma_rx;
5469 desc_size = sizeof(struct dma_desc);
5470 }
5471
5472 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5473 rx_q->dma_rx_phy, desc_size);
5474 }
5475 while (count < limit) {
5476 unsigned int buf1_len = 0, buf2_len = 0;
5477 enum pkt_hash_types hash_type;
5478 struct stmmac_rx_buffer *buf;
5479 struct dma_desc *np, *p;
5480 int entry;
5481 u32 hash;
5482
5483 if (!count && rx_q->state_saved) {
5484 skb = rx_q->state.skb;
5485 error = rx_q->state.error;
5486 len = rx_q->state.len;
5487 } else {
5488 rx_q->state_saved = false;
5489 skb = NULL;
5490 error = 0;
5491 len = 0;
5492 }
5493
5494 read_again:
5495 if (count >= limit)
5496 break;
5497
5498 buf1_len = 0;
5499 buf2_len = 0;
5500 entry = next_entry;
5501 buf = &rx_q->buf_pool[entry];
5502
5503 if (priv->extend_desc)
5504 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5505 else
5506 p = rx_q->dma_rx + entry;
5507
5508 /* read the status of the incoming frame */
5509 status = stmmac_rx_status(priv, &priv->xstats, p);
5510 /* check if managed by the DMA otherwise go ahead */
5511 if (unlikely(status & dma_own))
5512 break;
5513
5514 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5515 priv->dma_conf.dma_rx_size);
5516 next_entry = rx_q->cur_rx;
5517
5518 if (priv->extend_desc)
5519 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5520 else
5521 np = rx_q->dma_rx + next_entry;
5522
5523 prefetch(np);
5524
5525 if (priv->extend_desc)
5526 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5527 if (unlikely(status == discard_frame)) {
5528 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5529 buf->page = NULL;
5530 error = 1;
5531 if (!priv->hwts_rx_en)
5532 rx_errors++;
5533 }
5534
5535 if (unlikely(error && (status & rx_not_ls)))
5536 goto read_again;
5537 if (unlikely(error)) {
5538 dev_kfree_skb(skb);
5539 skb = NULL;
5540 count++;
5541 continue;
5542 }
5543
5544 /* Buffer is good. Go on. */
5545
5546 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5547 len += buf1_len;
5548 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5549 len += buf2_len;
5550
5551 /* ACS is disabled; strip manually. */
5552 if (likely(!(status & rx_not_ls))) {
5553 if (buf2_len) {
5554 buf2_len -= ETH_FCS_LEN;
5555 len -= ETH_FCS_LEN;
5556 } else if (buf1_len) {
5557 buf1_len -= ETH_FCS_LEN;
5558 len -= ETH_FCS_LEN;
5559 }
5560 }
5561
5562 if (!skb) {
5563 unsigned int pre_len, sync_len;
5564
5565 dma_sync_single_for_cpu(priv->device, buf->addr,
5566 buf1_len, dma_dir);
5567 net_prefetch(page_address(buf->page) +
5568 buf->page_offset);
5569
5570 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5571 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5572 buf->page_offset, buf1_len, true);
5573
5574 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5575 buf->page_offset;
5576
5577 ctx.priv = priv;
5578 ctx.desc = p;
5579 ctx.ndesc = np;
5580
5581 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5582 /* Due to xdp_adjust_tail: the DMA sync for_device must
5583 * cover the max length the CPU touched
5584 */
5585 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5586 buf->page_offset;
5587 sync_len = max(sync_len, pre_len);
5588
5589 /* For a non-XDP_PASS verdict */
5590 if (IS_ERR(skb)) {
5591 unsigned int xdp_res = -PTR_ERR(skb);
5592
5593 if (xdp_res & STMMAC_XDP_CONSUMED) {
5594 page_pool_put_page(rx_q->page_pool,
5595 virt_to_head_page(ctx.xdp.data),
5596 sync_len, true);
5597 buf->page = NULL;
5598 rx_dropped++;
5599
5600 /* Clear skb as it only carried the
5601 * status set by the XDP program.
5602 */
5603 skb = NULL;
5604
5605 if (unlikely((status & rx_not_ls)))
5606 goto read_again;
5607
5608 count++;
5609 continue;
5610 } else if (xdp_res & (STMMAC_XDP_TX |
5611 STMMAC_XDP_REDIRECT)) {
5612 xdp_status |= xdp_res;
5613 buf->page = NULL;
5614 skb = NULL;
5615 count++;
5616 continue;
5617 }
5618 }
5619 }
5620
5621 if (!skb) {
5622 unsigned int head_pad_len;
5623
5624 /* XDP program may expand or reduce tail */
5625 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5626
5627 skb = napi_build_skb(page_address(buf->page),
5628 rx_q->napi_skb_frag_size);
5629 if (!skb) {
5630 page_pool_recycle_direct(rx_q->page_pool,
5631 buf->page);
5632 rx_dropped++;
5633 count++;
5634 goto drain_data;
5635 }
5636
5637 /* XDP program may adjust header */
5638 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5639 skb_reserve(skb, head_pad_len);
5640 skb_put(skb, buf1_len);
5641 skb_mark_for_recycle(skb);
5642 buf->page = NULL;
5643 } else if (buf1_len) {
5644 dma_sync_single_for_cpu(priv->device, buf->addr,
5645 buf1_len, dma_dir);
5646 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5647 buf->page, buf->page_offset, buf1_len,
5648 priv->dma_conf.dma_buf_sz);
5649 buf->page = NULL;
5650 }
5651
5652 if (buf2_len) {
5653 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5654 buf2_len, dma_dir);
5655 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5656 buf->sec_page, 0, buf2_len,
5657 priv->dma_conf.dma_buf_sz);
5658 buf->sec_page = NULL;
5659 }
5660
5661 drain_data:
5662 if (likely(status & rx_not_ls))
5663 goto read_again;
5664 if (!skb)
5665 continue;
5666
5667 /* Got entire packet into SKB. Finish it. */
5668
5669 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5670
5671 if (priv->hw->hw_vlan_en)
5672 /* MAC level stripping. */
5673 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5674 else
5675 /* Driver level stripping. */
5676 stmmac_rx_vlan(priv->dev, skb);
5677
5678 skb->protocol = eth_type_trans(skb, priv->dev);
5679
5680 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5681 skb_checksum_none_assert(skb);
5682 else
5683 skb->ip_summed = CHECKSUM_UNNECESSARY;
5684
5685 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5686 skb_set_hash(skb, hash, hash_type);
5687
5688 skb_record_rx_queue(skb, queue);
5689 napi_gro_receive(&ch->rx_napi, skb);
5690 skb = NULL;
5691
5692 rx_packets++;
5693 rx_bytes += len;
5694 count++;
5695 }
5696
5697 if (status & rx_not_ls || skb) {
5698 rx_q->state_saved = true;
5699 rx_q->state.skb = skb;
5700 rx_q->state.error = error;
5701 rx_q->state.len = len;
5702 }
5703
5704 stmmac_finalize_xdp_rx(priv, xdp_status);
5705
5706 stmmac_rx_refill(priv, queue);
5707
5708 u64_stats_update_begin(&rxq_stats->napi_syncp);
5709 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5710 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5711 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5712 u64_stats_update_end(&rxq_stats->napi_syncp);
5713
5714 priv->xstats.rx_dropped += rx_dropped;
5715 priv->xstats.rx_errors += rx_errors;
5716
5717 return count;
5718 }
5719
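/* NAPI poll handler for RX-only channels: receive up to @budget frames and
 * re-enable the RX DMA interrupt once all pending work is done.
 */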
5720 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5721 {
5722 struct stmmac_channel *ch =
5723 container_of(napi, struct stmmac_channel, rx_napi);
5724 struct stmmac_priv *priv = ch->priv_data;
5725 struct stmmac_rxq_stats *rxq_stats;
5726 u32 chan = ch->index;
5727 int work_done;
5728
5729 rxq_stats = &priv->xstats.rxq_stats[chan];
5730 u64_stats_update_begin(&rxq_stats->napi_syncp);
5731 u64_stats_inc(&rxq_stats->napi.poll);
5732 u64_stats_update_end(&rxq_stats->napi_syncp);
5733
5734 work_done = stmmac_rx(priv, budget, chan);
5735 if (work_done < budget && napi_complete_done(napi, work_done)) {
5736 unsigned long flags;
5737
5738 spin_lock_irqsave(&ch->lock, flags);
5739 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5740 spin_unlock_irqrestore(&ch->lock, flags);
5741 }
5742
5743 return work_done;
5744 }
5745
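/* NAPI poll handler for TX-only channels: clean up to @budget transmitted
 * frames, re-enable the TX DMA interrupt when done, and re-arm the TX timer
 * if packets are still pending.
 */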
5746 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5747 {
5748 struct stmmac_channel *ch =
5749 container_of(napi, struct stmmac_channel, tx_napi);
5750 struct stmmac_priv *priv = ch->priv_data;
5751 struct stmmac_txq_stats *txq_stats;
5752 bool pending_packets = false;
5753 u32 chan = ch->index;
5754 int work_done;
5755
5756 txq_stats = &priv->xstats.txq_stats[chan];
5757 u64_stats_update_begin(&txq_stats->napi_syncp);
5758 u64_stats_inc(&txq_stats->napi.poll);
5759 u64_stats_update_end(&txq_stats->napi_syncp);
5760
5761 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5762 work_done = min(work_done, budget);
5763
5764 if (work_done < budget && napi_complete_done(napi, work_done)) {
5765 unsigned long flags;
5766
5767 spin_lock_irqsave(&ch->lock, flags);
5768 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5769 spin_unlock_irqrestore(&ch->lock, flags);
5770 }
5771
5772 /* TX still has packets to handle, check if we need to arm the tx timer */
5773 if (pending_packets)
5774 stmmac_tx_timer_arm(priv, chan);
5775
5776 return work_done;
5777 }
5778
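/* Combined rx/tx NAPI poll handler used for AF_XDP zero-copy channels: clean
 * the TX ring, receive via stmmac_rx_zc(), and only re-enable both DMA
 * interrupts once both directions have completed their work.
 */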
5779 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5780 {
5781 struct stmmac_channel *ch =
5782 container_of(napi, struct stmmac_channel, rxtx_napi);
5783 struct stmmac_priv *priv = ch->priv_data;
5784 bool tx_pending_packets = false;
5785 int rx_done, tx_done, rxtx_done;
5786 struct stmmac_rxq_stats *rxq_stats;
5787 struct stmmac_txq_stats *txq_stats;
5788 u32 chan = ch->index;
5789
5790 rxq_stats = &priv->xstats.rxq_stats[chan];
5791 u64_stats_update_begin(&rxq_stats->napi_syncp);
5792 u64_stats_inc(&rxq_stats->napi.poll);
5793 u64_stats_update_end(&rxq_stats->napi_syncp);
5794
5795 txq_stats = &priv->xstats.txq_stats[chan];
5796 u64_stats_update_begin(&txq_stats->napi_syncp);
5797 u64_stats_inc(&txq_stats->napi.poll);
5798 u64_stats_update_end(&txq_stats->napi_syncp);
5799
5800 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5801 tx_done = min(tx_done, budget);
5802
5803 rx_done = stmmac_rx_zc(priv, budget, chan);
5804
5805 rxtx_done = max(tx_done, rx_done);
5806
5807 /* If either TX or RX work is not complete, return budget
5808 * and keep polling
5809 */
5810 if (rxtx_done >= budget)
5811 return budget;
5812
5813 /* all work done, exit the polling mode */
5814 if (napi_complete_done(napi, rxtx_done)) {
5815 unsigned long flags;
5816
5817 spin_lock_irqsave(&ch->lock, flags);
5818 /* Both RX and TX work are complete,
5819 * so enable both RX & TX IRQs.
5820 */
5821 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5822 spin_unlock_irqrestore(&ch->lock, flags);
5823 }
5824
5825 /* TX still has packets to handle, check if we need to arm the tx timer */
5826 if (tx_pending_packets)
5827 stmmac_tx_timer_arm(priv, chan);
5828
5829 return min(rxtx_done, budget - 1);
5830 }
5831
5832 /**
5833 * stmmac_tx_timeout
5834 * @dev : Pointer to net device structure
5835 * @txqueue: the index of the hanging transmit queue
5836 * Description: this function is called when a packet transmission fails to
5837 * complete within a reasonable time. The driver will mark the error in the
5838 * netdev structure and arrange for the device to be reset to a sane state
5839 * in order to transmit a new packet.
5840 */
5841 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5842 {
5843 struct stmmac_priv *priv = netdev_priv(dev);
5844
5845 stmmac_global_err(priv);
5846 }
5847
5848 /**
5849 * stmmac_set_rx_mode - entry point for multicast addressing
5850 * @dev : pointer to the device structure
5851 * Description:
5852 * This function is a driver entry point which gets called by the kernel
5853 * whenever multicast addresses must be enabled/disabled.
5854 * Return value:
5855 * void.
5856 */
5857 static void stmmac_set_rx_mode(struct net_device *dev)
5858 {
5859 struct stmmac_priv *priv = netdev_priv(dev);
5860
5861 stmmac_set_filter(priv, priv->hw, dev);
5862 }
5863
5864 /**
5865 * stmmac_change_mtu - entry point to change MTU size for the device.
5866 * @dev : device pointer.
5867 * @new_mtu : the new MTU size for the device.
5868 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
5869 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5870 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5871 * Return value:
5872 * 0 on success or an appropriate negative error code, as defined in
5873 * errno.h, on failure.
5874 */
5875 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5876 {
5877 struct stmmac_priv *priv = netdev_priv(dev);
5878 int txfifosz = priv->plat->tx_fifo_size;
5879 struct stmmac_dma_conf *dma_conf;
5880 const int mtu = new_mtu;
5881 int ret;
5882
5883 if (txfifosz == 0)
5884 txfifosz = priv->dma_cap.tx_fifo_size;
5885
5886 txfifosz /= priv->plat->tx_queues_to_use;
5887
5888 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5889 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5890 return -EINVAL;
5891 }
5892
5893 new_mtu = STMMAC_ALIGN(new_mtu);
5894
5895 /* If this condition is true, the FIFO is too small or the MTU is too large */
5896 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5897 return -EINVAL;
5898
5899 if (netif_running(dev)) {
5900 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5901 /* Try to allocate the new DMA conf with the new mtu */
5902 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5903 if (IS_ERR(dma_conf)) {
5904 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5905 mtu);
5906 return PTR_ERR(dma_conf);
5907 }
5908
5909 stmmac_release(dev);
5910
5911 ret = __stmmac_open(dev, dma_conf);
5912 if (ret) {
5913 free_dma_desc_resources(priv, dma_conf);
5914 kfree(dma_conf);
5915 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5916 return ret;
5917 }
5918
5919 kfree(dma_conf);
5920
5921 stmmac_set_rx_mode(dev);
5922 }
5923
5924 WRITE_ONCE(dev->mtu, mtu);
5925 netdev_update_features(dev);
5926
5927 return 0;
5928 }
5929
5930 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5931 netdev_features_t features)
5932 {
5933 struct stmmac_priv *priv = netdev_priv(dev);
5934
5935 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5936 features &= ~NETIF_F_RXCSUM;
5937
5938 if (!priv->plat->tx_coe)
5939 features &= ~NETIF_F_CSUM_MASK;
5940
5941 /* Some GMAC devices have buggy Jumbo frame support that
5942 * requires the Tx COE to be disabled for oversized frames
5943 * (due to limited buffer sizes). In this case we disable
5944 * the TX csum insertion in the TDES and do not use SF.
5945 */
5946 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5947 features &= ~NETIF_F_CSUM_MASK;
5948
5949 /* Disable tso if asked by ethtool */
5950 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5951 if (features & NETIF_F_TSO)
5952 priv->tso = true;
5953 else
5954 priv->tso = false;
5955 }
5956
5957 return features;
5958 }
5959
5960 static int stmmac_set_features(struct net_device *netdev,
5961 netdev_features_t features)
5962 {
5963 struct stmmac_priv *priv = netdev_priv(netdev);
5964
5965 /* Keep the COE type if checksum offload is supported */
5966 if (features & NETIF_F_RXCSUM)
5967 priv->hw->rx_csum = priv->plat->rx_coe;
5968 else
5969 priv->hw->rx_csum = 0;
5970 /* No check needed because rx_coe has already been set and will be
5971 * fixed up if there is an issue.
5972 */
5973 stmmac_rx_ipc(priv, priv->hw);
5974
5975 if (priv->sph_cap) {
5976 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5977 u32 chan;
5978
5979 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5980 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5981 }
5982
5983 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5984 priv->hw->hw_vlan_en = true;
5985 else
5986 priv->hw->hw_vlan_en = false;
5987
5988 stmmac_set_hw_vlan_mode(priv, priv->hw);
5989
5990 return 0;
5991 }
5992
5993 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5994 {
5995 u32 rx_cnt = priv->plat->rx_queues_to_use;
5996 u32 tx_cnt = priv->plat->tx_queues_to_use;
5997 u32 queues_count;
5998 u32 queue;
5999 bool xmac;
6000
6001 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6002 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6003
6004 if (priv->irq_wake)
6005 pm_wakeup_event(priv->device, 0);
6006
6007 if (priv->dma_cap.estsel)
6008 stmmac_est_irq_status(priv, priv, priv->dev,
6009 &priv->xstats, tx_cnt);
6010
6011 if (stmmac_fpe_supported(priv))
6012 stmmac_fpe_irq_status(priv);
6013
6014 /* To handle the GMAC's own interrupts */
6015 if ((priv->plat->has_gmac) || xmac) {
6016 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6017
6018 if (unlikely(status)) {
6019 /* For LPI we need to save the tx status */
6020 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6021 priv->tx_path_in_lpi_mode = true;
6022 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6023 priv->tx_path_in_lpi_mode = false;
6024 }
6025
6026 for (queue = 0; queue < queues_count; queue++)
6027 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6028
6029 /* PCS link status */
6030 if (priv->hw->pcs &&
6031 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6032 if (priv->xstats.pcs_link)
6033 netif_carrier_on(priv->dev);
6034 else
6035 netif_carrier_off(priv->dev);
6036 }
6037
6038 stmmac_timestamp_interrupt(priv, priv);
6039 }
6040 }
6041
6042 /**
6043 * stmmac_interrupt - main ISR
6044 * @irq: interrupt number.
6045 * @dev_id: to pass the net device pointer.
6046 * Description: this is the main driver interrupt service routine.
6047 * It can call:
6048 * o DMA service routine (to manage incoming frame reception and transmission
6049 * status)
6050 * o Core interrupts to manage: remote wake-up, management counter, LPI
6051 * interrupts.
6052 */
6053 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6054 {
6055 struct net_device *dev = (struct net_device *)dev_id;
6056 struct stmmac_priv *priv = netdev_priv(dev);
6057
6058 /* Check if adapter is up */
6059 if (test_bit(STMMAC_DOWN, &priv->state))
6060 return IRQ_HANDLED;
6061
6062 /* Check ASP error if it isn't delivered via an individual IRQ */
6063 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6064 return IRQ_HANDLED;
6065
6066 /* To handle Common interrupts */
6067 stmmac_common_interrupt(priv);
6068
6069 /* To handle DMA interrupts */
6070 stmmac_dma_interrupt(priv);
6071
6072 return IRQ_HANDLED;
6073 }
6074
6075 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6076 {
6077 struct net_device *dev = (struct net_device *)dev_id;
6078 struct stmmac_priv *priv = netdev_priv(dev);
6079
6080 /* Check if adapter is up */
6081 if (test_bit(STMMAC_DOWN, &priv->state))
6082 return IRQ_HANDLED;
6083
6084 /* To handle Common interrupts */
6085 stmmac_common_interrupt(priv);
6086
6087 return IRQ_HANDLED;
6088 }
6089
6090 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6091 {
6092 struct net_device *dev = (struct net_device *)dev_id;
6093 struct stmmac_priv *priv = netdev_priv(dev);
6094
6095 /* Check if adapter is up */
6096 if (test_bit(STMMAC_DOWN, &priv->state))
6097 return IRQ_HANDLED;
6098
6099 /* Check if a fatal error happened */
6100 stmmac_safety_feat_interrupt(priv);
6101
6102 return IRQ_HANDLED;
6103 }
6104
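/* Per-channel TX interrupt handler used when the platform provides dedicated
 * (MSI) vectors: check the channel's DMA status via stmmac_napi_check() and
 * handle hard TX errors by bumping the DMA threshold or resetting the channel.
 */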
6105 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6106 {
6107 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6108 struct stmmac_dma_conf *dma_conf;
6109 int chan = tx_q->queue_index;
6110 struct stmmac_priv *priv;
6111 int status;
6112
6113 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6114 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6115
6116 /* Check if adapter is up */
6117 if (test_bit(STMMAC_DOWN, &priv->state))
6118 return IRQ_HANDLED;
6119
6120 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6121
6122 if (unlikely(status & tx_hard_error_bump_tc)) {
6123 /* Try to bump up the dma threshold on this failure */
6124 stmmac_bump_dma_threshold(priv, chan);
6125 } else if (unlikely(status == tx_hard_error)) {
6126 stmmac_tx_err(priv, chan);
6127 }
6128
6129 return IRQ_HANDLED;
6130 }
6131
6132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6133 {
6134 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6135 struct stmmac_dma_conf *dma_conf;
6136 int chan = rx_q->queue_index;
6137 struct stmmac_priv *priv;
6138
6139 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6140 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6141
6142 /* Check if adapter is up */
6143 if (test_bit(STMMAC_DOWN, &priv->state))
6144 return IRQ_HANDLED;
6145
6146 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6147
6148 return IRQ_HANDLED;
6149 }
6150
6151 /**
6152 * stmmac_ioctl - Entry point for the Ioctl
6153 * @dev: Device pointer.
6154 * @rq: An IOCTL-specific structure that can contain a pointer to
6155 * a proprietary structure used to pass information to the driver.
6156 * @cmd: IOCTL command
6157 * Description:
6158 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6159 */
6160 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6161 {
6162 struct stmmac_priv *priv = netdev_priv(dev);
6163 int ret = -EOPNOTSUPP;
6164
6165 if (!netif_running(dev))
6166 return -EINVAL;
6167
6168 switch (cmd) {
6169 case SIOCGMIIPHY:
6170 case SIOCGMIIREG:
6171 case SIOCSMIIREG:
6172 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6173 break;
6174 case SIOCSHWTSTAMP:
6175 ret = stmmac_hwtstamp_set(dev, rq);
6176 break;
6177 case SIOCGHWTSTAMP:
6178 ret = stmmac_hwtstamp_get(dev, rq);
6179 break;
6180 default:
6181 break;
6182 }
6183
6184 return ret;
6185 }
6186
6187 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6188 void *cb_priv)
6189 {
6190 struct stmmac_priv *priv = cb_priv;
6191 int ret = -EOPNOTSUPP;
6192
6193 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6194 return ret;
6195
6196 __stmmac_disable_all_queues(priv);
6197
6198 switch (type) {
6199 case TC_SETUP_CLSU32:
6200 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6201 break;
6202 case TC_SETUP_CLSFLOWER:
6203 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6204 break;
6205 default:
6206 break;
6207 }
6208
6209 stmmac_enable_all_queues(priv);
6210 return ret;
6211 }
6212
6213 static LIST_HEAD(stmmac_block_cb_list);
6214
6215 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6216 void *type_data)
6217 {
6218 struct stmmac_priv *priv = netdev_priv(ndev);
6219
6220 switch (type) {
6221 case TC_QUERY_CAPS:
6222 return stmmac_tc_query_caps(priv, priv, type_data);
6223 case TC_SETUP_QDISC_MQPRIO:
6224 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6225 case TC_SETUP_BLOCK:
6226 return flow_block_cb_setup_simple(type_data,
6227 &stmmac_block_cb_list,
6228 stmmac_setup_tc_block_cb,
6229 priv, priv, true);
6230 case TC_SETUP_QDISC_CBS:
6231 return stmmac_tc_setup_cbs(priv, priv, type_data);
6232 case TC_SETUP_QDISC_TAPRIO:
6233 return stmmac_tc_setup_taprio(priv, priv, type_data);
6234 case TC_SETUP_QDISC_ETF:
6235 return stmmac_tc_setup_etf(priv, priv, type_data);
6236 default:
6237 return -EOPNOTSUPP;
6238 }
6239 }
6240
6241 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6242 struct net_device *sb_dev)
6243 {
6244 int gso = skb_shinfo(skb)->gso_type;
6245
6246 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6247 /*
6248 * There is no way to determine the number of TSO/USO
6249 * capable Queues. Let's always use Queue 0
6250 * because if TSO/USO is supported then at least this
6251 * one will be capable.
6252 */
6253 return 0;
6254 }
6255
6256 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6257 }
6258
6259 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6260 {
6261 struct stmmac_priv *priv = netdev_priv(ndev);
6262 int ret = 0;
6263
6264 ret = pm_runtime_resume_and_get(priv->device);
6265 if (ret < 0)
6266 return ret;
6267
6268 ret = eth_mac_addr(ndev, addr);
6269 if (ret)
6270 goto set_mac_error;
6271
6272 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6273
6274 set_mac_error:
6275 pm_runtime_put(priv->device);
6276
6277 return ret;
6278 }
6279
6280 #ifdef CONFIG_DEBUG_FS
6281 static struct dentry *stmmac_fs_dir;
6282
6283 static void sysfs_display_ring(void *head, int size, int extend_desc,
6284 struct seq_file *seq, dma_addr_t dma_phy_addr)
6285 {
6286 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6287 struct dma_desc *p = (struct dma_desc *)head;
6288 unsigned int desc_size;
6289 dma_addr_t dma_addr;
6290 int i;
6291
6292 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6293 for (i = 0; i < size; i++) {
6294 dma_addr = dma_phy_addr + i * desc_size;
6295 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6296 i, &dma_addr,
6297 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6298 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6299 if (extend_desc)
6300 p = &(++ep)->basic;
6301 else
6302 p++;
6303 }
6304 }
6305
6306 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6307 {
6308 struct net_device *dev = seq->private;
6309 struct stmmac_priv *priv = netdev_priv(dev);
6310 u32 rx_count = priv->plat->rx_queues_to_use;
6311 u32 tx_count = priv->plat->tx_queues_to_use;
6312 u32 queue;
6313
6314 if ((dev->flags & IFF_UP) == 0)
6315 return 0;
6316
6317 for (queue = 0; queue < rx_count; queue++) {
6318 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6319
6320 seq_printf(seq, "RX Queue %d:\n", queue);
6321
6322 if (priv->extend_desc) {
6323 seq_printf(seq, "Extended descriptor ring:\n");
6324 sysfs_display_ring((void *)rx_q->dma_erx,
6325 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6326 } else {
6327 seq_printf(seq, "Descriptor ring:\n");
6328 sysfs_display_ring((void *)rx_q->dma_rx,
6329 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6330 }
6331 }
6332
6333 for (queue = 0; queue < tx_count; queue++) {
6334 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6335
6336 seq_printf(seq, "TX Queue %d:\n", queue);
6337
6338 if (priv->extend_desc) {
6339 seq_printf(seq, "Extended descriptor ring:\n");
6340 sysfs_display_ring((void *)tx_q->dma_etx,
6341 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6342 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6343 seq_printf(seq, "Descriptor ring:\n");
6344 sysfs_display_ring((void *)tx_q->dma_tx,
6345 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6346 }
6347 }
6348
6349 return 0;
6350 }
6351 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6352
6353 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6354 {
6355 static const char * const dwxgmac_timestamp_source[] = {
6356 "None",
6357 "Internal",
6358 "External",
6359 "Both",
6360 };
6361 static const char * const dwxgmac_safety_feature_desc[] = {
6362 "No",
6363 "All Safety Features with ECC and Parity",
6364 "All Safety Features without ECC or Parity",
6365 "All Safety Features with Parity Only",
6366 "ECC Only",
6367 "UNDEFINED",
6368 "UNDEFINED",
6369 "UNDEFINED",
6370 };
6371 struct net_device *dev = seq->private;
6372 struct stmmac_priv *priv = netdev_priv(dev);
6373
6374 if (!priv->hw_cap_support) {
6375 seq_printf(seq, "DMA HW features not supported\n");
6376 return 0;
6377 }
6378
6379 seq_printf(seq, "==============================\n");
6380 seq_printf(seq, "\tDMA HW features\n");
6381 seq_printf(seq, "==============================\n");
6382
6383 seq_printf(seq, "\t10/100 Mbps: %s\n",
6384 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6385 seq_printf(seq, "\t1000 Mbps: %s\n",
6386 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6387 seq_printf(seq, "\tHalf duplex: %s\n",
6388 (priv->dma_cap.half_duplex) ? "Y" : "N");
6389 if (priv->plat->has_xgmac) {
6390 seq_printf(seq,
6391 "\tNumber of Additional MAC address registers: %d\n",
6392 priv->dma_cap.multi_addr);
6393 } else {
6394 seq_printf(seq, "\tHash Filter: %s\n",
6395 (priv->dma_cap.hash_filter) ? "Y" : "N");
6396 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6397 (priv->dma_cap.multi_addr) ? "Y" : "N");
6398 }
6399 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6400 (priv->dma_cap.pcs) ? "Y" : "N");
6401 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6402 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6403 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6404 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6405 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6406 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6407 seq_printf(seq, "\tRMON module: %s\n",
6408 (priv->dma_cap.rmon) ? "Y" : "N");
6409 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6410 (priv->dma_cap.time_stamp) ? "Y" : "N");
6411 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6412 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6413 if (priv->plat->has_xgmac)
6414 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6415 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6416 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6417 (priv->dma_cap.eee) ? "Y" : "N");
6418 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6419 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6420 (priv->dma_cap.tx_coe) ? "Y" : "N");
6421 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6422 priv->plat->has_xgmac) {
6423 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6424 (priv->dma_cap.rx_coe) ? "Y" : "N");
6425 } else {
6426 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6427 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6428 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6429 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6430 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6431 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6432 }
6433 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6434 priv->dma_cap.number_rx_channel);
6435 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6436 priv->dma_cap.number_tx_channel);
6437 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6438 priv->dma_cap.number_rx_queues);
6439 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6440 priv->dma_cap.number_tx_queues);
6441 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6442 (priv->dma_cap.enh_desc) ? "Y" : "N");
6443 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6444 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6445 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6446 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6447 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6448 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6449 priv->dma_cap.pps_out_num);
6450 seq_printf(seq, "\tSafety Features: %s\n",
6451 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6452 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6453 priv->dma_cap.frpsel ? "Y" : "N");
6454 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6455 priv->dma_cap.host_dma_width);
6456 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6457 priv->dma_cap.rssen ? "Y" : "N");
6458 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6459 priv->dma_cap.vlhash ? "Y" : "N");
6460 seq_printf(seq, "\tSplit Header: %s\n",
6461 priv->dma_cap.sphen ? "Y" : "N");
6462 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6463 priv->dma_cap.vlins ? "Y" : "N");
6464 seq_printf(seq, "\tDouble VLAN: %s\n",
6465 priv->dma_cap.dvlan ? "Y" : "N");
6466 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6467 priv->dma_cap.l3l4fnum);
6468 seq_printf(seq, "\tARP Offloading: %s\n",
6469 priv->dma_cap.arpoffsel ? "Y" : "N");
6470 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6471 priv->dma_cap.estsel ? "Y" : "N");
6472 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6473 priv->dma_cap.fpesel ? "Y" : "N");
6474 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6475 priv->dma_cap.tbssel ? "Y" : "N");
6476 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6477 priv->dma_cap.tbs_ch_num);
6478 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6479 priv->dma_cap.sgfsel ? "Y" : "N");
6480 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6481 BIT(priv->dma_cap.ttsfd) >> 1);
6482 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6483 priv->dma_cap.numtc);
6484 seq_printf(seq, "\tDCB Feature: %s\n",
6485 priv->dma_cap.dcben ? "Y" : "N");
6486 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6487 priv->dma_cap.advthword ? "Y" : "N");
6488 seq_printf(seq, "\tPTP Offload: %s\n",
6489 priv->dma_cap.ptoen ? "Y" : "N");
6490 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6491 priv->dma_cap.osten ? "Y" : "N");
6492 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6493 priv->dma_cap.pfcen ? "Y" : "N");
6494 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6495 BIT(priv->dma_cap.frpes) << 6);
6496 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6497 BIT(priv->dma_cap.frpbs) << 6);
6498 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6499 priv->dma_cap.frppipe_num);
6500 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6501 priv->dma_cap.nrvf_num ?
6502 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6503 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6504 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6505 seq_printf(seq, "\tDepth of GCL: %lu\n",
6506 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6507 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6508 priv->dma_cap.cbtisel ? "Y" : "N");
6509 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6510 priv->dma_cap.aux_snapshot_n);
6511 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6512 priv->dma_cap.pou_ost_en ? "Y" : "N");
6513 seq_printf(seq, "\tEnhanced DMA: %s\n",
6514 priv->dma_cap.edma ? "Y" : "N");
6515 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6516 priv->dma_cap.ediffc ? "Y" : "N");
6517 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6518 priv->dma_cap.vxn ? "Y" : "N");
6519 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6520 priv->dma_cap.dbgmem ? "Y" : "N");
6521 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6522 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6523 return 0;
6524 }
6525 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6526
6527 /* Use network device events to rename debugfs file entries.
6528 */
6529 static int stmmac_device_event(struct notifier_block *unused,
6530 unsigned long event, void *ptr)
6531 {
6532 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6533 struct stmmac_priv *priv = netdev_priv(dev);
6534
6535 if (dev->netdev_ops != &stmmac_netdev_ops)
6536 goto done;
6537
6538 switch (event) {
6539 case NETDEV_CHANGENAME:
6540 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6541 break;
6542 }
6543 done:
6544 return NOTIFY_DONE;
6545 }
6546
6547 static struct notifier_block stmmac_notifier = {
6548 .notifier_call = stmmac_device_event,
6549 };
6550
6551 static void stmmac_init_fs(struct net_device *dev)
6552 {
6553 struct stmmac_priv *priv = netdev_priv(dev);
6554
6555 rtnl_lock();
6556
6557 /* Create per netdev entries */
6558 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6559
6560 /* Entry to report DMA RX/TX rings */
6561 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6562 &stmmac_rings_status_fops);
6563
6564 /* Entry to report the DMA HW features */
6565 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6566 &stmmac_dma_cap_fops);
6567
6568 rtnl_unlock();
6569 }
6570
6571 static void stmmac_exit_fs(struct net_device *dev)
6572 {
6573 struct stmmac_priv *priv = netdev_priv(dev);
6574
6575 debugfs_remove_recursive(priv->dbgfs_dir);
6576 }
6577 #endif /* CONFIG_DEBUG_FS */
6578
6579 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6580 {
6581 unsigned char *data = (unsigned char *)&vid_le;
6582 unsigned char data_byte = 0;
6583 u32 crc = ~0x0;
6584 u32 temp = 0;
6585 int i, bits;
6586
6587 bits = get_bitmask_order(VLAN_VID_MASK);
6588 for (i = 0; i < bits; i++) {
6589 if ((i % 8) == 0)
6590 data_byte = data[i / 8];
6591
6592 temp = ((crc & 1) ^ data_byte) & 1;
6593 crc >>= 1;
6594 data_byte >>= 1;
6595
6596 if (temp)
6597 crc ^= 0xedb88320;
6598 }
6599
6600 return crc;
6601 }
6602
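/* Recompute the VLAN filter from the active VLAN bitmap: build the CRC-based
 * hash when VLAN hash filtering is available, otherwise fall back to a single
 * perfect-match entry.
 */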
6603 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6604 {
6605 u32 crc, hash = 0;
6606 u16 pmatch = 0;
6607 int count = 0;
6608 u16 vid = 0;
6609
6610 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6611 __le16 vid_le = cpu_to_le16(vid);
6612 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6613 hash |= (1 << crc);
6614 count++;
6615 }
6616
6617 if (!priv->dma_cap.vlhash) {
6618 if (count > 2) /* VID = 0 always passes filter */
6619 return -EOPNOTSUPP;
6620
6621 pmatch = vid;
6622 hash = 0;
6623 }
6624
6625 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6626 }
6627
6628 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6629 {
6630 struct stmmac_priv *priv = netdev_priv(ndev);
6631 bool is_double = false;
6632 int ret;
6633
6634 ret = pm_runtime_resume_and_get(priv->device);
6635 if (ret < 0)
6636 return ret;
6637
6638 if (be16_to_cpu(proto) == ETH_P_8021AD)
6639 is_double = true;
6640
6641 set_bit(vid, priv->active_vlans);
6642 ret = stmmac_vlan_update(priv, is_double);
6643 if (ret) {
6644 clear_bit(vid, priv->active_vlans);
6645 goto err_pm_put;
6646 }
6647
6648 if (priv->hw->num_vlan) {
6649 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6650 if (ret)
6651 goto err_pm_put;
6652 }
6653 err_pm_put:
6654 pm_runtime_put(priv->device);
6655
6656 return ret;
6657 }
6658
6659 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6660 {
6661 struct stmmac_priv *priv = netdev_priv(ndev);
6662 bool is_double = false;
6663 int ret;
6664
6665 ret = pm_runtime_resume_and_get(priv->device);
6666 if (ret < 0)
6667 return ret;
6668
6669 if (be16_to_cpu(proto) == ETH_P_8021AD)
6670 is_double = true;
6671
6672 clear_bit(vid, priv->active_vlans);
6673
6674 if (priv->hw->num_vlan) {
6675 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6676 if (ret)
6677 goto del_vlan_error;
6678 }
6679
6680 ret = stmmac_vlan_update(priv, is_double);
6681
6682 del_vlan_error:
6683 pm_runtime_put(priv->device);
6684
6685 return ret;
6686 }
6687
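/* ndo_bpf entry point: dispatch XDP program attach/detach and XSK pool setup
 * requests.
 */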
6688 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6689 {
6690 struct stmmac_priv *priv = netdev_priv(dev);
6691
6692 switch (bpf->command) {
6693 case XDP_SETUP_PROG:
6694 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6695 case XDP_SETUP_XSK_POOL:
6696 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6697 bpf->xsk.queue_id);
6698 default:
6699 return -EOPNOTSUPP;
6700 }
6701 }
6702
6703 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6704 struct xdp_frame **frames, u32 flags)
6705 {
6706 struct stmmac_priv *priv = netdev_priv(dev);
6707 int cpu = smp_processor_id();
6708 struct netdev_queue *nq;
6709 int i, nxmit = 0;
6710 int queue;
6711
6712 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6713 return -ENETDOWN;
6714
6715 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6716 return -EINVAL;
6717
6718 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6719 nq = netdev_get_tx_queue(priv->dev, queue);
6720
6721 __netif_tx_lock(nq, cpu);
6722 /* Avoids TX time-out as we are sharing with slow path */
6723 txq_trans_cond_update(nq);
6724
6725 for (i = 0; i < num_frames; i++) {
6726 int res;
6727
6728 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6729 if (res == STMMAC_XDP_CONSUMED)
6730 break;
6731
6732 nxmit++;
6733 }
6734
6735 if (flags & XDP_XMIT_FLUSH) {
6736 stmmac_flush_tx_descriptors(priv, queue);
6737 stmmac_tx_timer_arm(priv, queue);
6738 }
6739
6740 __netif_tx_unlock(nq);
6741
6742 return nxmit;
6743 }
6744
6745 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6746 {
6747 struct stmmac_channel *ch = &priv->channel[queue];
6748 unsigned long flags;
6749
6750 spin_lock_irqsave(&ch->lock, flags);
6751 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6752 spin_unlock_irqrestore(&ch->lock, flags);
6753
6754 stmmac_stop_rx_dma(priv, queue);
6755 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6756 }
6757
6758 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6759 {
6760 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6761 struct stmmac_channel *ch = &priv->channel[queue];
6762 unsigned long flags;
6763 u32 buf_size;
6764 int ret;
6765
6766 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6767 if (ret) {
6768 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6769 return;
6770 }
6771
6772 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6773 if (ret) {
6774 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6775 netdev_err(priv->dev, "Failed to init RX desc.\n");
6776 return;
6777 }
6778
6779 stmmac_reset_rx_queue(priv, queue);
6780 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6781
6782 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6783 rx_q->dma_rx_phy, rx_q->queue_index);
6784
6785 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6786 sizeof(struct dma_desc));
6787 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6788 rx_q->rx_tail_addr, rx_q->queue_index);
6789
6790 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6791 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6792 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6793 buf_size,
6794 rx_q->queue_index);
6795 } else {
6796 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6797 priv->dma_conf.dma_buf_sz,
6798 rx_q->queue_index);
6799 }
6800
6801 stmmac_start_rx_dma(priv, queue);
6802
6803 spin_lock_irqsave(&ch->lock, flags);
6804 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6805 spin_unlock_irqrestore(&ch->lock, flags);
6806 }
6807
6808 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6809 {
6810 struct stmmac_channel *ch = &priv->channel[queue];
6811 unsigned long flags;
6812
6813 spin_lock_irqsave(&ch->lock, flags);
6814 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6815 spin_unlock_irqrestore(&ch->lock, flags);
6816
6817 stmmac_stop_tx_dma(priv, queue);
6818 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6819 }
6820
6821 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6822 {
6823 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6824 struct stmmac_channel *ch = &priv->channel[queue];
6825 unsigned long flags;
6826 int ret;
6827
6828 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6829 if (ret) {
6830 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6831 return;
6832 }
6833
6834 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6835 if (ret) {
6836 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6837 netdev_err(priv->dev, "Failed to init TX desc.\n");
6838 return;
6839 }
6840
6841 stmmac_reset_tx_queue(priv, queue);
6842 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6843
6844 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6845 tx_q->dma_tx_phy, tx_q->queue_index);
6846
6847 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6848 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6849
6850 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6851 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6852 tx_q->tx_tail_addr, tx_q->queue_index);
6853
6854 stmmac_start_tx_dma(priv, queue);
6855
6856 spin_lock_irqsave(&ch->lock, flags);
6857 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6858 spin_unlock_irqrestore(&ch->lock, flags);
6859 }
6860
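/* Tear down the datapath for an XDP reconfiguration: stop the TX queues,
 * NAPI, timers, IRQs and DMA, then free the descriptor resources and disable
 * the MAC.
 */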
6861 void stmmac_xdp_release(struct net_device *dev)
6862 {
6863 struct stmmac_priv *priv = netdev_priv(dev);
6864 u32 chan;
6865
6866 /* Ensure tx function is not running */
6867 netif_tx_disable(dev);
6868
6869 /* Disable NAPI process */
6870 stmmac_disable_all_queues(priv);
6871
6872 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6873 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6874
6875 /* Free the IRQ lines */
6876 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6877
6878 /* Stop TX/RX DMA channels */
6879 stmmac_stop_all_dma(priv);
6880
6881 /* Release and free the Rx/Tx resources */
6882 free_dma_desc_resources(priv, &priv->dma_conf);
6883
6884 /* Disable the MAC Rx/Tx */
6885 stmmac_mac_set(priv, priv->ioaddr, false);
6886
6887 /* set trans_start so we don't get spurious
6888 * watchdogs during reset
6889 */
6890 netif_trans_update(dev);
6891 netif_carrier_off(dev);
6892 }
6893
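/* Counterpart of stmmac_xdp_release(): re-allocate and re-initialize the
 * descriptor rings, reprogram the DMA channels, restart the MAC, and
 * re-enable NAPI, queues and interrupts.
 */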
6894 int stmmac_xdp_open(struct net_device *dev)
6895 {
6896 struct stmmac_priv *priv = netdev_priv(dev);
6897 u32 rx_cnt = priv->plat->rx_queues_to_use;
6898 u32 tx_cnt = priv->plat->tx_queues_to_use;
6899 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6900 struct stmmac_rx_queue *rx_q;
6901 struct stmmac_tx_queue *tx_q;
6902 u32 buf_size;
6903 bool sph_en;
6904 u32 chan;
6905 int ret;
6906
6907 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6908 if (ret < 0) {
6909 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6910 __func__);
6911 goto dma_desc_error;
6912 }
6913
6914 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6915 if (ret < 0) {
6916 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6917 __func__);
6918 goto init_error;
6919 }
6920
6921 stmmac_reset_queues_param(priv);
6922
6923 /* DMA CSR Channel configuration */
6924 for (chan = 0; chan < dma_csr_ch; chan++) {
6925 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6926 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6927 }
6928
6929 /* Adjust Split header */
6930 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6931
6932 /* DMA RX Channel Configuration */
6933 for (chan = 0; chan < rx_cnt; chan++) {
6934 rx_q = &priv->dma_conf.rx_queue[chan];
6935
6936 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6937 rx_q->dma_rx_phy, chan);
6938
6939 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6940 (rx_q->buf_alloc_num *
6941 sizeof(struct dma_desc));
6942 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6943 rx_q->rx_tail_addr, chan);
6944
6945 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6946 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6947 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6948 buf_size,
6949 rx_q->queue_index);
6950 } else {
6951 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6952 priv->dma_conf.dma_buf_sz,
6953 rx_q->queue_index);
6954 }
6955
6956 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6957 }
6958
6959 /* DMA TX Channel Configuration */
6960 for (chan = 0; chan < tx_cnt; chan++) {
6961 tx_q = &priv->dma_conf.tx_queue[chan];
6962
6963 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6964 tx_q->dma_tx_phy, chan);
6965
6966 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6967 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6968 tx_q->tx_tail_addr, chan);
6969
6970 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6971 tx_q->txtimer.function = stmmac_tx_timer;
6972 }
6973
6974 /* Enable the MAC Rx/Tx */
6975 stmmac_mac_set(priv, priv->ioaddr, true);
6976
6977 /* Start Rx & Tx DMA Channels */
6978 stmmac_start_all_dma(priv);
6979
6980 ret = stmmac_request_irq(dev);
6981 if (ret)
6982 goto irq_error;
6983
6984 /* Enable NAPI process */
6985 stmmac_enable_all_queues(priv);
6986 netif_carrier_on(dev);
6987 netif_tx_start_all_queues(dev);
6988 stmmac_enable_all_dma_irq(priv);
6989
6990 return 0;
6991
6992 irq_error:
6993 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6994 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6995
6996 stmmac_hw_teardown(dev);
6997 init_error:
6998 free_dma_desc_resources(priv, &priv->dma_conf);
6999 dma_desc_error:
7000 return ret;
7001 }
7002
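/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup callback for AF_XDP
 * @dev: net device pointer
 * @queue: queue index to wake up
 * @flags: wakeup flags (not used by this implementation)
 * Description: validate that the interface is up, XDP is enabled and the
 * queue has an XSK pool attached, then kick the combined RX/TX NAPI so the
 * zero-copy rings get serviced.
 */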
7003 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7004 {
7005 struct stmmac_priv *priv = netdev_priv(dev);
7006 struct stmmac_rx_queue *rx_q;
7007 struct stmmac_tx_queue *tx_q;
7008 struct stmmac_channel *ch;
7009
7010 if (test_bit(STMMAC_DOWN, &priv->state) ||
7011 !netif_carrier_ok(priv->dev))
7012 return -ENETDOWN;
7013
7014 if (!stmmac_xdp_is_enabled(priv))
7015 return -EINVAL;
7016
7017 if (queue >= priv->plat->rx_queues_to_use ||
7018 queue >= priv->plat->tx_queues_to_use)
7019 return -EINVAL;
7020
7021 rx_q = &priv->dma_conf.rx_queue[queue];
7022 tx_q = &priv->dma_conf.tx_queue[queue];
7023 ch = &priv->channel[queue];
7024
7025 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7026 return -EINVAL;
7027
7028 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7029 /* EQoS does not have a per-DMA channel SW interrupt,
7030 * so we schedule the RX NAPI straight away.
7031 */
7032 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7033 __napi_schedule(&ch->rxtx_napi);
7034 }
7035
7036 return 0;
7037 }
7038
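/* .ndo_get_stats64 callback: aggregate the per-queue TX/RX counters into
 * the netdev stats. The u64_stats_fetch_begin()/retry() loops guarantee a
 * consistent 64-bit snapshot even on 32-bit systems where the counters are
 * updated from the xmit and NAPI contexts.
 */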
7039 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7040 {
7041 struct stmmac_priv *priv = netdev_priv(dev);
7042 u32 tx_cnt = priv->plat->tx_queues_to_use;
7043 u32 rx_cnt = priv->plat->rx_queues_to_use;
7044 unsigned int start;
7045 int q;
7046
7047 for (q = 0; q < tx_cnt; q++) {
7048 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7049 u64 tx_packets;
7050 u64 tx_bytes;
7051
7052 do {
7053 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7054 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7055 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7056 do {
7057 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7058 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7059 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7060
7061 stats->tx_packets += tx_packets;
7062 stats->tx_bytes += tx_bytes;
7063 }
7064
7065 for (q = 0; q < rx_cnt; q++) {
7066 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7067 u64 rx_packets;
7068 u64 rx_bytes;
7069
7070 do {
7071 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7072 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7073 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7074 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7075
7076 stats->rx_packets += rx_packets;
7077 stats->rx_bytes += rx_bytes;
7078 }
7079
7080 stats->rx_dropped = priv->xstats.rx_dropped;
7081 stats->rx_errors = priv->xstats.rx_errors;
7082 stats->tx_dropped = priv->xstats.tx_dropped;
7083 stats->tx_errors = priv->xstats.tx_errors;
7084 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7085 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7086 stats->rx_length_errors = priv->xstats.rx_length;
7087 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7088 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7089 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7090 }
7091
7092 static const struct net_device_ops stmmac_netdev_ops = {
7093 .ndo_open = stmmac_open,
7094 .ndo_start_xmit = stmmac_xmit,
7095 .ndo_stop = stmmac_release,
7096 .ndo_change_mtu = stmmac_change_mtu,
7097 .ndo_fix_features = stmmac_fix_features,
7098 .ndo_set_features = stmmac_set_features,
7099 .ndo_set_rx_mode = stmmac_set_rx_mode,
7100 .ndo_tx_timeout = stmmac_tx_timeout,
7101 .ndo_eth_ioctl = stmmac_ioctl,
7102 .ndo_get_stats64 = stmmac_get_stats64,
7103 .ndo_setup_tc = stmmac_setup_tc,
7104 .ndo_select_queue = stmmac_select_queue,
7105 .ndo_set_mac_address = stmmac_set_mac_address,
7106 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7107 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7108 .ndo_bpf = stmmac_bpf,
7109 .ndo_xdp_xmit = stmmac_xdp_xmit,
7110 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7111 };
7112
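/* Deferred reset handler: if a reset was requested (typically after a TX
 * timeout or a non-recoverable error) and the interface is not already
 * down, close and re-open the device under the rtnl lock to fully
 * re-initialize the HW.
 */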
7113 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7114 {
7115 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7116 return;
7117 if (test_bit(STMMAC_DOWN, &priv->state))
7118 return;
7119
7120 netdev_err(priv->dev, "Reset adapter.\n");
7121
7122 rtnl_lock();
7123 netif_trans_update(priv->dev);
7124 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7125 usleep_range(1000, 2000);
7126
7127 set_bit(STMMAC_DOWN, &priv->state);
7128 dev_close(priv->dev);
7129 dev_open(priv->dev, NULL);
7130 clear_bit(STMMAC_DOWN, &priv->state);
7131 clear_bit(STMMAC_RESETING, &priv->state);
7132 rtnl_unlock();
7133 }
7134
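/* Workqueue handler: run the pending reset subtask and clear the
 * "service scheduled" flag so a new request can be queued.
 */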
7135 static void stmmac_service_task(struct work_struct *work)
7136 {
7137 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7138 service_task);
7139
7140 stmmac_reset_subtask(priv);
7141 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7142 }
7143
7144 /**
7145 * stmmac_hw_init - Init the MAC device
7146 * @priv: driver private structure
7147 * Description: this function configures the MAC device according to
7148 * some platform parameters or the HW capability register. It prepares the
7149 * driver to use either ring or chain mode and to set up either enhanced or
7150 * normal descriptors.
7151 */
7152 static int stmmac_hw_init(struct stmmac_priv *priv)
7153 {
7154 int ret;
7155
7156 /* dwmac-sun8i only work in chain mode */
7157 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7158 chain_mode = 1;
7159 priv->chain_mode = chain_mode;
7160
7161 /* Initialize HW Interface */
7162 ret = stmmac_hwif_init(priv);
7163 if (ret)
7164 return ret;
7165
7166 /* Get the HW capability (GMAC cores newer than 3.50a) */
7167 priv->hw_cap_support = stmmac_get_hw_features(priv);
7168 if (priv->hw_cap_support) {
7169 dev_info(priv->device, "DMA HW capability register supported\n");
7170
7171 /* We can override some gmac/dma configuration fields: e.g.
7172 * enh_desc, tx_coe (e.g. that are passed through the
7173 * platform) with the values from the HW capability
7174 * register (if supported).
7175 */
7176 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7177 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7178 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7179 priv->hw->pmt = priv->plat->pmt;
7180 if (priv->dma_cap.hash_tb_sz) {
7181 priv->hw->multicast_filter_bins =
7182 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7183 priv->hw->mcast_bits_log2 =
7184 ilog2(priv->hw->multicast_filter_bins);
7185 }
7186
7187 /* TXCOE doesn't work in thresh DMA mode */
7188 if (priv->plat->force_thresh_dma_mode)
7189 priv->plat->tx_coe = 0;
7190 else
7191 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7192
7193 /* In case of GMAC4 rx_coe is from HW cap register. */
7194 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7195
7196 if (priv->dma_cap.rx_coe_type2)
7197 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7198 else if (priv->dma_cap.rx_coe_type1)
7199 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7200
7201 } else {
7202 dev_info(priv->device, "No HW DMA feature register supported\n");
7203 }
7204
7205 if (priv->plat->rx_coe) {
7206 priv->hw->rx_csum = priv->plat->rx_coe;
7207 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7208 if (priv->synopsys_id < DWMAC_CORE_4_00)
7209 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7210 }
7211 if (priv->plat->tx_coe)
7212 dev_info(priv->device, "TX Checksum insertion supported\n");
7213
7214 if (priv->plat->pmt) {
7215 dev_info(priv->device, "Wake-Up On Lan supported\n");
7216 device_set_wakeup_capable(priv->device, 1);
7217 }
7218
7219 if (priv->dma_cap.tsoen)
7220 dev_info(priv->device, "TSO supported\n");
7221
7222 if (priv->dma_cap.number_rx_queues &&
7223 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7224 dev_warn(priv->device,
7225 "Number of Rx queues (%u) exceeds dma capability\n",
7226 priv->plat->rx_queues_to_use);
7227 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7228 }
7229 if (priv->dma_cap.number_tx_queues &&
7230 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7231 dev_warn(priv->device,
7232 "Number of Tx queues (%u) exceeds dma capability\n",
7233 priv->plat->tx_queues_to_use);
7234 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7235 }
7236
7237 if (priv->dma_cap.rx_fifo_size &&
7238 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7239 dev_warn(priv->device,
7240 "Rx FIFO size (%u) exceeds dma capability\n",
7241 priv->plat->rx_fifo_size);
7242 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7243 }
7244 if (priv->dma_cap.tx_fifo_size &&
7245 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7246 dev_warn(priv->device,
7247 "Tx FIFO size (%u) exceeds dma capability\n",
7248 priv->plat->tx_fifo_size);
7249 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7250 }
7251
7252 priv->hw->vlan_fail_q_en =
7253 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7254 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7255
7256 /* Run HW quirks, if any */
7257 if (priv->hwif_quirks) {
7258 ret = priv->hwif_quirks(priv);
7259 if (ret)
7260 return ret;
7261 }
7262
7263 /* Rx Watchdog is available in cores newer than 3.40.
7264 * In some cases, for example on buggy HW, this feature
7265 * has to be disabled, which can be done by passing the
7266 * riwt_off field from the platform.
7267 */
7268 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7269 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7270 priv->use_riwt = 1;
7271 dev_info(priv->device,
7272 "Enable RX Mitigation via HW Watchdog Timer\n");
7273 }
7274
7275 return 0;
7276 }
7277
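/* Register the NAPI contexts for each DMA channel: rx_napi for channels
 * with an RX queue, tx_napi for channels with a TX queue, and an extra
 * combined rxtx_napi (used by the XDP/XSK zero-copy path) for channels
 * that have both.
 */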
7278 static void stmmac_napi_add(struct net_device *dev)
7279 {
7280 struct stmmac_priv *priv = netdev_priv(dev);
7281 u32 queue, maxq;
7282
7283 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7284
7285 for (queue = 0; queue < maxq; queue++) {
7286 struct stmmac_channel *ch = &priv->channel[queue];
7287
7288 ch->priv_data = priv;
7289 ch->index = queue;
7290 spin_lock_init(&ch->lock);
7291
7292 if (queue < priv->plat->rx_queues_to_use) {
7293 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7294 }
7295 if (queue < priv->plat->tx_queues_to_use) {
7296 netif_napi_add_tx(dev, &ch->tx_napi,
7297 stmmac_napi_poll_tx);
7298 }
7299 if (queue < priv->plat->rx_queues_to_use &&
7300 queue < priv->plat->tx_queues_to_use) {
7301 netif_napi_add(dev, &ch->rxtx_napi,
7302 stmmac_napi_poll_rxtx);
7303 }
7304 }
7305 }
7306
7307 static void stmmac_napi_del(struct net_device *dev)
7308 {
7309 struct stmmac_priv *priv = netdev_priv(dev);
7310 u32 queue, maxq;
7311
7312 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7313
7314 for (queue = 0; queue < maxq; queue++) {
7315 struct stmmac_channel *ch = &priv->channel[queue];
7316
7317 if (queue < priv->plat->rx_queues_to_use)
7318 netif_napi_del(&ch->rx_napi);
7319 if (queue < priv->plat->tx_queues_to_use)
7320 netif_napi_del(&ch->tx_napi);
7321 if (queue < priv->plat->rx_queues_to_use &&
7322 queue < priv->plat->tx_queues_to_use) {
7323 netif_napi_del(&ch->rxtx_napi);
7324 }
7325 }
7326 }
7327
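/* Reconfigure the number of RX/TX queues at runtime (e.g. via ethtool
 * channels): close the interface if it is running, re-register the NAPI
 * contexts for the new channel count, refresh the default RSS indirection
 * table unless the user configured one, and re-open the interface.
 */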
7328 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7329 {
7330 struct stmmac_priv *priv = netdev_priv(dev);
7331 int ret = 0, i;
7332
7333 if (netif_running(dev))
7334 stmmac_release(dev);
7335
7336 stmmac_napi_del(dev);
7337
7338 priv->plat->rx_queues_to_use = rx_cnt;
7339 priv->plat->tx_queues_to_use = tx_cnt;
7340 if (!netif_is_rxfh_configured(dev))
7341 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7342 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7343 rx_cnt);
7344
7345 stmmac_napi_add(dev);
7346
7347 if (netif_running(dev))
7348 ret = stmmac_open(dev);
7349
7350 return ret;
7351 }
7352
7353 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7354 {
7355 struct stmmac_priv *priv = netdev_priv(dev);
7356 int ret = 0;
7357
7358 if (netif_running(dev))
7359 stmmac_release(dev);
7360
7361 priv->dma_conf.dma_rx_size = rx_size;
7362 priv->dma_conf.dma_tx_size = tx_size;
7363
7364 if (netif_running(dev))
7365 ret = stmmac_open(dev);
7366
7367 return ret;
7368 }
7369
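/* XDP RX metadata hook: report the HW RX timestamp for an XDP frame. On
 * GMAC4/XGMAC the timestamp is held in the context (next) descriptor, on
 * older cores in the descriptor itself; the CDC error adjustment is
 * subtracted before returning the value.
 */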
7370 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7371 {
7372 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7373 struct dma_desc *desc_contains_ts = ctx->desc;
7374 struct stmmac_priv *priv = ctx->priv;
7375 struct dma_desc *ndesc = ctx->ndesc;
7376 struct dma_desc *desc = ctx->desc;
7377 u64 ns = 0;
7378
7379 if (!priv->hwts_rx_en)
7380 return -ENODATA;
7381
7382 /* For GMAC4, the valid timestamp is from CTX next desc. */
7383 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7384 desc_contains_ts = ndesc;
7385
7386 /* Check if timestamp is available */
7387 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7388 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7389 ns -= priv->plat->cdc_error_adj;
7390 *timestamp = ns_to_ktime(ns);
7391 return 0;
7392 }
7393
7394 return -ENODATA;
7395 }
7396
7397 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7398 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7399 };
7400
7401 /**
7402 * stmmac_dvr_probe
7403 * @device: device pointer
7404 * @plat_dat: platform data pointer
7405 * @res: stmmac resource pointer
7406 * Description: this is the main probe function used to
7407 * call alloc_etherdev and allocate the private structure.
7408 * Return:
7409 * returns 0 on success, otherwise errno.
7410 */
7411 int stmmac_dvr_probe(struct device *device,
7412 struct plat_stmmacenet_data *plat_dat,
7413 struct stmmac_resources *res)
7414 {
7415 struct net_device *ndev = NULL;
7416 struct stmmac_priv *priv;
7417 u32 rxq;
7418 int i, ret = 0;
7419
7420 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7421 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7422 if (!ndev)
7423 return -ENOMEM;
7424
7425 SET_NETDEV_DEV(ndev, device);
7426
7427 priv = netdev_priv(ndev);
7428 priv->device = device;
7429 priv->dev = ndev;
7430
7431 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7432 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7433 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7434 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7435 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7436 }
7437
7438 priv->xstats.pcpu_stats =
7439 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7440 if (!priv->xstats.pcpu_stats)
7441 return -ENOMEM;
7442
7443 stmmac_set_ethtool_ops(ndev);
7444 priv->pause = pause;
7445 priv->plat = plat_dat;
7446 priv->ioaddr = res->addr;
7447 priv->dev->base_addr = (unsigned long)res->addr;
7448 priv->plat->dma_cfg->multi_msi_en =
7449 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7450
7451 priv->dev->irq = res->irq;
7452 priv->wol_irq = res->wol_irq;
7453 priv->lpi_irq = res->lpi_irq;
7454 priv->sfty_irq = res->sfty_irq;
7455 priv->sfty_ce_irq = res->sfty_ce_irq;
7456 priv->sfty_ue_irq = res->sfty_ue_irq;
7457 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7458 priv->rx_irq[i] = res->rx_irq[i];
7459 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7460 priv->tx_irq[i] = res->tx_irq[i];
7461
7462 if (!is_zero_ether_addr(res->mac))
7463 eth_hw_addr_set(priv->dev, res->mac);
7464
7465 dev_set_drvdata(device, priv->dev);
7466
7467 /* Verify driver arguments */
7468 stmmac_verify_args();
7469
7470 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7471 if (!priv->af_xdp_zc_qps)
7472 return -ENOMEM;
7473
7474 /* Allocate workqueue */
7475 priv->wq = create_singlethread_workqueue("stmmac_wq");
7476 if (!priv->wq) {
7477 dev_err(priv->device, "failed to create workqueue\n");
7478 ret = -ENOMEM;
7479 goto error_wq_init;
7480 }
7481
7482 INIT_WORK(&priv->service_task, stmmac_service_task);
7483
7484 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7485
7486 /* Override with kernel parameters if supplied XXX CRS XXX
7487 * this needs to have multiple instances
7488 */
7489 if ((phyaddr >= 0) && (phyaddr <= 31))
7490 priv->plat->phy_addr = phyaddr;
7491
7492 if (priv->plat->stmmac_rst) {
7493 ret = reset_control_assert(priv->plat->stmmac_rst);
7494 reset_control_deassert(priv->plat->stmmac_rst);
7495 /* Some reset controllers have only a reset callback instead of
7496 * an assert + deassert callback pair.
7497 */
7498 if (ret == -ENOTSUPP)
7499 reset_control_reset(priv->plat->stmmac_rst);
7500 }
7501
7502 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7503 if (ret == -ENOTSUPP)
7504 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7505 ERR_PTR(ret));
7506
7507 /* Wait a bit for the reset to take effect */
7508 udelay(10);
7509
7510 /* Init MAC and get the capabilities */
7511 ret = stmmac_hw_init(priv);
7512 if (ret)
7513 goto error_hw_init;
7514
7515 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7516 */
7517 if (priv->synopsys_id < DWMAC_CORE_5_20)
7518 priv->plat->dma_cfg->dche = false;
7519
7520 stmmac_check_ether_addr(priv);
7521
7522 ndev->netdev_ops = &stmmac_netdev_ops;
7523
7524 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7525 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7526
7527 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7528 NETIF_F_RXCSUM;
7529 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7530 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7531
7532 ret = stmmac_tc_init(priv, priv);
7533 if (!ret) {
7534 ndev->hw_features |= NETIF_F_HW_TC;
7535 }
7536
7537 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7538 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7539 if (priv->plat->has_gmac4)
7540 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7541 priv->tso = true;
7542 dev_info(priv->device, "TSO feature enabled\n");
7543 }
7544
7545 if (priv->dma_cap.sphen &&
7546 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7547 ndev->hw_features |= NETIF_F_GRO;
7548 priv->sph_cap = true;
7549 priv->sph = priv->sph_cap;
7550 dev_info(priv->device, "SPH feature enabled\n");
7551 }
7552
7553 /* Ideally our host DMA address width is the same as for the
7554 * device. However, it may differ and then we have to use our
7555 * host DMA width for allocation and the device DMA width for
7556 * register handling.
7557 */
7558 if (priv->plat->host_dma_width)
7559 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7560 else
7561 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7562
7563 if (priv->dma_cap.host_dma_width) {
7564 ret = dma_set_mask_and_coherent(device,
7565 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7566 if (!ret) {
7567 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7568 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7569
7570 /*
7571 * If more than 32 bits can be addressed, make sure to
7572 * enable enhanced addressing mode.
7573 */
7574 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7575 priv->plat->dma_cfg->eame = true;
7576 } else {
7577 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7578 if (ret) {
7579 dev_err(priv->device, "Failed to set DMA Mask\n");
7580 goto error_hw_init;
7581 }
7582
7583 priv->dma_cap.host_dma_width = 32;
7584 }
7585 }
7586
7587 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7588 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7589 #ifdef STMMAC_VLAN_TAG_USED
7590 /* Both mac100 and gmac support receive VLAN tag detection */
7591 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7592 if (priv->plat->has_gmac4) {
7593 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7594 priv->hw->hw_vlan_en = true;
7595 }
7596 if (priv->dma_cap.vlhash) {
7597 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7598 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7599 }
7600 if (priv->dma_cap.vlins) {
7601 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7602 if (priv->dma_cap.dvlan)
7603 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7604 }
7605 #endif
7606 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7607
7608 priv->xstats.threshold = tc;
7609
7610 /* Initialize RSS */
7611 rxq = priv->plat->rx_queues_to_use;
7612 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7613 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7614 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7615
7616 if (priv->dma_cap.rssen && priv->plat->rss_en)
7617 ndev->features |= NETIF_F_RXHASH;
7618
7619 ndev->vlan_features |= ndev->features;
7620
7621 /* MTU range: 46 - hw-specific max */
7622 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7623 if (priv->plat->has_xgmac)
7624 ndev->max_mtu = XGMAC_JUMBO_LEN;
7625 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7626 ndev->max_mtu = JUMBO_LEN;
7627 else
7628 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7629 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7630 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7631 */
7632 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7633 (priv->plat->maxmtu >= ndev->min_mtu))
7634 ndev->max_mtu = priv->plat->maxmtu;
7635 else if (priv->plat->maxmtu < ndev->min_mtu)
7636 dev_warn(priv->device,
7637 "%s: warning: maxmtu having invalid value (%d)\n",
7638 __func__, priv->plat->maxmtu);
7639
7640 if (flow_ctrl)
7641 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7642
7643 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7644
7645 /* Setup channels NAPI */
7646 stmmac_napi_add(ndev);
7647
7648 mutex_init(&priv->lock);
7649
7650 stmmac_fpe_init(priv);
7651
7652 /* If a specific clk_csr value is passed from the platform,
7653 * this means that the CSR Clock Range selection cannot be
7654 * changed at run-time and is fixed. Otherwise the driver will
7655 * try to set the MDC clock dynamically according to the
7656 * actual csr clock input.
7657 */
7658 if (priv->plat->clk_csr >= 0)
7659 priv->clk_csr = priv->plat->clk_csr;
7660 else
7661 stmmac_clk_csr_set(priv);
7662
7663 stmmac_check_pcs_mode(priv);
7664
7665 pm_runtime_get_noresume(device);
7666 pm_runtime_set_active(device);
7667 if (!pm_runtime_enabled(device))
7668 pm_runtime_enable(device);
7669
7670 ret = stmmac_mdio_register(ndev);
7671 if (ret < 0) {
7672 dev_err_probe(priv->device, ret,
7673 "MDIO bus (id: %d) registration failed\n",
7674 priv->plat->bus_id);
7675 goto error_mdio_register;
7676 }
7677
7678 if (priv->plat->speed_mode_2500)
7679 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7680
7681 ret = stmmac_pcs_setup(ndev);
7682 if (ret)
7683 goto error_pcs_setup;
7684
7685 ret = stmmac_phy_setup(priv);
7686 if (ret) {
7687 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7688 goto error_phy_setup;
7689 }
7690
7691 ret = register_netdev(ndev);
7692 if (ret) {
7693 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7694 __func__, ret);
7695 goto error_netdev_register;
7696 }
7697
7698 #ifdef CONFIG_DEBUG_FS
7699 stmmac_init_fs(ndev);
7700 #endif
7701
7702 if (priv->plat->dump_debug_regs)
7703 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7704
7705 /* Let pm_runtime_put() disable the clocks.
7706 * If CONFIG_PM is not enabled, the clocks will stay powered.
7707 */
7708 pm_runtime_put(device);
7709
7710 return ret;
7711
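/* Error unwind: release everything acquired so far in reverse order. */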
7712 error_netdev_register:
7713 phylink_destroy(priv->phylink);
7714 error_phy_setup:
7715 stmmac_pcs_clean(ndev);
7716 error_pcs_setup:
7717 stmmac_mdio_unregister(ndev);
7718 error_mdio_register:
7719 stmmac_napi_del(ndev);
7720 error_hw_init:
7721 destroy_workqueue(priv->wq);
7722 error_wq_init:
7723 bitmap_free(priv->af_xdp_zc_qps);
7724
7725 return ret;
7726 }
7727 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7728
7729 /**
7730 * stmmac_dvr_remove
7731 * @dev: device pointer
7732 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7733 * changes the link status and releases the DMA descriptor rings.
7734 */
7735 void stmmac_dvr_remove(struct device *dev)
7736 {
7737 struct net_device *ndev = dev_get_drvdata(dev);
7738 struct stmmac_priv *priv = netdev_priv(ndev);
7739
7740 netdev_info(priv->dev, "%s: removing driver", __func__);
7741
7742 pm_runtime_get_sync(dev);
7743
7744 stmmac_stop_all_dma(priv);
7745 stmmac_mac_set(priv, priv->ioaddr, false);
7746 unregister_netdev(ndev);
7747
7748 #ifdef CONFIG_DEBUG_FS
7749 stmmac_exit_fs(ndev);
7750 #endif
7751 phylink_destroy(priv->phylink);
7752 if (priv->plat->stmmac_rst)
7753 reset_control_assert(priv->plat->stmmac_rst);
7754 reset_control_assert(priv->plat->stmmac_ahb_rst);
7755
7756 stmmac_pcs_clean(ndev);
7757 stmmac_mdio_unregister(ndev);
7758
7759 destroy_workqueue(priv->wq);
7760 mutex_destroy(&priv->lock);
7761 bitmap_free(priv->af_xdp_zc_qps);
7762
7763 pm_runtime_disable(dev);
7764 pm_runtime_put_noidle(dev);
7765 }
7766 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7767
7768 /**
7769 * stmmac_suspend - suspend callback
7770 * @dev: device pointer
7771 * Description: this is the function to suspend the device and it is called
7772 * by the platform driver to stop the network queue, release the resources,
7773 * program the PMT register (for WoL), clean and release driver resources.
7774 */
7775 int stmmac_suspend(struct device *dev)
7776 {
7777 struct net_device *ndev = dev_get_drvdata(dev);
7778 struct stmmac_priv *priv = netdev_priv(ndev);
7779 u32 chan;
7780
7781 if (!ndev || !netif_running(ndev))
7782 return 0;
7783
7784 mutex_lock(&priv->lock);
7785
7786 netif_device_detach(ndev);
7787
7788 stmmac_disable_all_queues(priv);
7789
7790 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7791 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7792
7793 if (priv->eee_sw_timer_en) {
7794 priv->tx_path_in_lpi_mode = false;
7795 del_timer_sync(&priv->eee_ctrl_timer);
7796 }
7797
7798 /* Stop TX/RX DMA */
7799 stmmac_stop_all_dma(priv);
7800
7801 if (priv->plat->serdes_powerdown)
7802 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7803
7804 /* Enable Power down mode by programming the PMT regs */
7805 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7806 stmmac_pmt(priv, priv->hw, priv->wolopts);
7807 priv->irq_wake = 1;
7808 } else {
7809 stmmac_mac_set(priv, priv->ioaddr, false);
7810 pinctrl_pm_select_sleep_state(priv->device);
7811 }
7812
7813 mutex_unlock(&priv->lock);
7814
7815 rtnl_lock();
7816 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7817 phylink_suspend(priv->phylink, true);
7818 } else {
7819 if (device_may_wakeup(priv->device))
7820 phylink_speed_down(priv->phylink, false);
7821 phylink_suspend(priv->phylink, false);
7822 }
7823 rtnl_unlock();
7824
7825 if (stmmac_fpe_supported(priv))
7826 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7827
7828 priv->speed = SPEED_UNKNOWN;
7829 return 0;
7830 }
7831 EXPORT_SYMBOL_GPL(stmmac_suspend);
7832
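/* The two helpers below reset the software state of an RX or TX ring
 * (ring indices, TSO MSS and the BQL queue for TX) so the ring can be
 * reused after a restart.
 */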
7833 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7834 {
7835 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7836
7837 rx_q->cur_rx = 0;
7838 rx_q->dirty_rx = 0;
7839 }
7840
7841 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7842 {
7843 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7844
7845 tx_q->cur_tx = 0;
7846 tx_q->dirty_tx = 0;
7847 tx_q->mss = 0;
7848
7849 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7850 }
7851
7852 /**
7853 * stmmac_reset_queues_param - reset queue parameters
7854 * @priv: device pointer
7855 */
7856 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7857 {
7858 u32 rx_cnt = priv->plat->rx_queues_to_use;
7859 u32 tx_cnt = priv->plat->tx_queues_to_use;
7860 u32 queue;
7861
7862 for (queue = 0; queue < rx_cnt; queue++)
7863 stmmac_reset_rx_queue(priv, queue);
7864
7865 for (queue = 0; queue < tx_cnt; queue++)
7866 stmmac_reset_tx_queue(priv, queue);
7867 }
7868
7869 /**
7870 * stmmac_resume - resume callback
7871 * @dev: device pointer
7872 * Description: on resume, this function is invoked to set up the DMA and CORE
7873 * in a usable state.
7874 */
7875 int stmmac_resume(struct device *dev)
7876 {
7877 struct net_device *ndev = dev_get_drvdata(dev);
7878 struct stmmac_priv *priv = netdev_priv(ndev);
7879 int ret;
7880
7881 if (!netif_running(ndev))
7882 return 0;
7883
7884 /* The Power Down bit in the PM register is cleared
7885 * automatically as soon as a magic packet or a Wake-up frame
7886 * is received. Anyway, it's better to manually clear
7887 * this bit because it can generate problems while resuming
7888 * from other devices (e.g. serial console).
7889 */
7890 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7891 mutex_lock(&priv->lock);
7892 stmmac_pmt(priv, priv->hw, 0);
7893 mutex_unlock(&priv->lock);
7894 priv->irq_wake = 0;
7895 } else {
7896 pinctrl_pm_select_default_state(priv->device);
7897 /* reset the phy so that it's ready */
7898 if (priv->mii)
7899 stmmac_mdio_reset(priv->mii);
7900 }
7901
7902 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7903 priv->plat->serdes_powerup) {
7904 ret = priv->plat->serdes_powerup(ndev,
7905 priv->plat->bsp_priv);
7906
7907 if (ret < 0)
7908 return ret;
7909 }
7910
7911 rtnl_lock();
7912 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7913 phylink_resume(priv->phylink);
7914 } else {
7915 phylink_resume(priv->phylink);
7916 if (device_may_wakeup(priv->device))
7917 phylink_speed_up(priv->phylink);
7918 }
7919 rtnl_unlock();
7920
7921 rtnl_lock();
7922 mutex_lock(&priv->lock);
7923
7924 stmmac_reset_queues_param(priv);
7925
7926 stmmac_free_tx_skbufs(priv);
7927 stmmac_clear_descriptors(priv, &priv->dma_conf);
7928
7929 stmmac_hw_setup(ndev, false);
7930 stmmac_init_coalesce(priv);
7931 stmmac_set_rx_mode(ndev);
7932
7933 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7934
7935 stmmac_enable_all_queues(priv);
7936 stmmac_enable_all_dma_irq(priv);
7937
7938 mutex_unlock(&priv->lock);
7939 rtnl_unlock();
7940
7941 netif_device_attach(ndev);
7942
7943 return 0;
7944 }
7945 EXPORT_SYMBOL_GPL(stmmac_resume);
7946
7947 #ifndef MODULE
7948 static int __init stmmac_cmdline_opt(char *str)
7949 {
7950 char *opt;
7951
7952 if (!str || !*str)
7953 return 1;
7954 while ((opt = strsep(&str, ",")) != NULL) {
7955 if (!strncmp(opt, "debug:", 6)) {
7956 if (kstrtoint(opt + 6, 0, &debug))
7957 goto err;
7958 } else if (!strncmp(opt, "phyaddr:", 8)) {
7959 if (kstrtoint(opt + 8, 0, &phyaddr))
7960 goto err;
7961 } else if (!strncmp(opt, "buf_sz:", 7)) {
7962 if (kstrtoint(opt + 7, 0, &buf_sz))
7963 goto err;
7964 } else if (!strncmp(opt, "tc:", 3)) {
7965 if (kstrtoint(opt + 3, 0, &tc))
7966 goto err;
7967 } else if (!strncmp(opt, "watchdog:", 9)) {
7968 if (kstrtoint(opt + 9, 0, &watchdog))
7969 goto err;
7970 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7971 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7972 goto err;
7973 } else if (!strncmp(opt, "pause:", 6)) {
7974 if (kstrtoint(opt + 6, 0, &pause))
7975 goto err;
7976 } else if (!strncmp(opt, "eee_timer:", 10)) {
7977 if (kstrtoint(opt + 10, 0, &eee_timer))
7978 goto err;
7979 } else if (!strncmp(opt, "chain_mode:", 11)) {
7980 if (kstrtoint(opt + 11, 0, &chain_mode))
7981 goto err;
7982 }
7983 }
7984 return 1;
7985
7986 err:
7987 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7988 return 1;
7989 }
7990
7991 __setup("stmmaceth=", stmmac_cmdline_opt);
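/* Example (hypothetical values): booting a non-modular kernel with
 * "stmmaceth=debug:16,phyaddr:1,watchdog:4000" enables verbose messages,
 * forces PHY address 1 and sets a 4 s transmit timeout.
 */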
7992 #endif /* MODULE */
7993
7994 static int __init stmmac_init(void)
7995 {
7996 #ifdef CONFIG_DEBUG_FS
7997 /* Create debugfs main directory if it doesn't exist yet */
7998 if (!stmmac_fs_dir)
7999 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8000 register_netdevice_notifier(&stmmac_notifier);
8001 #endif
8002
8003 return 0;
8004 }
8005
8006 static void __exit stmmac_exit(void)
8007 {
8008 #ifdef CONFIG_DEBUG_FS
8009 unregister_netdevice_notifier(&stmmac_notifier);
8010 debugfs_remove_recursive(stmmac_fs_dir);
8011 #endif
8012 }
8013
8014 module_init(stmmac_init)
8015 module_exit(stmmac_exit)
8016
8017 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8018 MODULE_AUTHOR("Giuseppe Cavallaro <[email protected]>");
8019 MODULE_LICENSE("GPL");
8020