// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				TX_EPKT_C_CFG_MISC_PT |
				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}

static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

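/* Prepend the per-packet checksum offload descriptor block (NOP, header,
 * header2, epkt and end words) when the stack requests CHECKSUM_PARTIAL.
 * Unsupported protocols, or a failed headroom expansion, fall back to
 * software checksumming via skb_checksum_help().
 */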
static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
{
	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
}

static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
}

static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
}

static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
{
	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
}

static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
}

static const struct bcmasp_intf_ops bcmasp_intf_ops = {
	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
	.tx_read = bcmasp_tx_spb_dma_rq,
	.tx_write = bcmasp_tx_spb_dma_wq,
};

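/* Map the skb's linear head and all fragments onto consecutive SPB
 * descriptors, then advance the DMA valid pointer so the hardware can
 * start fetching. The queue is stopped whenever the ring cannot hold
 * another maximally fragmented packet.
 */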
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
	for (i = 0; i <= nr_frags; i++) {
		if (!i) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;

	skb_tx_timestamp(skb);

	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);
	bcmasp_enable_phy_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	/* We hold the umac in reset and bring it out of
	 * reset when phy link is up.
	 */
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void umac_init(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}

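/* Reclaim completed TX descriptors: walk the ring from the clean index up
 * to the hardware read pointer, unmapping each buffer and freeing the skb
 * on the last descriptor of a packet. Returns the number of descriptors
 * released.
 */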
static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = bcmasp_intf_tx_read(intf);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	return released;
}

static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	int released = 0;

	released = bcmasp_tx_reclaim(intf);

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

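/* RX NAPI poll: received frames land in a single DMA ring buffer. Each
 * frame is copied into a freshly allocated skb, the two leading pad bytes
 * and (when the MAC forwards it) the FCS are stripped, and the buffer
 * space is handed back by advancing the ring buffer read pointer.
 */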
static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

	valid = bcmasp_intf_rx_desc_read(intf) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
						   desc->size));

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);

	if (processed < budget) {
		napi_complete_done(&intf->rx_napi, processed);
		bcmasp_enable_rx_irq(intf, 1);
	}

	return processed;
}

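/* PHY link adjustment callback: on a link, duplex or pause change,
 * reprogram the UniMAC command register (taking it out of software reset
 * if needed), update the EEE/LPI settings and mirror the link state into
 * the RGMII out-of-band control register.
 */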
static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			 UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			 UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		if (reg & UMC_CMD_SW_RESET) {
			reg &= ~UMC_CMD_SW_RESET;
			umac_wl(intf, reg, UMC_CMD);
			udelay(2);
			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
		}
		umac_wl(intf, reg, UMC_CMD);

		umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
		reg = umac_rl(intf, UMC_EEE_CTRL);
		if (phydev->enable_tx_lpi)
			reg |= EEE_EN;
		else
			reg &= ~EEE_EN;
		umac_wl(intf, reg, UMC_EEE_CTRL);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}

static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;

	/* Alloc RX */
	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, intf->rx_ring_dma))
		goto free_rx_buffer;

	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
						&intf->rx_edpkt_dma_addr,
						GFP_KERNEL);
	if (!intf->rx_edpkt_cpu)
		goto free_rx_buffer_dma;

	/* Alloc TX */
	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
					      &intf->tx_spb_dma_addr,
					      GFP_KERNEL);
	if (!intf->tx_spb_cpu)
		goto free_rx_edpkt_dma;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs)
		goto free_tx_spb_dma;

	return 0;

free_tx_spb_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
free_rx_edpkt_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
free_rx_buffer_dma:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
free_rx_buffer:
	__free_pages(buffer_pg, intf->rx_buf_order);

	return -ENOMEM;
}

static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* RX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	/* TX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
	kfree(intf->tx_cbs);
}

static void bcmasp_init_rx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
			(RX_EDPKT_CFG_CFG0_64_ALN <<
			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
			(RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);

	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
		   UMAC2FB_CFG);
}

static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;
	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	/* Tx SPB */
	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);
	tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
	tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
}

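/* Power the internal EPHY up or down by sequencing its clock-disable,
 * bias/power-down and reset bits.
 */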
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	bcmasp_tx_reclaim(intf);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);
	bcmasp_enable_phy_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	netif_napi_del(&intf->rx_napi);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	bcmasp_reclaim_free_buffers(intf);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 *		(requires PCB or receiver-side delay)
		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		if (intf->internal_phy)
			dev->phydev->irq = PHY_MAC_INTERRUPT;

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;

		/* Set phylib's copy of the LPI timer */
		phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
	}

	umac_reset(intf);

	umac_init(intf);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	bcmasp_init_tx(intf);
	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
	bcmasp_enable_tx(intf, 1);

	bcmasp_init_rx(intf);
	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
	bcmasp_enable_rx(intf, 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		goto err_free_mem;

	ret = bcmasp_netif_init(dev, true);
	if (ret) {
		clk_disable_unprepare(intf->parent->clk);
		goto err_free_mem;
	}

	return ret;

err_free_mem:
	bcmasp_reclaim_free_buffers(intf);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}

static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open = bcmasp_open,
	.ndo_stop = bcmasp_stop,
	.ndo_start_xmit = bcmasp_xmit,
	.ndo_tx_timeout = bcmasp_tx_timeout,
	.ndo_set_rx_mode = bcmasp_set_rx_mode,
	.ndo_get_phys_port_name = bcmasp_get_phys_port_name,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = bcmasp_get_stats64,
};

static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	/* Per port */
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	/* Per ch */
	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

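/* Allocate and initialize one interface from its device tree node: read
 * the port and channel numbers, resolve the PHY (phy-handle or fixed
 * link), map the per-port and per-channel register resources, and set up
 * netdev ops and feature flags. The netdev itself is registered later by
 * the caller.
 */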
#define MAX_IRQ_STR_LEN		64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	int ch, port, ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_free_netdev;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}

	SET_NETDEV_DEV(ndev, dev);
	intf->ops = &bcmasp_intf_ops;
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	return intf;

err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_CLEAR);
	}

	if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, true);

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, false);

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_SET);
	}
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}
