// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 John Crispin <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/property.h>

#include <asm/checksum.h>

#include <lantiq_soc.h>
#include <xway_dma.h>
#include <lantiq_platform.h>
#define LTQ_ETOP_MDIO 0x11804
#define MDIO_REQUEST 0x80000000
#define MDIO_READ 0x40000000
#define MDIO_ADDR_MASK 0x1f
#define MDIO_ADDR_OFFSET 0x15
#define MDIO_REG_MASK 0x1f
#define MDIO_REG_OFFSET 0x10
#define MDIO_VAL_MASK 0xffff

#define PPE32_CGEN 0x800
#define LQ_PPE32_ENET_MAC_CFG 0x1840

#define LTQ_ETOP_ENETS0 0x11850
#define LTQ_ETOP_MAC_DA0 0x1186C
#define LTQ_ETOP_MAC_DA1 0x11870
#define LTQ_ETOP_CFG 0x16020
#define LTQ_ETOP_IGPLEN 0x16080

#define MAX_DMA_CHAN 0x8
#define MAX_DMA_CRC_LEN 0x4
#define MAX_DMA_DATA_LEN 0x600

#define ETOP_FTCU BIT(28)
#define ETOP_MII_MASK 0xf
#define ETOP_MII_NORMAL 0xd
#define ETOP_MII_REVERSE 0xe
#define ETOP_PLEN_UNDER 0x40
#define ETOP_CGEN 0x800

/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL 1
#define LTQ_ETOP_RX_CHANNEL 6
#define IS_TX(x) ((x) == LTQ_ETOP_TX_CHANNEL)
#define IS_RX(x) ((x) == LTQ_ETOP_RX_CHANNEL)

#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
#define ltq_etop_w32_mask(x, y, z) \
        ltq_w32_mask(x, y, ltq_etop_membase + (z))

#define DRV_VERSION "1.0"

static void __iomem *ltq_etop_membase;

struct ltq_etop_chan {
        int idx;
        int tx_free;
        struct net_device *netdev;
        struct napi_struct napi;
        struct ltq_dma_channel dma;
        struct sk_buff *skb[LTQ_DESC_NUM];
};

struct ltq_etop_priv {
        struct net_device *netdev;
        struct platform_device *pdev;
        struct ltq_eth_data *pldata;

        struct mii_bus *mii_bus;

        struct ltq_etop_chan ch[MAX_DMA_CHAN];

        int tx_burst_len;
        int rx_burst_len;

        spinlock_t lock;
};

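/* Refill one RX descriptor with a fresh skb and hand it back to the
 * hardware. The descriptor ends up programmed with the CPU physical
 * address via CPHYSADDR(); the dma_map_single() call that precedes the
 * CPHYSADDR() assignment is presumably kept for its cache-maintenance
 * side effect on this MIPS SoC.
 */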
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
        struct ltq_etop_priv *priv = netdev_priv(ch->netdev);

        ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
        if (!ch->skb[ch->dma.desc])
                return -ENOMEM;
        ch->dma.desc_base[ch->dma.desc].addr =
                dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data,
                               MAX_DMA_DATA_LEN, DMA_FROM_DEVICE);
        ch->dma.desc_base[ch->dma.desc].addr =
                CPHYSADDR(ch->skb[ch->dma.desc]->data);
        ch->dma.desc_base[ch->dma.desc].ctl =
                LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
                MAX_DMA_DATA_LEN;
        skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
        return 0;
}

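/* Pass one completed RX buffer up the stack and refill its descriptor.
 * The refill happens under the lock, before the descriptor index
 * advances, so the hardware never sees a slot without a buffer.
 */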
static void
ltq_etop_hw_receive(struct ltq_etop_chan *ch)
{
        struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        struct sk_buff *skb = ch->skb[ch->dma.desc];
        int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        if (ltq_etop_alloc_skb(ch)) {
                netdev_err(ch->netdev,
                           "failed to allocate new rx buffer, stopping DMA\n");
                ltq_dma_close(&ch->dma);
        }
        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;
        spin_unlock_irqrestore(&priv->lock, flags);

        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, ch->netdev);
        netif_receive_skb(skb);
}

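/* NAPI RX poll: consume descriptors the hardware has completed
 * (LTQ_DMA_C set, LTQ_DMA_OWN clear), up to the NAPI budget. Only when
 * less than the full budget was used do we complete NAPI and re-arm the
 * channel interrupt.
 */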
static int
ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
        struct ltq_etop_chan *ch = container_of(napi,
                        struct ltq_etop_chan, napi);
        int work_done = 0;

        while (work_done < budget) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
                        break;
                ltq_etop_hw_receive(ch);
                work_done++;
        }
        if (work_done < budget) {
                napi_complete_done(&ch->napi, work_done);
                ltq_dma_ack_irq(&ch->dma);
        }
        return work_done;
}

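/* NAPI TX poll: reclaim every descriptor the hardware has finished with,
 * free the attached skbs and restart the queue if it was stopped. This
 * handler always reports one unit of work and completes NAPI.
 */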
static int
ltq_etop_poll_tx(struct napi_struct *napi, int budget)
{
        struct ltq_etop_chan *ch =
                container_of(napi, struct ltq_etop_chan, napi);
        struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
        struct netdev_queue *txq =
                netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        while ((ch->dma.desc_base[ch->tx_free].ctl &
                (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
                dev_kfree_skb_any(ch->skb[ch->tx_free]);
                ch->skb[ch->tx_free] = NULL;
                memset(&ch->dma.desc_base[ch->tx_free], 0,
                       sizeof(struct ltq_dma_desc));
                ch->tx_free++;
                ch->tx_free %= LTQ_DESC_NUM;
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        if (netif_tx_queue_stopped(txq))
                netif_tx_start_queue(txq);
        napi_complete(&ch->napi);
        ltq_dma_ack_irq(&ch->dma);
        return 1;
}

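/* The DMA channel interrupts are apparently numbered consecutively from
 * LTQ_DMA_CH0_INT, so the channel index falls out of simple arithmetic
 * on the IRQ number.
 */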
static irqreturn_t
ltq_etop_dma_irq(int irq, void *_priv)
{
        struct ltq_etop_priv *priv = _priv;
        int ch = irq - LTQ_DMA_CH0_INT;

        napi_schedule(&priv->ch[ch].napi);
        return IRQ_HANDLED;
}

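/* Tear down one DMA channel: release the descriptors and the IRQ, and
 * free any skbs still parked on the RX ring.
 */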
static void
ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);

        ltq_dma_free(&ch->dma);
        if (ch->dma.irq)
                free_irq(ch->dma.irq, priv);
        if (IS_RX(ch->idx)) {
                struct ltq_dma_channel *dma = &ch->dma;

                for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
                        dev_kfree_skb_any(ch->skb[ch->dma.desc]);
        }
}

static void
ltq_etop_hw_exit(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        int i;

        ltq_pmu_disable(PMU_PPE);
        for (i = 0; i < MAX_DMA_CHAN; i++)
                if (IS_TX(i) || IS_RX(i))
                        ltq_etop_free_channel(dev, &priv->ch[i]);
}

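/* Bring the MAC up: power on the PPE, select the MII pin mux according
 * to the platform data, enable hardware CRC generation and set up the
 * two static DMA channels (descriptors plus interrupts).
 */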
static int
ltq_etop_hw_init(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        int i;
        int err;

        ltq_pmu_enable(PMU_PPE);

        switch (priv->pldata->mii_mode) {
        case PHY_INTERFACE_MODE_RMII:
                ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_REVERSE,
                                  LTQ_ETOP_CFG);
                break;

        case PHY_INTERFACE_MODE_MII:
                ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_NORMAL,
                                  LTQ_ETOP_CFG);
                break;

        default:
                netdev_err(dev, "unknown mii mode %d\n",
                           priv->pldata->mii_mode);
                return -ENOTSUPP;
        }

        /* enable crc generation */
        ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);

        ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);

        for (i = 0; i < MAX_DMA_CHAN; i++) {
                int irq = LTQ_DMA_CH0_INT + i;
                struct ltq_etop_chan *ch = &priv->ch[i];

                ch->dma.nr = i;
                ch->idx = ch->dma.nr;
                ch->dma.dev = &priv->pdev->dev;

                if (IS_TX(i)) {
                        ltq_dma_alloc_tx(&ch->dma);
                        err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
                        if (err) {
                                netdev_err(dev,
                                           "Unable to get Tx DMA IRQ %d\n",
                                           irq);
                                return err;
                        }
                } else if (IS_RX(i)) {
                        ltq_dma_alloc_rx(&ch->dma);
                        for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
                             ch->dma.desc++)
                                if (ltq_etop_alloc_skb(ch))
                                        return -ENOMEM;
                        ch->dma.desc = 0;
                        err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
                        if (err) {
                                netdev_err(dev,
                                           "Unable to get Rx DMA IRQ %d\n",
                                           irq);
                                return err;
                        }
                }
                ch->dma.irq = irq;
        }
        return 0;
}

static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strscpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
        strscpy(info->bus_info, "internal", sizeof(info->bus_info));
        strscpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops ltq_etop_ethtool_ops = {
        .get_drvinfo = ltq_etop_get_drvinfo,
        .nway_reset = phy_ethtool_nway_reset,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

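/* MDIO accesses go through a single request register: busy-wait for any
 * pending request to clear, then latch address, register and data in one
 * write.
 */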
static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
        u32 val = MDIO_REQUEST |
                ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
                ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
                phy_data;

        while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
                ;
        ltq_etop_w32(val, LTQ_ETOP_MDIO);
        return 0;
}

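/* A read needs two busy-waits: one before issuing the request and one
 * for the request itself to complete, after which the value sits in the
 * low 16 bits of the register.
 */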
static int
ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
{
        u32 val = MDIO_REQUEST | MDIO_READ |
                ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
                ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);

        while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
                ;
        ltq_etop_w32(val, LTQ_ETOP_MDIO);
        while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
                ;
        val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
        return val;
}

static void
ltq_etop_mdio_link(struct net_device *dev)
{
        /* nothing to do */
}

static int
ltq_etop_mdio_probe(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;

        phydev = phy_find_first(priv->mii_bus);

        if (!phydev) {
                netdev_err(dev, "no PHY found\n");
                return -ENODEV;
        }

        phydev = phy_connect(dev, phydev_name(phydev),
                             &ltq_etop_mdio_link, priv->pldata->mii_mode);

        if (IS_ERR(phydev)) {
                netdev_err(dev, "Could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        phy_set_max_speed(phydev, SPEED_100);

        phy_attached_info(phydev);

        return 0;
}

static int
ltq_etop_mdio_init(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        int err;

        priv->mii_bus = mdiobus_alloc();
        if (!priv->mii_bus) {
                netdev_err(dev, "failed to allocate mii bus\n");
                err = -ENOMEM;
                goto err_out;
        }

        priv->mii_bus->priv = dev;
        priv->mii_bus->read = ltq_etop_mdio_rd;
        priv->mii_bus->write = ltq_etop_mdio_wr;
        priv->mii_bus->name = "ltq_mii";
        snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                 priv->pdev->name, priv->pdev->id);
        if (mdiobus_register(priv->mii_bus)) {
                err = -ENXIO;
                goto err_out_free_mdiobus;
        }

        if (ltq_etop_mdio_probe(dev)) {
                err = -ENXIO;
                goto err_out_unregister_bus;
        }
        return 0;

err_out_unregister_bus:
        mdiobus_unregister(priv->mii_bus);
err_out_free_mdiobus:
        mdiobus_free(priv->mii_bus);
err_out:
        return err;
}

static void
ltq_etop_mdio_cleanup(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);

        phy_disconnect(dev->phydev);
        mdiobus_unregister(priv->mii_bus);
        mdiobus_free(priv->mii_bus);
}

static int
ltq_etop_open(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < MAX_DMA_CHAN; i++) {
                struct ltq_etop_chan *ch = &priv->ch[i];

                if (!IS_TX(i) && !IS_RX(i))
                        continue;
                ltq_dma_open(&ch->dma);
                ltq_dma_enable_irq(&ch->dma);
                napi_enable(&ch->napi);
        }
        phy_start(dev->phydev);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int
ltq_etop_stop(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        int i;

        netif_tx_stop_all_queues(dev);
        phy_stop(dev->phydev);
        for (i = 0; i < MAX_DMA_CHAN; i++) {
                struct ltq_etop_chan *ch = &priv->ch[i];

                if (!IS_RX(i) && !IS_TX(i))
                        continue;
                napi_disable(&ch->napi);
                ltq_dma_close(&ch->dma);
        }
        return 0;
}

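/* Hard-start transmit: the DMA engine needs the buffer start address
 * aligned to the configured burst length (apparently counted in 32-bit
 * words, hence the "* 4" below), so the misalignment is carried in the
 * descriptor's byte offset instead of copying the data.
 */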
static netdev_tx_t
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
        int queue = skb_get_queue_mapping(skb);
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
        struct ltq_etop_priv *priv = netdev_priv(dev);
        struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        int len;
        unsigned long flags;
        u32 byte_offset;

        if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
        len = skb->len;

        if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
                netdev_err(dev, "tx ring full\n");
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        /* dma needs to start on a burst length value aligned address */
        byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4);
        ch->skb[ch->dma.desc] = skb;

        netif_trans_update(dev);

        spin_lock_irqsave(&priv->lock, flags);
        desc->addr = ((unsigned int)dma_map_single(&priv->pdev->dev, skb->data, len,
                                                   DMA_TO_DEVICE)) - byte_offset;
        /* Make sure the address is written before we give it to HW */
        wmb();
        desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
                LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;
        spin_unlock_irqrestore(&priv->lock, flags);

        if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
                netif_tx_stop_queue(txq);

        return NETDEV_TX_OK;
}

static int
ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        unsigned long flags;

        WRITE_ONCE(dev->mtu, new_mtu);

        spin_lock_irqsave(&priv->lock, flags);
        ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}

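/* The unicast filter takes the MAC split across two registers: the first
 * four bytes in MAC_DA0 and the remaining two in the upper half of
 * MAC_DA1.
 */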
static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
        int ret = eth_mac_addr(dev, p);

        if (!ret) {
                struct ltq_etop_priv *priv = netdev_priv(dev);
                unsigned long flags;

                /* store the mac for the unicast filter */
                spin_lock_irqsave(&priv->lock, flags);
                ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
                ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
                             LTQ_ETOP_MAC_DA1);
                spin_unlock_irqrestore(&priv->lock, flags);
        }
        return ret;
}

static void
ltq_etop_set_multicast_list(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        unsigned long flags;

        /* ensure that the unicast filter is not enabled in promiscuous mode */
        spin_lock_irqsave(&priv->lock, flags);
        if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
                ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
        else
                ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
        spin_unlock_irqrestore(&priv->lock, flags);
}

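/* ndo_init: bring up the hardware, program the MAC address (falling back
 * to a random one if the platform data holds an invalid address) and
 * register the MDIO bus.
 */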
static int
ltq_etop_init(struct net_device *dev)
{
        struct ltq_etop_priv *priv = netdev_priv(dev);
        struct sockaddr mac;
        int err;
        bool random_mac = false;

        dev->watchdog_timeo = 10 * HZ;
        err = ltq_etop_hw_init(dev);
        if (err)
                goto err_hw;
        ltq_etop_change_mtu(dev, 1500);

        memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
        if (!is_valid_ether_addr(mac.sa_data)) {
                pr_warn("etop: invalid MAC, using random\n");
                eth_random_addr(mac.sa_data);
                random_mac = true;
        }

        err = ltq_etop_set_mac_address(dev, &mac);
        if (err)
                goto err_netdev;

        /* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
        if (random_mac)
                dev->addr_assign_type = NET_ADDR_RANDOM;

        ltq_etop_set_multicast_list(dev);
        err = ltq_etop_mdio_init(dev);
        if (err)
                goto err_netdev;
        return 0;

err_netdev:
        unregister_netdev(dev);
        free_netdev(dev);
err_hw:
        ltq_etop_hw_exit(dev);
        return err;
}

static void
ltq_etop_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        int err;

        ltq_etop_hw_exit(dev);
        err = ltq_etop_hw_init(dev);
        if (err)
                goto err_hw;
        netif_trans_update(dev);
        netif_wake_queue(dev);
        return;

err_hw:
        ltq_etop_hw_exit(dev);
        netdev_err(dev, "failed to restart etop after TX timeout\n");
}

static const struct net_device_ops ltq_eth_netdev_ops = {
        .ndo_open = ltq_etop_open,
        .ndo_stop = ltq_etop_stop,
        .ndo_start_xmit = ltq_etop_tx,
        .ndo_change_mtu = ltq_etop_change_mtu,
        .ndo_eth_ioctl = phy_do_ioctl,
        .ndo_set_mac_address = ltq_etop_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_rx_mode = ltq_etop_set_multicast_list,
        .ndo_select_queue = dev_pick_tx_zero,
        .ndo_init = ltq_etop_init,
        .ndo_tx_timeout = ltq_etop_tx_timeout,
};

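/* Probe: map the register window, allocate a multiqueue netdev, read the
 * DMA burst lengths from device properties and wire up one NAPI context
 * per statically used DMA channel.
 */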
static int __init
ltq_etop_probe(struct platform_device *pdev)
{
        struct net_device *dev;
        struct ltq_etop_priv *priv;
        int err;
        int i;

        ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ltq_etop_membase)) {
                dev_err(&pdev->dev, "failed to remap etop engine %d\n",
                        pdev->id);
                err = PTR_ERR(ltq_etop_membase);
                goto err_out;
        }

        dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
        if (!dev) {
                err = -ENOMEM;
                goto err_out;
        }
        dev->netdev_ops = &ltq_eth_netdev_ops;
        dev->ethtool_ops = &ltq_etop_ethtool_ops;
        priv = netdev_priv(dev);
        priv->pdev = pdev;
        priv->pldata = dev_get_platdata(&pdev->dev);
        priv->netdev = dev;
        spin_lock_init(&priv->lock);
        SET_NETDEV_DEV(dev, &pdev->dev);

        err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
        if (err < 0) {
                dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
                goto err_free;
        }

        err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
        if (err < 0) {
                dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
                goto err_free;
        }

        for (i = 0; i < MAX_DMA_CHAN; i++) {
                if (IS_TX(i))
                        netif_napi_add_weight(dev, &priv->ch[i].napi,
                                              ltq_etop_poll_tx, 8);
                else if (IS_RX(i))
                        netif_napi_add_weight(dev, &priv->ch[i].napi,
                                              ltq_etop_poll_rx, 32);
                priv->ch[i].netdev = dev;
        }

        err = register_netdev(dev);
        if (err)
                goto err_free;

        platform_set_drvdata(pdev, dev);
        return 0;

err_free:
        free_netdev(dev);
err_out:
        return err;
}

static void ltq_etop_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        if (dev) {
                netif_tx_stop_all_queues(dev);
                ltq_etop_hw_exit(dev);
                ltq_etop_mdio_cleanup(dev);
                unregister_netdev(dev);
        }
}

static struct platform_driver ltq_mii_driver = {
        .remove = ltq_etop_remove,
        .driver = {
                .name = "ltq_etop",
        },
};

static int __init
init_ltq_etop(void)
{
        int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);

        if (ret)
                pr_err("ltq_etop: Error registering platform driver!\n");
        return ret;
}

static void __exit
exit_ltq_etop(void)
{
        platform_driver_unregister(&ltq_mii_driver);
}

module_init(init_ltq_etop);
module_exit(exit_ltq_etop);

MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_DESCRIPTION("Lantiq SoC ETOP");
MODULE_LICENSE("GPL");