1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2024 Hisilicon Limited.
3
4 #include <linux/etherdevice.h>
5 #include <linux/if_vlan.h>
6 #include <linux/netdevice.h>
7 #include <linux/pci.h>
8 #include "hbg_common.h"
9 #include "hbg_err.h"
10 #include "hbg_ethtool.h"
11 #include "hbg_hw.h"
12 #include "hbg_irq.h"
13 #include "hbg_mdio.h"
14 #include "hbg_txrx.h"
15 #include "hbg_debugfs.h"
16
/* Apply @enabled to the interrupt mask of every vector on this device. */
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
	u32 idx;

	for (idx = 0; idx < priv->vectors.info_array_len; idx++)
		hbg_hw_irq_enable(priv, priv->vectors.info_array[idx].mask,
				  enabled);
}
27
/* ndo_open: bring the interface up.
 * Order matters: the TX/RX rings must exist before interrupts and the
 * MAC are enabled; the PHY is started last.
 */
static int hbg_net_open(struct net_device *netdev)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	int ret;

	ret = hbg_txrx_init(priv);
	if (ret)
		return ret;

	hbg_all_irq_enable(priv, true);
	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
	netif_start_queue(netdev);
	hbg_phy_start(priv);

	return 0;
}
44
/* This function only can be called after hbg_txrx_uninit() */
static int hbg_hw_txrx_clear(struct hbg_priv *priv)
{
	int ret;

	/* After ring buffers have been released,
	 * do a reset to release hw fifo rx ring buffer
	 */
	ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
	if (ret)
		return ret;

	/* After reset, regs need to be reconfigured */
	return hbg_rebuild(priv);
}
60
/* ndo_stop: tear down in the reverse order of hbg_net_open(), then
 * reset the HW to drain its internal FIFOs (see hbg_hw_txrx_clear()).
 */
static int hbg_net_stop(struct net_device *netdev)
{
	struct hbg_priv *priv = netdev_priv(netdev);

	hbg_phy_stop(priv);
	netif_stop_queue(netdev);
	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
	hbg_all_irq_enable(priv, false);
	hbg_txrx_uninit(priv);
	return hbg_hw_txrx_clear(priv);
}
72
static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	bool filter_on;

	/* The HW MAC filter stays enabled only while the address table
	 * has not overflowed and userspace has not requested promiscuous
	 * mode; otherwise the filter is disabled so all frames pass.
	 */
	filter_on = !overflow && !(netdev->flags & IFF_PROMISC);
	priv->filter.enabled = filter_on;
	hbg_hw_set_mac_filter_enable(priv, filter_on);
}
84
/* Write entry @index of the SW shadow table and the corresponding HW
 * unicast address register. A NULL @addr clears the slot.
 */
static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
				     u32 index, const u8 *addr)
{
	u8 *slot = priv->filter.mac_table[index].addr;

	if (!addr) {
		eth_zero_addr(slot);
		hbg_hw_set_uc_addr(priv, 0, index);
		return;
	}

	ether_addr_copy(slot, addr);
	hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
}
96
/* Look up @addr in the SW shadow table.
 * Return 0 and store the slot in *@index on a hit, -EINVAL otherwise.
 */
static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
					const u8 *addr, u32 *index)
{
	u32 pos;

	for (pos = 0; pos < priv->filter.table_max_len; pos++) {
		if (!ether_addr_equal(priv->filter.mac_table[pos].addr, addr))
			continue;

		*index = pos;
		return 0;
	}

	return -EINVAL;
}
110
hbg_add_mac_to_filter(struct hbg_priv * priv,const u8 * addr)111 static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
112 {
113 u32 index;
114
115 /* already exists */
116 if (!hbg_get_index_from_mac_table(priv, addr, &index))
117 return 0;
118
119 for (index = 0; index < priv->filter.table_max_len; index++)
120 if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
121 hbg_set_mac_to_mac_table(priv, index, addr);
122 return 0;
123 }
124
125 return -ENOSPC;
126 }
127
hbg_del_mac_from_filter(struct hbg_priv * priv,const u8 * addr)128 static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
129 {
130 u32 index;
131
132 /* not exists */
133 if (hbg_get_index_from_mac_table(priv, addr, &index))
134 return;
135
136 hbg_set_mac_to_mac_table(priv, index, NULL);
137 }
138
/* __dev_uc_sync() add callback: program one unicast address. */
static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	return hbg_add_mac_to_filter(netdev_priv(netdev), addr);
}
145
hbg_uc_unsync(struct net_device * netdev,const unsigned char * addr)146 static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
147 {
148 struct hbg_priv *priv = netdev_priv(netdev);
149
150 if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
151 return 0;
152
153 hbg_del_mac_from_filter(priv, addr);
154 return 0;
155 }
156
hbg_net_set_rx_mode(struct net_device * netdev)157 static void hbg_net_set_rx_mode(struct net_device *netdev)
158 {
159 int ret;
160
161 ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
162
163 /* If ret != 0, overflow has occurred */
164 hbg_update_promisc_mode(netdev, !!ret);
165 }
166
/* ndo_set_mac_address: install a new host MAC address.
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 */
static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	u8 *mac_addr;
	bool exists;
	u32 index;

	mac_addr = ((struct sockaddr *)addr)->sa_data;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* The index of host mac is always 0.
	 * If new mac address already exists,
	 * delete the existing mac address and
	 * add it to the position with index 0.
	 */
	exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
	hbg_set_mac_to_mac_table(priv, 0, mac_addr);
	if (exists)
		hbg_set_mac_to_mac_table(priv, index, NULL);

	/* The pause-frame address register must track the host MAC. */
	hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
	dev_addr_set(netdev, mac_addr);
	return 0;
}
193
hbg_net_change_mtu(struct net_device * netdev,int new_mtu)194 static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
195 {
196 struct hbg_priv *priv = netdev_priv(netdev);
197
198 if (netif_running(netdev))
199 return -EBUSY;
200
201 dev_dbg(&priv->pdev->dev,
202 "change mtu from %u to %u\n", netdev->mtu, new_mtu);
203
204 hbg_hw_set_mtu(priv, new_mtu);
205 WRITE_ONCE(netdev->mtu, new_mtu);
206
207 return 0;
208 }
209
/* ndo_tx_timeout: dump TX ring and FIFO state into the preallocated
 * timeout log buffer, then log it in one shot.
 */
static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	struct hbg_ring *ring = &priv->tx_ring;
	char *buf = ring->tout_log_buf;
	u32 pos = 0;

	/* scnprintf() never overruns and returns chars written, so the
	 * two fragments append safely within HBG_TX_TIMEOUT_BUF_LEN.
	 */
	pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
			 "ring used num: %u, fifo used num: %u\n",
			 hbg_get_queue_used_num(ring),
			 hbg_hw_get_fifo_used_num(priv, HBG_DIR_TX));
	pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
			 "ntc: %u, ntu: %u, irq enabled: %u\n",
			 ring->ntc, ring->ntu,
			 hbg_hw_irq_is_enabled(priv, HBG_INT_MSK_TX_B));

	netdev_info(netdev, "%s", buf);
}
228
/* Netdev callbacks; all ops are defined above in this file. */
static const struct net_device_ops hbg_netdev_ops = {
	.ndo_open		= hbg_net_open,
	.ndo_stop		= hbg_net_stop,
	.ndo_start_xmit		= hbg_net_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= hbg_net_set_mac_address,
	.ndo_change_mtu		= hbg_net_change_mtu,
	.ndo_tx_timeout		= hbg_net_tx_timeout,
	.ndo_set_rx_mode	= hbg_net_set_rx_mode,
};
239
hbg_mac_filter_init(struct hbg_priv * priv)240 static int hbg_mac_filter_init(struct hbg_priv *priv)
241 {
242 struct hbg_dev_specs *dev_specs = &priv->dev_specs;
243 struct hbg_mac_filter *filter = &priv->filter;
244 struct hbg_mac_table_entry *tmp_table;
245
246 tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
247 sizeof(*tmp_table), GFP_KERNEL);
248 if (!tmp_table)
249 return -ENOMEM;
250
251 filter->mac_table = tmp_table;
252 filter->table_max_len = dev_specs->uc_mac_num;
253 filter->enabled = true;
254
255 hbg_hw_set_mac_filter_enable(priv, filter->enabled);
256 return 0;
257 }
258
/* Seed the user-visible defaults: pause autoneg on, and the initial
 * tx/rx pause state read back from the HW.
 */
static void hbg_init_user_def(struct hbg_priv *priv)
{
	struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;

	priv->mac.pause_autoneg = HBG_STATUS_ENABLE;

	pause_param->autoneg = priv->mac.pause_autoneg;
	hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
				&pause_param->rx_pause);
}
269
/* Device bring-up: HW event/init first, then IRQ, MDIO and MAC filter
 * setup, finishing with debugfs and user-visible defaults.
 * All sub-init steps are devm-managed or stateless, so no unwind is
 * performed here on failure.
 */
static int hbg_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_INIT);
	if (ret)
		return ret;

	ret = hbg_hw_init(priv);
	if (ret)
		return ret;

	ret = hbg_irq_init(priv);
	if (ret)
		return ret;

	ret = hbg_mdio_init(priv);
	if (ret)
		return ret;

	ret = hbg_mac_filter_init(priv);
	if (ret)
		return ret;

	hbg_debugfs_init(priv);
	hbg_init_user_def(priv);
	return 0;
}
298
/* PCI bring-up: enable the device, set a 32-bit DMA mask, map BAR 0
 * and enable bus mastering. All resources are pcim/devm managed.
 */
static int hbg_pci_init(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct hbg_priv *priv = netdev_priv(netdev);
	struct device *dev = &pdev->dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return dev_err_probe(dev, ret, "failed to enable PCI device\n");

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "failed to set PCI DMA mask\n");

	ret = pcim_iomap_regions(pdev, BIT(0), dev_driver_string(dev));
	if (ret)
		return dev_err_probe(dev, ret, "failed to map PCI bar space\n");

	/* Register space lives in BAR 0. */
	priv->io_base = pcim_iomap_table(pdev)[0];
	if (!priv->io_base)
		return dev_err_probe(dev, -ENOMEM, "failed to get io base\n");

	pci_set_master(pdev);
	return 0;
}
325
/* Probe: allocate the netdev, initialize PCI and device state, then
 * register with the net core. Everything is devm-managed, so error
 * paths simply return and no .remove callback is needed.
 */
static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct hbg_priv *priv;
	int ret;

	netdev = devm_alloc_etherdev(dev, sizeof(struct hbg_priv));
	if (!netdev)
		return -ENOMEM;

	/* drvdata must be set before hbg_pci_init(), which reads it back. */
	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, dev);

	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->pdev = pdev;

	ret = hbg_pci_init(pdev);
	if (ret)
		return ret;

	ret = hbg_init(priv);
	if (ret)
		return ret;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	netdev->max_mtu = priv->dev_specs.max_mtu;
	netdev->min_mtu = priv->dev_specs.min_mtu;
	netdev->netdev_ops = &hbg_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;

	hbg_hw_set_mtu(priv, ETH_DATA_LEN);
	/* NOTE(review): the return value is ignored here; if the
	 * firmware-provided MAC in dev_specs were invalid this would
	 * fail silently — confirm dev_specs.mac_addr is always valid.
	 */
	hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
	hbg_ethtool_set_ops(netdev);

	ret = devm_register_netdev(dev, netdev);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register netdev\n");

	netif_carrier_off(netdev);
	return 0;
}
371
/* Supported devices: Huawei/HiSilicon BMC GE NIC (device 0x3730). */
static const struct pci_device_id hbg_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, 0x3730), 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, hbg_pci_tbl);
377
/* No .remove: all probe-time resources are devm/pcim managed, and
 * hbg_set_pci_err_handler() fills in .err_handler before registration.
 */
static struct pci_driver hbg_driver = {
	.name		= "hibmcge",
	.id_table	= hbg_pci_tbl,
	.probe		= hbg_probe,
};
383
hbg_module_init(void)384 static int __init hbg_module_init(void)
385 {
386 int ret;
387
388 hbg_debugfs_register();
389 hbg_set_pci_err_handler(&hbg_driver);
390 ret = pci_register_driver(&hbg_driver);
391 if (ret)
392 hbg_debugfs_unregister();
393
394 return ret;
395 }
396 module_init(hbg_module_init);
397
/* Module exit: unregister the driver first so no probe can run while
 * the debugfs root is being removed.
 */
static void __exit hbg_module_exit(void)
{
	pci_unregister_driver(&hbg_driver);
	hbg_debugfs_unregister();
}
module_exit(hbg_module_exit);
404
405 MODULE_LICENSE("GPL");
406 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
407 MODULE_DESCRIPTION("hibmcge driver");
408 MODULE_VERSION("1.0");
409