1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_queues.h>
58
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_hwrm.h"
62 #include "bnxt_ulp.h"
63 #include "bnxt_sriov.h"
64 #include "bnxt_ethtool.h"
65 #include "bnxt_dcb.h"
66 #include "bnxt_xdp.h"
67 #include "bnxt_ptp.h"
68 #include "bnxt_vfr.h"
69 #include "bnxt_tc.h"
70 #include "bnxt_devlink.h"
71 #include "bnxt_debugfs.h"
72 #include "bnxt_coredump.h"
73 #include "bnxt_hwmon.h"
74
75 #define BNXT_TX_TIMEOUT (5 * HZ)
76 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
77 NETIF_MSG_TX_ERR)
78
79 MODULE_LICENSE("GPL");
80 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
81
82 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84
85 #define BNXT_TX_PUSH_THRESH 164
86
87 /* indexed by enum board_idx */
88 static const struct {
89 char *name;
90 } board_info[] = {
91 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
95 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
96 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
124 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
126 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
127 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
128 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
129 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
130 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
132 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
133 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
134 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
135 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
136 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
137 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
138 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
139 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
140 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
141 };
142
143 static const struct pci_device_id bnxt_pci_tbl[] = {
144 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
145 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
146 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
147 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
148 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
149 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
150 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
151 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
152 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
153 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
154 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
155 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
156 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
157 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
158 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
159 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
160 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
161 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
162 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
163 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
164 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
165 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
166 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
167 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
168 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
171 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
178 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
179 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
180 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
181 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
182 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
183 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
184 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
185 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
186 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
187 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
191 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
193 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
194 #ifdef CONFIG_BNXT_SRIOV
195 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
196 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
197 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
198 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
199 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
200 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
201 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
207 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
208 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
209 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
210 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
211 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
212 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
213 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
214 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
215 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
216 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
217 #endif
218 { 0 }
219 };
220
221 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
222
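/* HWRM commands issued by VFs that the PF driver needs to inspect before
 * they are forwarded to firmware.
 */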
223 static const u16 bnxt_vf_req_snif[] = {
224 HWRM_FUNC_CFG,
225 HWRM_FUNC_VF_CFG,
226 HWRM_PORT_PHY_QCFG,
227 HWRM_CFA_L2_FILTER_ALLOC,
228 };
229
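/* Firmware async completion event IDs that the driver registers to receive */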
230 static const u16 bnxt_async_events_arr[] = {
231 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
232 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
233 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
234 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
235 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
236 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
237 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
238 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
239 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
240 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
241 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
242 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
243 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
244 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
245 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
246 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
247 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
248 };
249
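/* Maps backing-store context types to the corresponding firmware trace
 * buffer types used when requesting a debug log buffer flush.
 */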
250 const u16 bnxt_bstore_to_trace[] = {
251 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
252 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
253 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
254 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
255 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
256 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
257 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
258 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
259 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
260 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
261 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
262 };
263
264 static struct workqueue_struct *bnxt_pf_wq;
265
266 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
267 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
268 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
269
270 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
271 .ports = {
272 .src = 0,
273 .dst = 0,
274 },
275 .addrs = {
276 .v6addrs = {
277 .src = BNXT_IPV6_MASK_NONE,
278 .dst = BNXT_IPV6_MASK_NONE,
279 },
280 },
281 };
282
283 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
284 .ports = {
285 .src = cpu_to_be16(0xffff),
286 .dst = cpu_to_be16(0xffff),
287 },
288 .addrs = {
289 .v6addrs = {
290 .src = BNXT_IPV6_MASK_ALL,
291 .dst = BNXT_IPV6_MASK_ALL,
292 },
293 },
294 };
295
296 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
297 .ports = {
298 .src = cpu_to_be16(0xffff),
299 .dst = cpu_to_be16(0xffff),
300 },
301 .addrs = {
302 .v4addrs = {
303 .src = cpu_to_be32(0xffffffff),
304 .dst = cpu_to_be32(0xffffffff),
305 },
306 },
307 };
308
309 static bool bnxt_vf_pciid(enum board_idx idx)
310 {
311 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
312 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
313 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
314 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
315 }
316
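/* Doorbell helpers: older chips use 32-bit completion ring doorbells,
 * while P5 and later chips use 64-bit NQ/CQ doorbells.
 */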
317 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
318 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
319
320 #define BNXT_DB_CQ(db, idx) \
321 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
322
323 #define BNXT_DB_NQ_P5(db, idx) \
324 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
325 (db)->doorbell)
326
327 #define BNXT_DB_NQ_P7(db, idx) \
328 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
329 DB_RING_IDX(db, idx), (db)->doorbell)
330
331 #define BNXT_DB_CQ_ARM(db, idx) \
332 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
333
334 #define BNXT_DB_NQ_ARM_P5(db, idx) \
335 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
336 DB_RING_IDX(db, idx), (db)->doorbell)
337
338 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
339 {
340 if (bp->flags & BNXT_FLAG_CHIP_P7)
341 BNXT_DB_NQ_P7(db, idx);
342 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
343 BNXT_DB_NQ_P5(db, idx);
344 else
345 BNXT_DB_CQ(db, idx);
346 }
347
348 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
349 {
350 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
351 BNXT_DB_NQ_ARM_P5(db, idx);
352 else
353 BNXT_DB_CQ_ARM(db, idx);
354 }
355
356 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
357 {
358 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
359 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
360 DB_RING_IDX(db, idx), db->doorbell);
361 else
362 BNXT_DB_CQ(db, idx);
363 }
364
365 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
366 {
367 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
368 return;
369
370 if (BNXT_PF(bp))
371 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
372 else
373 schedule_delayed_work(&bp->fw_reset_task, delay);
374 }
375
376 static void __bnxt_queue_sp_work(struct bnxt *bp)
377 {
378 if (BNXT_PF(bp))
379 queue_work(bnxt_pf_wq, &bp->sp_task);
380 else
381 schedule_work(&bp->sp_task);
382 }
383
384 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
385 {
386 set_bit(event, &bp->sp_event);
387 __bnxt_queue_sp_work(bp);
388 }
389
390 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
391 {
392 if (!rxr->bnapi->in_reset) {
393 rxr->bnapi->in_reset = true;
394 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
395 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
396 else
397 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
398 __bnxt_queue_sp_work(bp);
399 }
400 rxr->rx_next_cons = 0xffff;
401 }
402
403 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
404 u16 curr)
405 {
406 struct bnxt_napi *bnapi = txr->bnapi;
407
408 if (bnapi->tx_fault)
409 return;
410
411 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
412 txr->txq_index, txr->tx_hw_cons,
413 txr->tx_cons, txr->tx_prod, curr);
414 WARN_ON_ONCE(1);
415 bnapi->tx_fault = 1;
416 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
417 }
418
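/* TX length hint flags, indexed by packet length in 512-byte units
 * (i.e. length >> 9).
 */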
419 const u16 bnxt_lhint_arr[] = {
420 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
421 TX_BD_FLAGS_LHINT_512_TO_1023,
422 TX_BD_FLAGS_LHINT_1024_TO_2047,
423 TX_BD_FLAGS_LHINT_1024_TO_2047,
424 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
425 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
426 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
427 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
428 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
429 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
430 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
431 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
432 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
433 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 };
440
441 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
442 {
443 struct metadata_dst *md_dst = skb_metadata_dst(skb);
444
445 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
446 return 0;
447
448 return md_dst->u.port_info.port_id;
449 }
450
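/* Write the TX doorbell to notify hardware of newly posted descriptors */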
451 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
452 u16 prod)
453 {
454 /* Sync BD data before updating doorbell */
455 wmb();
456 bnxt_db_write(bp, &txr->tx_db, prod);
457 txr->kick_pending = 0;
458 }
459
460 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
461 {
462 struct bnxt *bp = netdev_priv(dev);
463 struct tx_bd *txbd, *txbd0;
464 struct tx_bd_ext *txbd1;
465 struct netdev_queue *txq;
466 int i;
467 dma_addr_t mapping;
468 unsigned int length, pad = 0;
469 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
470 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
471 struct pci_dev *pdev = bp->pdev;
472 u16 prod, last_frag, txts_prod;
473 struct bnxt_tx_ring_info *txr;
474 struct bnxt_sw_tx_bd *tx_buf;
475 __le32 lflags = 0;
476
477 i = skb_get_queue_mapping(skb);
478 if (unlikely(i >= bp->tx_nr_rings)) {
479 dev_kfree_skb_any(skb);
480 dev_core_stats_tx_dropped_inc(dev);
481 return NETDEV_TX_OK;
482 }
483
484 txq = netdev_get_tx_queue(dev, i);
485 txr = &bp->tx_ring[bp->tx_ring_map[i]];
486 prod = txr->tx_prod;
487
488 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
489 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
490 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
491 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
492 if (skb_linearize(skb)) {
493 dev_kfree_skb_any(skb);
494 dev_core_stats_tx_dropped_inc(dev);
495 return NETDEV_TX_OK;
496 }
497 }
498 #endif
499 free_size = bnxt_tx_avail(bp, txr);
500 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
501 /* We must have raced with NAPI cleanup */
502 if (net_ratelimit() && txr->kick_pending)
503 netif_warn(bp, tx_err, dev,
504 "bnxt: ring busy w/ flush pending!\n");
505 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
506 bp->tx_wake_thresh))
507 return NETDEV_TX_BUSY;
508 }
509
510 if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
511 goto tx_free;
512
513 length = skb->len;
514 len = skb_headlen(skb);
515 last_frag = skb_shinfo(skb)->nr_frags;
516
517 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
518
519 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
520 tx_buf->skb = skb;
521 tx_buf->nr_frags = last_frag;
522
523 vlan_tag_flags = 0;
524 cfa_action = bnxt_xmit_get_cfa_action(skb);
525 if (skb_vlan_tag_present(skb)) {
526 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
527 skb_vlan_tag_get(skb);
528 /* Currently supports 802.1Q and 802.1AD VLAN offloads.
529 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
530 */
531 if (skb->vlan_proto == htons(ETH_P_8021Q))
532 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
533 }
534
535 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
536 ptp->tx_tstamp_en) {
537 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
538 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
539 tx_buf->is_ts_pkt = 1;
540 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
541 } else if (!skb_is_gso(skb)) {
542 u16 seq_id, hdr_off;
543
544 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
545 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
546 if (vlan_tag_flags)
547 hdr_off += VLAN_HLEN;
548 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
549 tx_buf->is_ts_pkt = 1;
550 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
551
552 ptp->txts_req[txts_prod].tx_seqid = seq_id;
553 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
554 tx_buf->txts_prod = txts_prod;
555 }
556 }
557 }
558 if (unlikely(skb->no_fcs))
559 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
560
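/* Small packets with no offload flags can be pushed inline through the
 * doorbell region instead of being DMAed from host memory.
 */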
561 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
562 !lflags) {
563 struct tx_push_buffer *tx_push_buf = txr->tx_push;
564 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
565 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
566 void __iomem *db = txr->tx_db.doorbell;
567 void *pdata = tx_push_buf->data;
568 u64 *end;
569 int j, push_len;
570
571 /* Set COAL_NOW to be ready quickly for the next push */
572 tx_push->tx_bd_len_flags_type =
573 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
574 TX_BD_TYPE_LONG_TX_BD |
575 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
576 TX_BD_FLAGS_COAL_NOW |
577 TX_BD_FLAGS_PACKET_END |
578 TX_BD_CNT(2));
579
580 if (skb->ip_summed == CHECKSUM_PARTIAL)
581 tx_push1->tx_bd_hsize_lflags =
582 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
583 else
584 tx_push1->tx_bd_hsize_lflags = 0;
585
586 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
587 tx_push1->tx_bd_cfa_action =
588 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
589
590 end = pdata + length;
591 end = PTR_ALIGN(end, 8) - 1;
592 *end = 0;
593
594 skb_copy_from_linear_data(skb, pdata, len);
595 pdata += len;
596 for (j = 0; j < last_frag; j++) {
597 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
598 void *fptr;
599
600 fptr = skb_frag_address_safe(frag);
601 if (!fptr)
602 goto normal_tx;
603
604 memcpy(pdata, fptr, skb_frag_size(frag));
605 pdata += skb_frag_size(frag);
606 }
607
608 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
609 txbd->tx_bd_haddr = txr->data_mapping;
610 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
611 prod = NEXT_TX(prod);
612 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
613 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
614 memcpy(txbd, tx_push1, sizeof(*txbd));
615 prod = NEXT_TX(prod);
616 tx_push->doorbell =
617 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
618 DB_RING_IDX(&txr->tx_db, prod));
619 WRITE_ONCE(txr->tx_prod, prod);
620
621 tx_buf->is_push = 1;
622 netdev_tx_sent_queue(txq, skb->len);
623 wmb(); /* Sync is_push and byte queue before pushing data */
624
625 push_len = (length + sizeof(*tx_push) + 7) / 8;
626 if (push_len > 16) {
627 __iowrite64_copy(db, tx_push_buf, 16);
628 __iowrite32_copy(db + 4, tx_push_buf + 1,
629 (push_len - 16) << 1);
630 } else {
631 __iowrite64_copy(db, tx_push_buf, push_len);
632 }
633
634 goto tx_done;
635 }
636
637 normal_tx:
638 if (length < BNXT_MIN_PKT_SIZE) {
639 pad = BNXT_MIN_PKT_SIZE - length;
640 if (skb_pad(skb, pad))
641 /* SKB already freed. */
642 goto tx_kick_pending;
643 length = BNXT_MIN_PKT_SIZE;
644 }
645
646 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
647
648 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
649 goto tx_free;
650
651 dma_unmap_addr_set(tx_buf, mapping, mapping);
652 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
653 TX_BD_CNT(last_frag + 2);
654
655 txbd->tx_bd_haddr = cpu_to_le64(mapping);
656 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
657
658 prod = NEXT_TX(prod);
659 txbd1 = (struct tx_bd_ext *)
660 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
661
662 txbd1->tx_bd_hsize_lflags = lflags;
663 if (skb_is_gso(skb)) {
664 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
665 u32 hdr_len;
666
667 if (skb->encapsulation) {
668 if (udp_gso)
669 hdr_len = skb_inner_transport_offset(skb) +
670 sizeof(struct udphdr);
671 else
672 hdr_len = skb_inner_tcp_all_headers(skb);
673 } else if (udp_gso) {
674 hdr_len = skb_transport_offset(skb) +
675 sizeof(struct udphdr);
676 } else {
677 hdr_len = skb_tcp_all_headers(skb);
678 }
679
680 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
681 TX_BD_FLAGS_T_IPID |
682 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
683 length = skb_shinfo(skb)->gso_size;
684 txbd1->tx_bd_mss = cpu_to_le32(length);
685 length += hdr_len;
686 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
687 txbd1->tx_bd_hsize_lflags |=
688 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
689 txbd1->tx_bd_mss = 0;
690 }
691
692 length >>= 9;
693 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
694 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
695 skb->len);
696 i = 0;
697 goto tx_dma_error;
698 }
699 flags |= bnxt_lhint_arr[length];
700 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
701
702 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
703 txbd1->tx_bd_cfa_action =
704 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
705 txbd0 = txbd;
706 for (i = 0; i < last_frag; i++) {
707 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
708
709 prod = NEXT_TX(prod);
710 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
711
712 len = skb_frag_size(frag);
713 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
714 DMA_TO_DEVICE);
715
716 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
717 goto tx_dma_error;
718
719 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
720 dma_unmap_addr_set(tx_buf, mapping, mapping);
721
722 txbd->tx_bd_haddr = cpu_to_le64(mapping);
723
724 flags = len << TX_BD_LEN_SHIFT;
725 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
726 }
727
728 flags &= ~TX_BD_LEN;
729 txbd->tx_bd_len_flags_type =
730 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
731 TX_BD_FLAGS_PACKET_END);
732
733 netdev_tx_sent_queue(txq, skb->len);
734
735 skb_tx_timestamp(skb);
736
737 prod = NEXT_TX(prod);
738 WRITE_ONCE(txr->tx_prod, prod);
739
740 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
741 bnxt_txr_db_kick(bp, txr, prod);
742 } else {
743 if (free_size >= bp->tx_wake_thresh)
744 txbd0->tx_bd_len_flags_type |=
745 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
746 txr->kick_pending = 1;
747 }
748
749 tx_done:
750
751 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
752 if (netdev_xmit_more() && !tx_buf->is_push) {
753 txbd0->tx_bd_len_flags_type &=
754 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
755 bnxt_txr_db_kick(bp, txr, prod);
756 }
757
758 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
759 bp->tx_wake_thresh);
760 }
761 return NETDEV_TX_OK;
762
763 tx_dma_error:
764 last_frag = i;
765
766 /* start back at beginning and unmap skb */
767 prod = txr->tx_prod;
768 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
769 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
770 skb_headlen(skb), DMA_TO_DEVICE);
771 prod = NEXT_TX(prod);
772
773 /* unmap remaining mapped pages */
774 for (i = 0; i < last_frag; i++) {
775 prod = NEXT_TX(prod);
776 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
777 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
778 skb_frag_size(&skb_shinfo(skb)->frags[i]),
779 DMA_TO_DEVICE);
780 }
781
782 tx_free:
783 dev_kfree_skb_any(skb);
784 tx_kick_pending:
785 if (BNXT_TX_PTP_IS_SET(lflags)) {
786 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
787 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
788 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
789 /* set SKB to err so PTP worker will clean up */
790 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
791 }
792 if (txr->kick_pending)
793 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
794 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
795 dev_core_stats_tx_dropped_inc(dev);
796 return NETDEV_TX_OK;
797 }
798
799 /* Returns true if some remaining TX packets not processed. */
800 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
801 int budget)
802 {
803 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
804 struct pci_dev *pdev = bp->pdev;
805 u16 hw_cons = txr->tx_hw_cons;
806 unsigned int tx_bytes = 0;
807 u16 cons = txr->tx_cons;
808 int tx_pkts = 0;
809 bool rc = false;
810
811 while (RING_TX(bp, cons) != hw_cons) {
812 struct bnxt_sw_tx_bd *tx_buf;
813 struct sk_buff *skb;
814 bool is_ts_pkt;
815 int j, last;
816
817 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
818 skb = tx_buf->skb;
819
820 if (unlikely(!skb)) {
821 bnxt_sched_reset_txr(bp, txr, cons);
822 return rc;
823 }
824
825 is_ts_pkt = tx_buf->is_ts_pkt;
826 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
827 rc = true;
828 break;
829 }
830
831 cons = NEXT_TX(cons);
832 tx_pkts++;
833 tx_bytes += skb->len;
834 tx_buf->skb = NULL;
835 tx_buf->is_ts_pkt = 0;
836
837 if (tx_buf->is_push) {
838 tx_buf->is_push = 0;
839 goto next_tx_int;
840 }
841
842 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
843 skb_headlen(skb), DMA_TO_DEVICE);
844 last = tx_buf->nr_frags;
845
846 for (j = 0; j < last; j++) {
847 cons = NEXT_TX(cons);
848 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
849 dma_unmap_page(
850 &pdev->dev,
851 dma_unmap_addr(tx_buf, mapping),
852 skb_frag_size(&skb_shinfo(skb)->frags[j]),
853 DMA_TO_DEVICE);
854 }
855 if (unlikely(is_ts_pkt)) {
856 if (BNXT_CHIP_P5(bp)) {
857 /* PTP worker takes ownership of the skb */
858 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
859 skb = NULL;
860 }
861 }
862
863 next_tx_int:
864 cons = NEXT_TX(cons);
865
866 dev_consume_skb_any(skb);
867 }
868
869 WRITE_ONCE(txr->tx_cons, cons);
870
871 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
872 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
873 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
874
875 return rc;
876 }
877
878 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
879 {
880 struct bnxt_tx_ring_info *txr;
881 bool more = false;
882 int i;
883
884 bnxt_for_each_napi_tx(i, bnapi, txr) {
885 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
886 more |= __bnxt_tx_int(bp, txr, budget);
887 }
888 if (!more)
889 bnapi->events &= ~BNXT_TX_CMP_EVENT;
890 }
891
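/* A separate page pool is used for RX header buffers when the system page
 * size is larger than the hardware RX page size.
 */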
892 static bool bnxt_separate_head_pool(void)
893 {
894 return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
895 }
896
897 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
898 struct bnxt_rx_ring_info *rxr,
899 unsigned int *offset,
900 gfp_t gfp)
901 {
902 struct page *page;
903
904 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
905 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
906 BNXT_RX_PAGE_SIZE);
907 } else {
908 page = page_pool_dev_alloc_pages(rxr->page_pool);
909 *offset = 0;
910 }
911 if (!page)
912 return NULL;
913
914 *mapping = page_pool_get_dma_addr(page) + *offset;
915 return page;
916 }
917
918 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
919 struct bnxt_rx_ring_info *rxr,
920 gfp_t gfp)
921 {
922 unsigned int offset;
923 struct page *page;
924
925 page = page_pool_alloc_frag(rxr->head_pool, &offset,
926 bp->rx_buf_size, gfp);
927 if (!page)
928 return NULL;
929
930 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
931 return page_address(page) + offset;
932 }
933
934 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
935 u16 prod, gfp_t gfp)
936 {
937 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
938 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
939 dma_addr_t mapping;
940
941 if (BNXT_RX_PAGE_MODE(bp)) {
942 unsigned int offset;
943 struct page *page =
944 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
945
946 if (!page)
947 return -ENOMEM;
948
949 mapping += bp->rx_dma_offset;
950 rx_buf->data = page;
951 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
952 } else {
953 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
954
955 if (!data)
956 return -ENOMEM;
957
958 rx_buf->data = data;
959 rx_buf->data_ptr = data + bp->rx_offset;
960 }
961 rx_buf->mapping = mapping;
962
963 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
964 return 0;
965 }
966
967 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
968 {
969 u16 prod = rxr->rx_prod;
970 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
971 struct bnxt *bp = rxr->bnapi->bp;
972 struct rx_bd *cons_bd, *prod_bd;
973
974 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
975 cons_rx_buf = &rxr->rx_buf_ring[cons];
976
977 prod_rx_buf->data = data;
978 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
979
980 prod_rx_buf->mapping = cons_rx_buf->mapping;
981
982 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
983 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
984
985 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
986 }
987
988 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
989 {
990 u16 next, max = rxr->rx_agg_bmap_size;
991
992 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
993 if (next >= max)
994 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
995 return next;
996 }
997
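/* Allocate and map a page for the RX aggregation ring at producer index 'prod' */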
998 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
999 struct bnxt_rx_ring_info *rxr,
1000 u16 prod, gfp_t gfp)
1001 {
1002 struct rx_bd *rxbd =
1003 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1004 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1005 struct page *page;
1006 dma_addr_t mapping;
1007 u16 sw_prod = rxr->rx_sw_agg_prod;
1008 unsigned int offset = 0;
1009
1010 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
1011
1012 if (!page)
1013 return -ENOMEM;
1014
1015 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1016 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1017
1018 __set_bit(sw_prod, rxr->rx_agg_bmap);
1019 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1020 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1021
1022 rx_agg_buf->page = page;
1023 rx_agg_buf->offset = offset;
1024 rx_agg_buf->mapping = mapping;
1025 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1026 rxbd->rx_bd_opaque = sw_prod;
1027 return 0;
1028 }
1029
1030 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1031 struct bnxt_cp_ring_info *cpr,
1032 u16 cp_cons, u16 curr)
1033 {
1034 struct rx_agg_cmp *agg;
1035
1036 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1037 agg = (struct rx_agg_cmp *)
1038 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1039 return agg;
1040 }
1041
1042 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1043 struct bnxt_rx_ring_info *rxr,
1044 u16 agg_id, u16 curr)
1045 {
1046 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1047
1048 return &tpa_info->agg_arr[curr];
1049 }
1050
1051 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1052 u16 start, u32 agg_bufs, bool tpa)
1053 {
1054 struct bnxt_napi *bnapi = cpr->bnapi;
1055 struct bnxt *bp = bnapi->bp;
1056 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1057 u16 prod = rxr->rx_agg_prod;
1058 u16 sw_prod = rxr->rx_sw_agg_prod;
1059 bool p5_tpa = false;
1060 u32 i;
1061
1062 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1063 p5_tpa = true;
1064
1065 for (i = 0; i < agg_bufs; i++) {
1066 u16 cons;
1067 struct rx_agg_cmp *agg;
1068 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1069 struct rx_bd *prod_bd;
1070 struct page *page;
1071
1072 if (p5_tpa)
1073 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1074 else
1075 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1076 cons = agg->rx_agg_cmp_opaque;
1077 __clear_bit(cons, rxr->rx_agg_bmap);
1078
1079 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1080 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1081
1082 __set_bit(sw_prod, rxr->rx_agg_bmap);
1083 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1084 cons_rx_buf = &rxr->rx_agg_ring[cons];
1085
1086 /* It is possible for sw_prod to be equal to cons, so
1087 * set cons_rx_buf->page to NULL first.
1088 */
1089 page = cons_rx_buf->page;
1090 cons_rx_buf->page = NULL;
1091 prod_rx_buf->page = page;
1092 prod_rx_buf->offset = cons_rx_buf->offset;
1093
1094 prod_rx_buf->mapping = cons_rx_buf->mapping;
1095
1096 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1097
1098 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1099 prod_bd->rx_bd_opaque = sw_prod;
1100
1101 prod = NEXT_RX_AGG(prod);
1102 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1103 }
1104 rxr->rx_agg_prod = prod;
1105 rxr->rx_sw_agg_prod = sw_prod;
1106 }
1107
1108 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1109 struct bnxt_rx_ring_info *rxr,
1110 u16 cons, void *data, u8 *data_ptr,
1111 dma_addr_t dma_addr,
1112 unsigned int offset_and_len)
1113 {
1114 unsigned int len = offset_and_len & 0xffff;
1115 struct page *page = data;
1116 u16 prod = rxr->rx_prod;
1117 struct sk_buff *skb;
1118 int err;
1119
1120 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1121 if (unlikely(err)) {
1122 bnxt_reuse_rx_data(rxr, cons, data);
1123 return NULL;
1124 }
1125 dma_addr -= bp->rx_dma_offset;
1126 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1127 bp->rx_dir);
1128 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1129 if (!skb) {
1130 page_pool_recycle_direct(rxr->page_pool, page);
1131 return NULL;
1132 }
1133 skb_mark_for_recycle(skb);
1134 skb_reserve(skb, bp->rx_offset);
1135 __skb_put(skb, len);
1136
1137 return skb;
1138 }
1139
1140 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1141 struct bnxt_rx_ring_info *rxr,
1142 u16 cons, void *data, u8 *data_ptr,
1143 dma_addr_t dma_addr,
1144 unsigned int offset_and_len)
1145 {
1146 unsigned int payload = offset_and_len >> 16;
1147 unsigned int len = offset_and_len & 0xffff;
1148 skb_frag_t *frag;
1149 struct page *page = data;
1150 u16 prod = rxr->rx_prod;
1151 struct sk_buff *skb;
1152 int off, err;
1153
1154 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1155 if (unlikely(err)) {
1156 bnxt_reuse_rx_data(rxr, cons, data);
1157 return NULL;
1158 }
1159 dma_addr -= bp->rx_dma_offset;
1160 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1161 bp->rx_dir);
1162
1163 if (unlikely(!payload))
1164 payload = eth_get_headlen(bp->dev, data_ptr, len);
1165
1166 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1167 if (!skb) {
1168 page_pool_recycle_direct(rxr->page_pool, page);
1169 return NULL;
1170 }
1171
1172 skb_mark_for_recycle(skb);
1173 off = (void *)data_ptr - page_address(page);
1174 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1175 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1176 payload + NET_IP_ALIGN);
1177
1178 frag = &skb_shinfo(skb)->frags[0];
1179 skb_frag_size_sub(frag, payload);
1180 skb_frag_off_add(frag, payload);
1181 skb->data_len -= payload;
1182 skb->tail += payload;
1183
1184 return skb;
1185 }
1186
1187 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1188 struct bnxt_rx_ring_info *rxr, u16 cons,
1189 void *data, u8 *data_ptr,
1190 dma_addr_t dma_addr,
1191 unsigned int offset_and_len)
1192 {
1193 u16 prod = rxr->rx_prod;
1194 struct sk_buff *skb;
1195 int err;
1196
1197 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1198 if (unlikely(err)) {
1199 bnxt_reuse_rx_data(rxr, cons, data);
1200 return NULL;
1201 }
1202
1203 skb = napi_build_skb(data, bp->rx_buf_size);
1204 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1205 bp->rx_dir);
1206 if (!skb) {
1207 page_pool_free_va(rxr->head_pool, data, true);
1208 return NULL;
1209 }
1210
1211 skb_mark_for_recycle(skb);
1212 skb_reserve(skb, bp->rx_offset);
1213 skb_put(skb, offset_and_len & 0xffff);
1214 return skb;
1215 }
1216
1217 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1218 struct bnxt_cp_ring_info *cpr,
1219 struct skb_shared_info *shinfo,
1220 u16 idx, u32 agg_bufs, bool tpa,
1221 struct xdp_buff *xdp)
1222 {
1223 struct bnxt_napi *bnapi = cpr->bnapi;
1224 struct pci_dev *pdev = bp->pdev;
1225 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1226 u16 prod = rxr->rx_agg_prod;
1227 u32 i, total_frag_len = 0;
1228 bool p5_tpa = false;
1229
1230 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1231 p5_tpa = true;
1232
1233 for (i = 0; i < agg_bufs; i++) {
1234 skb_frag_t *frag = &shinfo->frags[i];
1235 u16 cons, frag_len;
1236 struct rx_agg_cmp *agg;
1237 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1238 struct page *page;
1239 dma_addr_t mapping;
1240
1241 if (p5_tpa)
1242 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1243 else
1244 agg = bnxt_get_agg(bp, cpr, idx, i);
1245 cons = agg->rx_agg_cmp_opaque;
1246 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1247 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1248
1249 cons_rx_buf = &rxr->rx_agg_ring[cons];
1250 skb_frag_fill_page_desc(frag, cons_rx_buf->page,
1251 cons_rx_buf->offset, frag_len);
1252 shinfo->nr_frags = i + 1;
1253 __clear_bit(cons, rxr->rx_agg_bmap);
1254
1255 /* It is possible for bnxt_alloc_rx_page() to allocate
1256 * a sw_prod index that equals the cons index, so we
1257 * need to clear the cons entry now.
1258 */
1259 mapping = cons_rx_buf->mapping;
1260 page = cons_rx_buf->page;
1261 cons_rx_buf->page = NULL;
1262
1263 if (xdp && page_is_pfmemalloc(page))
1264 xdp_buff_set_frag_pfmemalloc(xdp);
1265
1266 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1267 --shinfo->nr_frags;
1268 cons_rx_buf->page = page;
1269
1270 /* Update prod since possibly some pages have been
1271 * allocated already.
1272 */
1273 rxr->rx_agg_prod = prod;
1274 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1275 return 0;
1276 }
1277
1278 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1279 bp->rx_dir);
1280
1281 total_frag_len += frag_len;
1282 prod = NEXT_RX_AGG(prod);
1283 }
1284 rxr->rx_agg_prod = prod;
1285 return total_frag_len;
1286 }
1287
1288 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1289 struct bnxt_cp_ring_info *cpr,
1290 struct sk_buff *skb, u16 idx,
1291 u32 agg_bufs, bool tpa)
1292 {
1293 struct skb_shared_info *shinfo = skb_shinfo(skb);
1294 u32 total_frag_len = 0;
1295
1296 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1297 agg_bufs, tpa, NULL);
1298 if (!total_frag_len) {
1299 skb_mark_for_recycle(skb);
1300 dev_kfree_skb(skb);
1301 return NULL;
1302 }
1303
1304 skb->data_len += total_frag_len;
1305 skb->len += total_frag_len;
1306 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
1307 return skb;
1308 }
1309
1310 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1311 struct bnxt_cp_ring_info *cpr,
1312 struct xdp_buff *xdp, u16 idx,
1313 u32 agg_bufs, bool tpa)
1314 {
1315 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1316 u32 total_frag_len = 0;
1317
1318 if (!xdp_buff_has_frags(xdp))
1319 shinfo->nr_frags = 0;
1320
1321 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1322 idx, agg_bufs, tpa, xdp);
1323 if (total_frag_len) {
1324 xdp_buff_set_frags_flag(xdp);
1325 shinfo->nr_frags = agg_bufs;
1326 shinfo->xdp_frags_size = total_frag_len;
1327 }
1328 return total_frag_len;
1329 }
1330
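/* Check that all aggregation completions for this packet are present in the
 * completion ring before processing.
 */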
1331 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1332 u8 agg_bufs, u32 *raw_cons)
1333 {
1334 u16 last;
1335 struct rx_agg_cmp *agg;
1336
1337 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1338 last = RING_CMP(*raw_cons);
1339 agg = (struct rx_agg_cmp *)
1340 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1341 return RX_AGG_CMP_VALID(agg, *raw_cons);
1342 }
1343
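/* Allocate a new skb and copy the packet data into it (used for small
 * packets and for copies out of XDP buffers).
 */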
1344 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1345 unsigned int len,
1346 dma_addr_t mapping)
1347 {
1348 struct bnxt *bp = bnapi->bp;
1349 struct pci_dev *pdev = bp->pdev;
1350 struct sk_buff *skb;
1351
1352 skb = napi_alloc_skb(&bnapi->napi, len);
1353 if (!skb)
1354 return NULL;
1355
1356 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1357 bp->rx_dir);
1358
1359 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1360 len + NET_IP_ALIGN);
1361
1362 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1363 bp->rx_dir);
1364
1365 skb_put(skb, len);
1366
1367 return skb;
1368 }
1369
1370 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1371 unsigned int len,
1372 dma_addr_t mapping)
1373 {
1374 return bnxt_copy_data(bnapi, data, len, mapping);
1375 }
1376
1377 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1378 struct xdp_buff *xdp,
1379 unsigned int len,
1380 dma_addr_t mapping)
1381 {
1382 unsigned int metasize = 0;
1383 u8 *data = xdp->data;
1384 struct sk_buff *skb;
1385
1386 len = xdp->data_end - xdp->data_meta;
1387 metasize = xdp->data - xdp->data_meta;
1388 data = xdp->data_meta;
1389
1390 skb = bnxt_copy_data(bnapi, data, len, mapping);
1391 if (!skb)
1392 return skb;
1393
1394 if (metasize) {
1395 skb_metadata_set(skb, metasize);
1396 __skb_pull(skb, metasize);
1397 }
1398
1399 return skb;
1400 }
1401
1402 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1403 u32 *raw_cons, void *cmp)
1404 {
1405 struct rx_cmp *rxcmp = cmp;
1406 u32 tmp_raw_cons = *raw_cons;
1407 u8 cmp_type, agg_bufs = 0;
1408
1409 cmp_type = RX_CMP_TYPE(rxcmp);
1410
1411 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1412 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1413 RX_CMP_AGG_BUFS) >>
1414 RX_CMP_AGG_BUFS_SHIFT;
1415 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1416 struct rx_tpa_end_cmp *tpa_end = cmp;
1417
1418 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1419 return 0;
1420
1421 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1422 }
1423
1424 if (agg_bufs) {
1425 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1426 return -EBUSY;
1427 }
1428 *raw_cons = tmp_raw_cons;
1429 return 0;
1430 }
1431
1432 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1433 {
1434 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1435 u16 idx = agg_id & MAX_TPA_P5_MASK;
1436
1437 if (test_bit(idx, map->agg_idx_bmap))
1438 idx = find_first_zero_bit(map->agg_idx_bmap,
1439 BNXT_AGG_IDX_BMAP_SIZE);
1440 __set_bit(idx, map->agg_idx_bmap);
1441 map->agg_id_tbl[agg_id] = idx;
1442 return idx;
1443 }
1444
1445 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1446 {
1447 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1448
1449 __clear_bit(idx, map->agg_idx_bmap);
1450 }
1451
1452 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1453 {
1454 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1455
1456 return map->agg_id_tbl[agg_id];
1457 }
1458
1459 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1460 struct rx_tpa_start_cmp *tpa_start,
1461 struct rx_tpa_start_cmp_ext *tpa_start1)
1462 {
1463 tpa_info->cfa_code_valid = 1;
1464 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1465 tpa_info->vlan_valid = 0;
1466 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1467 tpa_info->vlan_valid = 1;
1468 tpa_info->metadata =
1469 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1470 }
1471 }
1472
1473 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1474 struct rx_tpa_start_cmp *tpa_start,
1475 struct rx_tpa_start_cmp_ext *tpa_start1)
1476 {
1477 tpa_info->vlan_valid = 0;
1478 if (TPA_START_VLAN_VALID(tpa_start)) {
1479 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1480 u32 vlan_proto = ETH_P_8021Q;
1481
1482 tpa_info->vlan_valid = 1;
1483 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1484 vlan_proto = ETH_P_8021AD;
1485 tpa_info->metadata = vlan_proto << 16 |
1486 TPA_START_METADATA0_TCI(tpa_start1);
1487 }
1488 }
1489
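/* Handle a TPA_START completion: record the aggregation state and swap the
 * current RX buffer into the TPA info structure.
 */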
1490 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1491 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1492 struct rx_tpa_start_cmp_ext *tpa_start1)
1493 {
1494 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1495 struct bnxt_tpa_info *tpa_info;
1496 u16 cons, prod, agg_id;
1497 struct rx_bd *prod_bd;
1498 dma_addr_t mapping;
1499
1500 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1501 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1502 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1503 } else {
1504 agg_id = TPA_START_AGG_ID(tpa_start);
1505 }
1506 cons = tpa_start->rx_tpa_start_cmp_opaque;
1507 prod = rxr->rx_prod;
1508 cons_rx_buf = &rxr->rx_buf_ring[cons];
1509 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1510 tpa_info = &rxr->rx_tpa[agg_id];
1511
1512 if (unlikely(cons != rxr->rx_next_cons ||
1513 TPA_START_ERROR(tpa_start))) {
1514 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1515 cons, rxr->rx_next_cons,
1516 TPA_START_ERROR_CODE(tpa_start1));
1517 bnxt_sched_reset_rxr(bp, rxr);
1518 return;
1519 }
1520 prod_rx_buf->data = tpa_info->data;
1521 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1522
1523 mapping = tpa_info->mapping;
1524 prod_rx_buf->mapping = mapping;
1525
1526 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1527
1528 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1529
1530 tpa_info->data = cons_rx_buf->data;
1531 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1532 cons_rx_buf->data = NULL;
1533 tpa_info->mapping = cons_rx_buf->mapping;
1534
1535 tpa_info->len =
1536 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1537 RX_TPA_START_CMP_LEN_SHIFT;
1538 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1539 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1540 tpa_info->gso_type = SKB_GSO_TCPV4;
1541 if (TPA_START_IS_IPV6(tpa_start1))
1542 tpa_info->gso_type = SKB_GSO_TCPV6;
1543 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1544 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1545 TPA_START_HASH_TYPE(tpa_start) == 3)
1546 tpa_info->gso_type = SKB_GSO_TCPV6;
1547 tpa_info->rss_hash =
1548 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1549 } else {
1550 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1551 tpa_info->gso_type = 0;
1552 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1553 }
1554 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1555 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1556 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1557 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1558 else
1559 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1560 tpa_info->agg_count = 0;
1561
1562 rxr->rx_prod = NEXT_RX(prod);
1563 cons = RING_RX(bp, NEXT_RX(cons));
1564 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1565 cons_rx_buf = &rxr->rx_buf_ring[cons];
1566
1567 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1568 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1569 cons_rx_buf->data = NULL;
1570 }
1571
1572 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1573 {
1574 if (agg_bufs)
1575 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1576 }
1577
1578 #ifdef CONFIG_INET
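/* For UDP-encapsulated GRO packets, select the tunnel GSO type based on
 * whether the outer UDP checksum is present.
 */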
1579 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1580 {
1581 struct udphdr *uh = NULL;
1582
1583 if (ip_proto == htons(ETH_P_IP)) {
1584 struct iphdr *iph = (struct iphdr *)skb->data;
1585
1586 if (iph->protocol == IPPROTO_UDP)
1587 uh = (struct udphdr *)(iph + 1);
1588 } else {
1589 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1590
1591 if (iph->nexthdr == IPPROTO_UDP)
1592 uh = (struct udphdr *)(iph + 1);
1593 }
1594 if (uh) {
1595 if (uh->check)
1596 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1597 else
1598 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1599 }
1600 }
1601 #endif
1602
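/* GRO fixups for 5731x chips: locate the inner headers from the TPA header
 * info and recompute the TCP pseudo-header checksum.
 */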
1603 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1604 int payload_off, int tcp_ts,
1605 struct sk_buff *skb)
1606 {
1607 #ifdef CONFIG_INET
1608 struct tcphdr *th;
1609 int len, nw_off;
1610 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1611 u32 hdr_info = tpa_info->hdr_info;
1612 bool loopback = false;
1613
1614 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1615 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1616 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1617
1618 /* If the packet is an internal loopback packet, the offsets will
1619 * have an extra 4 bytes.
1620 */
1621 if (inner_mac_off == 4) {
1622 loopback = true;
1623 } else if (inner_mac_off > 4) {
1624 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1625 ETH_HLEN - 2));
1626
1627 /* We only support inner IPv4/IPv6. If we don't see the
1628 * correct protocol ID, it must be a loopback packet where
1629 * the offsets are off by 4.
1630 */
1631 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1632 loopback = true;
1633 }
1634 if (loopback) {
1635 /* internal loopback packet, subtract all offsets by 4 */
1636 inner_ip_off -= 4;
1637 inner_mac_off -= 4;
1638 outer_ip_off -= 4;
1639 }
1640
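/* Point the network and transport headers at the inner packet and
 * seed the TCP pseudo-header checksum so that tcp_gro_complete(),
 * called later from bnxt_gro_skb(), can finalize the aggregated skb.
 */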
1641 nw_off = inner_ip_off - ETH_HLEN;
1642 skb_set_network_header(skb, nw_off);
1643 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1644 struct ipv6hdr *iph = ipv6_hdr(skb);
1645
1646 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1647 len = skb->len - skb_transport_offset(skb);
1648 th = tcp_hdr(skb);
1649 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1650 } else {
1651 struct iphdr *iph = ip_hdr(skb);
1652
1653 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1654 len = skb->len - skb_transport_offset(skb);
1655 th = tcp_hdr(skb);
1656 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1657 }
1658
1659 if (inner_mac_off) { /* tunnel */
1660 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1661 ETH_HLEN - 2));
1662
1663 bnxt_gro_tunnel(skb, proto);
1664 }
1665 #endif
1666 return skb;
1667 }
1668
1669 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1670 int payload_off, int tcp_ts,
1671 struct sk_buff *skb)
1672 {
1673 #ifdef CONFIG_INET
1674 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1675 u32 hdr_info = tpa_info->hdr_info;
1676 int iphdr_len, nw_off;
1677
1678 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1679 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1680 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1681
1682 nw_off = inner_ip_off - ETH_HLEN;
1683 skb_set_network_header(skb, nw_off);
1684 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1685 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1686 skb_set_transport_header(skb, nw_off + iphdr_len);
1687
1688 if (inner_mac_off) { /* tunnel */
1689 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1690 ETH_HLEN - 2));
1691
1692 bnxt_gro_tunnel(skb, proto);
1693 }
1694 #endif
1695 return skb;
1696 }
1697
1698 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1699 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1700
1701 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1702 int payload_off, int tcp_ts,
1703 struct sk_buff *skb)
1704 {
1705 #ifdef CONFIG_INET
1706 struct tcphdr *th;
1707 int len, nw_off, tcp_opt_len = 0;
1708
1709 if (tcp_ts)
1710 tcp_opt_len = 12;
1711
1712 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1713 struct iphdr *iph;
1714
1715 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1716 ETH_HLEN;
1717 skb_set_network_header(skb, nw_off);
1718 iph = ip_hdr(skb);
1719 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1720 len = skb->len - skb_transport_offset(skb);
1721 th = tcp_hdr(skb);
1722 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1723 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1724 struct ipv6hdr *iph;
1725
1726 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1727 ETH_HLEN;
1728 skb_set_network_header(skb, nw_off);
1729 iph = ipv6_hdr(skb);
1730 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1731 len = skb->len - skb_transport_offset(skb);
1732 th = tcp_hdr(skb);
1733 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1734 } else {
1735 dev_kfree_skb_any(skb);
1736 return NULL;
1737 }
1738
1739 if (nw_off) /* tunnel */
1740 bnxt_gro_tunnel(skb, skb->protocol);
1741 #endif
1742 return skb;
1743 }
1744
1745 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1746 struct bnxt_tpa_info *tpa_info,
1747 struct rx_tpa_end_cmp *tpa_end,
1748 struct rx_tpa_end_cmp_ext *tpa_end1,
1749 struct sk_buff *skb)
1750 {
1751 #ifdef CONFIG_INET
1752 int payload_off;
1753 u16 segs;
1754
1755 segs = TPA_END_TPA_SEGS(tpa_end);
1756 if (segs == 1)
1757 return skb;
1758
1759 NAPI_GRO_CB(skb)->count = segs;
1760 skb_shinfo(skb)->gso_size =
1761 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1762 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1763 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1764 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1765 else
1766 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1767 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1768 if (likely(skb))
1769 tcp_gro_complete(skb);
1770 #endif
1771 return skb;
1772 }
1773
1774 /* Given the cfa_code of a received packet determine which
1775 * netdev (vf-rep or PF) the packet is destined to.
1776 */
1777 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1778 {
1779 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1780
1781 /* if vf-rep dev is NULL, the packet must belong to the PF */
1782 return dev ? dev : bp->dev;
1783 }
1784
1785 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1786 struct bnxt_cp_ring_info *cpr,
1787 u32 *raw_cons,
1788 struct rx_tpa_end_cmp *tpa_end,
1789 struct rx_tpa_end_cmp_ext *tpa_end1,
1790 u8 *event)
1791 {
1792 struct bnxt_napi *bnapi = cpr->bnapi;
1793 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1794 struct net_device *dev = bp->dev;
1795 u8 *data_ptr, agg_bufs;
1796 unsigned int len;
1797 struct bnxt_tpa_info *tpa_info;
1798 dma_addr_t mapping;
1799 struct sk_buff *skb;
1800 u16 idx = 0, agg_id;
1801 void *data;
1802 bool gro;
1803
1804 if (unlikely(bnapi->in_reset)) {
1805 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1806
1807 if (rc < 0)
1808 return ERR_PTR(-EBUSY);
1809 return NULL;
1810 }
1811
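/* On P5+ chips the aggregation buffers arrive as separate TPA_AGG
 * completions already stashed in tpa_info->agg_arr; on older chips
 * they follow this entry on the completion ring and must all be
 * present before the packet can be completed.
 */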
1812 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1813 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1814 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1815 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1816 tpa_info = &rxr->rx_tpa[agg_id];
1817 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1818 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1819 agg_bufs, tpa_info->agg_count);
1820 agg_bufs = tpa_info->agg_count;
1821 }
1822 tpa_info->agg_count = 0;
1823 *event |= BNXT_AGG_EVENT;
1824 bnxt_free_agg_idx(rxr, agg_id);
1825 idx = agg_id;
1826 gro = !!(bp->flags & BNXT_FLAG_GRO);
1827 } else {
1828 agg_id = TPA_END_AGG_ID(tpa_end);
1829 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1830 tpa_info = &rxr->rx_tpa[agg_id];
1831 idx = RING_CMP(*raw_cons);
1832 if (agg_bufs) {
1833 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1834 return ERR_PTR(-EBUSY);
1835
1836 *event |= BNXT_AGG_EVENT;
1837 idx = NEXT_CMP(idx);
1838 }
1839 gro = !!TPA_END_GRO(tpa_end);
1840 }
1841 data = tpa_info->data;
1842 data_ptr = tpa_info->data_ptr;
1843 prefetch(data_ptr);
1844 len = tpa_info->len;
1845 mapping = tpa_info->mapping;
1846
1847 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1848 bnxt_abort_tpa(cpr, idx, agg_bufs);
1849 if (agg_bufs > MAX_SKB_FRAGS)
1850 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1851 agg_bufs, (int)MAX_SKB_FRAGS);
1852 return NULL;
1853 }
1854
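/* Small packets are copied into a fresh skb so the TPA buffer can be
 * reused as is; larger packets get a replacement buffer and the skb
 * is built directly around the completed data.
 */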
1855 if (len <= bp->rx_copybreak) {
1856 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1857 if (!skb) {
1858 bnxt_abort_tpa(cpr, idx, agg_bufs);
1859 cpr->sw_stats->rx.rx_oom_discards += 1;
1860 return NULL;
1861 }
1862 } else {
1863 u8 *new_data;
1864 dma_addr_t new_mapping;
1865
1866 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1867 GFP_ATOMIC);
1868 if (!new_data) {
1869 bnxt_abort_tpa(cpr, idx, agg_bufs);
1870 cpr->sw_stats->rx.rx_oom_discards += 1;
1871 return NULL;
1872 }
1873
1874 tpa_info->data = new_data;
1875 tpa_info->data_ptr = new_data + bp->rx_offset;
1876 tpa_info->mapping = new_mapping;
1877
1878 skb = napi_build_skb(data, bp->rx_buf_size);
1879 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1880 bp->rx_buf_use_size, bp->rx_dir);
1881
1882 if (!skb) {
1883 page_pool_free_va(rxr->head_pool, data, true);
1884 bnxt_abort_tpa(cpr, idx, agg_bufs);
1885 cpr->sw_stats->rx.rx_oom_discards += 1;
1886 return NULL;
1887 }
1888 skb_mark_for_recycle(skb);
1889 skb_reserve(skb, bp->rx_offset);
1890 skb_put(skb, len);
1891 }
1892
1893 if (agg_bufs) {
1894 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1895 if (!skb) {
1896 /* Page reuse already handled by bnxt_rx_pages(). */
1897 cpr->sw_stats->rx.rx_oom_discards += 1;
1898 return NULL;
1899 }
1900 }
1901
1902 if (tpa_info->cfa_code_valid)
1903 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1904 skb->protocol = eth_type_trans(skb, dev);
1905
1906 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1907 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1908
1909 if (tpa_info->vlan_valid &&
1910 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1911 __be16 vlan_proto = htons(tpa_info->metadata >>
1912 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1913 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1914
1915 if (eth_type_vlan(vlan_proto)) {
1916 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1917 } else {
1918 dev_kfree_skb(skb);
1919 return NULL;
1920 }
1921 }
1922
1923 skb_checksum_none_assert(skb);
1924 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1925 skb->ip_summed = CHECKSUM_UNNECESSARY;
1926 skb->csum_level =
1927 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1928 }
1929
1930 if (gro)
1931 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1932
1933 return skb;
1934 }
1935
1936 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1937 struct rx_agg_cmp *rx_agg)
1938 {
1939 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1940 struct bnxt_tpa_info *tpa_info;
1941
1942 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1943 tpa_info = &rxr->rx_tpa[agg_id];
1944 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1945 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1946 }
1947
1948 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1949 struct sk_buff *skb)
1950 {
1951 skb_mark_for_recycle(skb);
1952
1953 if (skb->dev != bp->dev) {
1954 /* this packet belongs to a vf-rep */
1955 bnxt_vf_rep_rx(bp, skb);
1956 return;
1957 }
1958 skb_record_rx_queue(skb, bnapi->index);
1959 napi_gro_receive(&bnapi->napi, skb);
1960 }
1961
1962 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1963 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
1964 {
1965 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1966
1967 if (BNXT_PTP_RX_TS_VALID(flags))
1968 goto ts_valid;
1969 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1970 return false;
1971
1972 ts_valid:
1973 *cmpl_ts = ts;
1974 return true;
1975 }
1976
1977 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
1978 struct rx_cmp *rxcmp,
1979 struct rx_cmp_ext *rxcmp1)
1980 {
1981 __be16 vlan_proto;
1982 u16 vtag;
1983
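/* Two metadata layouts: legacy L2 completions carry the TPID and TCI
 * in rx_cmp_meta_data, while V3 completions report them in dedicated
 * VLAN fields of the completion.
 */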
1984 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1985 __le32 flags2 = rxcmp1->rx_cmp_flags2;
1986 u32 meta_data;
1987
1988 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
1989 return skb;
1990
1991 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1992 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1993 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
1994 if (eth_type_vlan(vlan_proto))
1995 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1996 else
1997 goto vlan_err;
1998 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
1999 if (RX_CMP_VLAN_VALID(rxcmp)) {
2000 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2001
2002 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2003 vlan_proto = htons(ETH_P_8021Q);
2004 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2005 vlan_proto = htons(ETH_P_8021AD);
2006 else
2007 goto vlan_err;
2008 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2009 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2010 }
2011 }
2012 return skb;
2013 vlan_err:
2014 dev_kfree_skb(skb);
2015 return NULL;
2016 }
2017
2018 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2019 struct rx_cmp *rxcmp)
2020 {
2021 u8 ext_op;
2022
2023 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2024 switch (ext_op) {
2025 case EXT_OP_INNER_4:
2026 case EXT_OP_OUTER_4:
2027 case EXT_OP_INNFL_3:
2028 case EXT_OP_OUTFL_3:
2029 return PKT_HASH_TYPE_L4;
2030 default:
2031 return PKT_HASH_TYPE_L3;
2032 }
2033 }
2034
2035 /* returns the following:
2036 * 1 - 1 packet successfully received
2037 * 0 - successful TPA_START, packet not completed yet
2038 * -EBUSY - completion ring does not have all the agg buffers yet
2039 * -ENOMEM - packet aborted due to out of memory
2040 * -EIO - packet aborted due to hw error indicated in BD
2041 */
2042 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2043 u32 *raw_cons, u8 *event)
2044 {
2045 struct bnxt_napi *bnapi = cpr->bnapi;
2046 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2047 struct net_device *dev = bp->dev;
2048 struct rx_cmp *rxcmp;
2049 struct rx_cmp_ext *rxcmp1;
2050 u32 tmp_raw_cons = *raw_cons;
2051 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2052 struct skb_shared_info *sinfo;
2053 struct bnxt_sw_rx_bd *rx_buf;
2054 unsigned int len;
2055 u8 *data_ptr, agg_bufs, cmp_type;
2056 bool xdp_active = false;
2057 dma_addr_t dma_addr;
2058 struct sk_buff *skb;
2059 struct xdp_buff xdp;
2060 u32 flags, misc;
2061 u32 cmpl_ts;
2062 void *data;
2063 int rc = 0;
2064
2065 rxcmp = (struct rx_cmp *)
2066 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2067
2068 cmp_type = RX_CMP_TYPE(rxcmp);
2069
2070 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2071 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2072 goto next_rx_no_prod_no_len;
2073 }
2074
2075 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2076 cp_cons = RING_CMP(tmp_raw_cons);
2077 rxcmp1 = (struct rx_cmp_ext *)
2078 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2079
2080 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2081 return -EBUSY;
2082
2083 /* The valid test of the entry must be done first before
2084 * reading any further.
2085 */
2086 dma_rmb();
2087 prod = rxr->rx_prod;
2088
2089 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2090 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2091 bnxt_tpa_start(bp, rxr, cmp_type,
2092 (struct rx_tpa_start_cmp *)rxcmp,
2093 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2094
2095 *event |= BNXT_RX_EVENT;
2096 goto next_rx_no_prod_no_len;
2097
2098 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2099 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2100 (struct rx_tpa_end_cmp *)rxcmp,
2101 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2102
2103 if (IS_ERR(skb))
2104 return -EBUSY;
2105
2106 rc = -ENOMEM;
2107 if (likely(skb)) {
2108 bnxt_deliver_skb(bp, bnapi, skb);
2109 rc = 1;
2110 }
2111 *event |= BNXT_RX_EVENT;
2112 goto next_rx_no_prod_no_len;
2113 }
2114
2115 cons = rxcmp->rx_cmp_opaque;
2116 if (unlikely(cons != rxr->rx_next_cons)) {
2117 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2118
2119 /* 0xffff is a forced error, don't print it */
2120 if (rxr->rx_next_cons != 0xffff)
2121 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2122 cons, rxr->rx_next_cons);
2123 bnxt_sched_reset_rxr(bp, rxr);
2124 if (rc1)
2125 return rc1;
2126 goto next_rx_no_prod_no_len;
2127 }
2128 rx_buf = &rxr->rx_buf_ring[cons];
2129 data = rx_buf->data;
2130 data_ptr = rx_buf->data_ptr;
2131 prefetch(data_ptr);
2132
2133 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2134 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2135
2136 if (agg_bufs) {
2137 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2138 return -EBUSY;
2139
2140 cp_cons = NEXT_CMP(cp_cons);
2141 *event |= BNXT_AGG_EVENT;
2142 }
2143 *event |= BNXT_RX_EVENT;
2144
2145 rx_buf->data = NULL;
2146 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2147 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2148
2149 bnxt_reuse_rx_data(rxr, cons, data);
2150 if (agg_bufs)
2151 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2152 false);
2153
2154 rc = -EIO;
2155 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2156 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2157 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2158 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2159 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2160 rx_err);
2161 bnxt_sched_reset_rxr(bp, rxr);
2162 }
2163 }
2164 goto next_rx_no_len;
2165 }
2166
2167 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2168 len = flags >> RX_CMP_LEN_SHIFT;
2169 dma_addr = rx_buf->mapping;
2170
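/* With an XDP program attached, wrap the buffer (plus any aggregation
 * pages) in an xdp_buff first; the program may consume the packet,
 * adjust the data pointer and length, or pass it on for normal skb
 * processing.
 */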
2171 if (bnxt_xdp_attached(bp, rxr)) {
2172 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2173 if (agg_bufs) {
2174 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2175 cp_cons, agg_bufs,
2176 false);
2177 if (!frag_len)
2178 goto oom_next_rx;
2179
2180 }
2181 xdp_active = true;
2182 }
2183
2184 if (xdp_active) {
2185 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2186 rc = 1;
2187 goto next_rx;
2188 }
2189 if (xdp_buff_has_frags(&xdp)) {
2190 sinfo = xdp_get_shared_info_from_buff(&xdp);
2191 agg_bufs = sinfo->nr_frags;
2192 } else {
2193 agg_bufs = 0;
2194 }
2195 }
2196
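/* Copybreak: small packets are copied into a new skb and the original
 * ring buffer is recycled in place.
 */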
2197 if (len <= bp->rx_copybreak) {
2198 if (!xdp_active)
2199 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2200 else
2201 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2202 bnxt_reuse_rx_data(rxr, cons, data);
2203 if (!skb) {
2204 if (agg_bufs) {
2205 if (!xdp_active)
2206 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2207 agg_bufs, false);
2208 else
2209 bnxt_xdp_buff_frags_free(rxr, &xdp);
2210 }
2211 goto oom_next_rx;
2212 }
2213 } else {
2214 u32 payload;
2215
2216 if (rx_buf->data_ptr == data_ptr)
2217 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2218 else
2219 payload = 0;
2220 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2221 payload | len);
2222 if (!skb)
2223 goto oom_next_rx;
2224 }
2225
2226 if (agg_bufs) {
2227 if (!xdp_active) {
2228 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2229 if (!skb)
2230 goto oom_next_rx;
2231 } else {
2232 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2233 rxr->page_pool, &xdp);
2234 if (!skb) {
2235 /* we should be able to free the old skb here */
2236 bnxt_xdp_buff_frags_free(rxr, &xdp);
2237 goto oom_next_rx;
2238 }
2239 }
2240 }
2241
2242 if (RX_CMP_HASH_VALID(rxcmp)) {
2243 enum pkt_hash_types type;
2244
2245 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2246 type = bnxt_rss_ext_op(bp, rxcmp);
2247 } else {
2248 u32 itypes = RX_CMP_ITYPES(rxcmp);
2249
2250 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2251 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2252 type = PKT_HASH_TYPE_L4;
2253 else
2254 type = PKT_HASH_TYPE_L3;
2255 }
2256 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2257 }
2258
2259 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2260 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2261 skb->protocol = eth_type_trans(skb, dev);
2262
2263 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2264 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2265 if (!skb)
2266 goto next_rx;
2267 }
2268
2269 skb_checksum_none_assert(skb);
2270 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2271 if (dev->features & NETIF_F_RXCSUM) {
2272 skb->ip_summed = CHECKSUM_UNNECESSARY;
2273 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2274 }
2275 } else {
2276 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2277 if (dev->features & NETIF_F_RXCSUM)
2278 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2279 }
2280 }
2281
2282 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2283 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2284 u64 ns, ts;
2285
2286 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2287 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2288
2289 ns = bnxt_timecounter_cyc2time(ptp, ts);
2290 memset(skb_hwtstamps(skb), 0,
2291 sizeof(*skb_hwtstamps(skb)));
2292 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2293 }
2294 }
2295 }
2296 bnxt_deliver_skb(bp, bnapi, skb);
2297 rc = 1;
2298
2299 next_rx:
2300 cpr->rx_packets += 1;
2301 cpr->rx_bytes += len;
2302
2303 next_rx_no_len:
2304 rxr->rx_prod = NEXT_RX(prod);
2305 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2306
2307 next_rx_no_prod_no_len:
2308 *raw_cons = tmp_raw_cons;
2309
2310 return rc;
2311
2312 oom_next_rx:
2313 cpr->sw_stats->rx.rx_oom_discards += 1;
2314 rc = -ENOMEM;
2315 goto next_rx;
2316 }
2317
2318 /* In netpoll mode, if we are using a combined completion ring, we need to
2319 * discard the rx packets and recycle the buffers.
2320 */
2321 static int bnxt_force_rx_discard(struct bnxt *bp,
2322 struct bnxt_cp_ring_info *cpr,
2323 u32 *raw_cons, u8 *event)
2324 {
2325 u32 tmp_raw_cons = *raw_cons;
2326 struct rx_cmp_ext *rxcmp1;
2327 struct rx_cmp *rxcmp;
2328 u16 cp_cons;
2329 u8 cmp_type;
2330 int rc;
2331
2332 cp_cons = RING_CMP(tmp_raw_cons);
2333 rxcmp = (struct rx_cmp *)
2334 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2335
2336 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2337 cp_cons = RING_CMP(tmp_raw_cons);
2338 rxcmp1 = (struct rx_cmp_ext *)
2339 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2340
2341 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2342 return -EBUSY;
2343
2344 /* The valid test of the entry must be done first before
2345 * reading any further.
2346 */
2347 dma_rmb();
2348 cmp_type = RX_CMP_TYPE(rxcmp);
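/* Flag the completion as an error so that bnxt_rx_pkt() drops the
 * packet and recycles its buffers instead of delivering it.
 */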
2349 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2350 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2351 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2352 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2353 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2354 struct rx_tpa_end_cmp_ext *tpa_end1;
2355
2356 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2357 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2358 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2359 }
2360 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2361 if (rc && rc != -EBUSY)
2362 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2363 return rc;
2364 }
2365
2366 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2367 {
2368 struct bnxt_fw_health *fw_health = bp->fw_health;
2369 u32 reg = fw_health->regs[reg_idx];
2370 u32 reg_type, reg_off, val = 0;
2371
2372 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2373 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2374 switch (reg_type) {
2375 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2376 pci_read_config_dword(bp->pdev, reg_off, &val);
2377 break;
2378 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2379 reg_off = fw_health->mapped_regs[reg_idx];
2380 fallthrough;
2381 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2382 val = readl(bp->bar0 + reg_off);
2383 break;
2384 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2385 val = readl(bp->bar1 + reg_off);
2386 break;
2387 }
2388 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2389 val &= fw_health->fw_reset_inprog_reg_mask;
2390 return val;
2391 }
2392
2393 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2394 {
2395 int i;
2396
2397 for (i = 0; i < bp->rx_nr_rings; i++) {
2398 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2399 struct bnxt_ring_grp_info *grp_info;
2400
2401 grp_info = &bp->grp_info[grp_idx];
2402 if (grp_info->agg_fw_ring_id == ring_id)
2403 return grp_idx;
2404 }
2405 return INVALID_HW_RING_ID;
2406 }
2407
2408 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2409 {
2410 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2411
2412 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2413 return link_info->force_link_speed2;
2414 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2415 return link_info->force_pam4_link_speed;
2416 return link_info->force_link_speed;
2417 }
2418
2419 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2420 {
2421 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2422
2423 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2424 link_info->req_link_speed = link_info->force_link_speed2;
2425 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2426 switch (link_info->req_link_speed) {
2427 case BNXT_LINK_SPEED_50GB_PAM4:
2428 case BNXT_LINK_SPEED_100GB_PAM4:
2429 case BNXT_LINK_SPEED_200GB_PAM4:
2430 case BNXT_LINK_SPEED_400GB_PAM4:
2431 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2432 break;
2433 case BNXT_LINK_SPEED_100GB_PAM4_112:
2434 case BNXT_LINK_SPEED_200GB_PAM4_112:
2435 case BNXT_LINK_SPEED_400GB_PAM4_112:
2436 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2437 break;
2438 default:
2439 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2440 }
2441 return;
2442 }
2443 link_info->req_link_speed = link_info->force_link_speed;
2444 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2445 if (link_info->force_pam4_link_speed) {
2446 link_info->req_link_speed = link_info->force_pam4_link_speed;
2447 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2448 }
2449 }
2450
2451 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2452 {
2453 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2454
2455 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2456 link_info->advertising = link_info->auto_link_speeds2;
2457 return;
2458 }
2459 link_info->advertising = link_info->auto_link_speeds;
2460 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2461 }
2462
2463 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2464 {
2465 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2466
2467 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2468 if (link_info->req_link_speed != link_info->force_link_speed2)
2469 return true;
2470 return false;
2471 }
2472 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2473 link_info->req_link_speed != link_info->force_link_speed)
2474 return true;
2475 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2476 link_info->req_link_speed != link_info->force_pam4_link_speed)
2477 return true;
2478 return false;
2479 }
2480
2481 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2482 {
2483 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2484
2485 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2486 if (link_info->advertising != link_info->auto_link_speeds2)
2487 return true;
2488 return false;
2489 }
2490 if (link_info->advertising != link_info->auto_link_speeds ||
2491 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2492 return true;
2493 return false;
2494 }
2495
2496 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2497 {
2498 u32 flags = bp->ctx->ctx_arr[type].flags;
2499
2500 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2501 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2502 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2503 }
2504
2505 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2506 {
2507 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2508 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2509 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2510 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2511 struct bnxt_bs_trace_info *bs_trace;
2512 int last_pg;
2513
2514 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2515 return;
2516
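/* Locate the last byte of the trace buffer and plant a magic byte
 * there, which bnxt_bs_trace_check_wrap() uses later when the
 * firmware reports its buffer producer offset.
 */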
2517 mem_size = ctxm->max_entries * ctxm->entry_size;
2518 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2519 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2520
2521 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2522 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2523
2524 rmem = &ctx_pg[0].ring_mem;
2525 bs_trace = &bp->bs_trace[trace_type];
2526 bs_trace->ctx_type = ctxm->type;
2527 bs_trace->trace_type = trace_type;
2528 if (pages > MAX_CTX_PAGES) {
2529 int last_pg_dir = rmem->nr_pages - 1;
2530
2531 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2532 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2533 } else {
2534 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2535 }
2536 bs_trace->magic_byte += magic_byte_offset;
2537 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2538 }
2539
2540 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2541 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2542 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2543
2544 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2545 (((data2) & \
2546 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2547 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2548
2549 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2550 ((data2) & \
2551 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2552
2553 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2554 (((data2) & \
2555 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2556 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2557
2558 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2559 ((data1) & \
2560 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2561
2562 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2563 (((data1) & \
2564 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2565 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2566
2567 /* Return true if the workqueue has to be scheduled */
2568 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2569 {
2570 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2571
2572 switch (err_type) {
2573 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2574 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2575 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2576 break;
2577 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2578 netdev_warn(bp->dev, "Pause Storm detected!\n");
2579 break;
2580 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2581 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2582 break;
2583 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2584 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2585 char *threshold_type;
2586 bool notify = false;
2587 char *dir_str;
2588
2589 switch (type) {
2590 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2591 threshold_type = "warning";
2592 break;
2593 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2594 threshold_type = "critical";
2595 break;
2596 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2597 threshold_type = "fatal";
2598 break;
2599 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2600 threshold_type = "shutdown";
2601 break;
2602 default:
2603 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2604 return false;
2605 }
2606 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2607 dir_str = "above";
2608 notify = true;
2609 } else {
2610 dir_str = "below";
2611 }
2612 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2613 dir_str, threshold_type);
2614 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2615 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2616 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2617 if (notify) {
2618 bp->thermal_threshold_type = type;
2619 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2620 return true;
2621 }
2622 return false;
2623 }
2624 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2625 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2626 break;
2627 default:
2628 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2629 err_type);
2630 break;
2631 }
2632 return false;
2633 }
2634
2635 #define BNXT_GET_EVENT_PORT(data) \
2636 ((data) & \
2637 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2638
2639 #define BNXT_EVENT_RING_TYPE(data2) \
2640 ((data2) & \
2641 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2642
2643 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2644 (BNXT_EVENT_RING_TYPE(data2) == \
2645 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2646
2647 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2648 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2649 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2650
2651 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2652 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2653 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2654
2655 #define BNXT_PHC_BITS 48
2656
2657 static int bnxt_async_event_process(struct bnxt *bp,
2658 struct hwrm_async_event_cmpl *cmpl)
2659 {
2660 u16 event_id = le16_to_cpu(cmpl->event_id);
2661 u32 data1 = le32_to_cpu(cmpl->event_data1);
2662 u32 data2 = le32_to_cpu(cmpl->event_data2);
2663
2664 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2665 event_id, data1, data2);
2666
2667 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2668 switch (event_id) {
2669 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2670 struct bnxt_link_info *link_info = &bp->link_info;
2671
2672 if (BNXT_VF(bp))
2673 goto async_event_process_exit;
2674
2675 /* print unsupported speed warning in forced speed mode only */
2676 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2677 (data1 & 0x20000)) {
2678 u16 fw_speed = bnxt_get_force_speed(link_info);
2679 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2680
2681 if (speed != SPEED_UNKNOWN)
2682 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2683 speed);
2684 }
2685 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2686 }
2687 fallthrough;
2688 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2689 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2690 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2691 fallthrough;
2692 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2693 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2694 break;
2695 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2696 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2697 break;
2698 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2699 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2700
2701 if (BNXT_VF(bp))
2702 break;
2703
2704 if (bp->pf.port_id != port_id)
2705 break;
2706
2707 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2708 break;
2709 }
2710 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2711 if (BNXT_PF(bp))
2712 goto async_event_process_exit;
2713 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2714 break;
2715 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2716 char *type_str = "Solicited";
2717
2718 if (!bp->fw_health)
2719 goto async_event_process_exit;
2720
2721 bp->fw_reset_timestamp = jiffies;
2722 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2723 if (!bp->fw_reset_min_dsecs)
2724 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2725 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2726 if (!bp->fw_reset_max_dsecs)
2727 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2728 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2729 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2730 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2731 type_str = "Fatal";
2732 bp->fw_health->fatalities++;
2733 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2734 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2735 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2736 type_str = "Non-fatal";
2737 bp->fw_health->survivals++;
2738 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2739 }
2740 netif_warn(bp, hw, bp->dev,
2741 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2742 type_str, data1, data2,
2743 bp->fw_reset_min_dsecs * 100,
2744 bp->fw_reset_max_dsecs * 100);
2745 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2746 break;
2747 }
2748 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2749 struct bnxt_fw_health *fw_health = bp->fw_health;
2750 char *status_desc = "healthy";
2751 u32 status;
2752
2753 if (!fw_health)
2754 goto async_event_process_exit;
2755
2756 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2757 fw_health->enabled = false;
2758 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2759 break;
2760 }
2761 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2762 fw_health->tmr_multiplier =
2763 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2764 bp->current_interval * 10);
2765 fw_health->tmr_counter = fw_health->tmr_multiplier;
2766 if (!fw_health->enabled)
2767 fw_health->last_fw_heartbeat =
2768 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2769 fw_health->last_fw_reset_cnt =
2770 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2771 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2772 if (status != BNXT_FW_STATUS_HEALTHY)
2773 status_desc = "unhealthy";
2774 netif_info(bp, drv, bp->dev,
2775 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2776 fw_health->primary ? "primary" : "backup", status,
2777 status_desc, fw_health->last_fw_reset_cnt);
2778 if (!fw_health->enabled) {
2779 /* Make sure tmr_counter is set and visible to
2780 * bnxt_health_check() before setting enabled to true.
2781 */
2782 smp_wmb();
2783 fw_health->enabled = true;
2784 }
2785 goto async_event_process_exit;
2786 }
2787 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2788 netif_notice(bp, hw, bp->dev,
2789 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2790 data1, data2);
2791 goto async_event_process_exit;
2792 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2793 struct bnxt_rx_ring_info *rxr;
2794 u16 grp_idx;
2795
2796 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2797 goto async_event_process_exit;
2798
2799 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2800 BNXT_EVENT_RING_TYPE(data2), data1);
2801 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2802 goto async_event_process_exit;
2803
2804 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2805 if (grp_idx == INVALID_HW_RING_ID) {
2806 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2807 data1);
2808 goto async_event_process_exit;
2809 }
2810 rxr = bp->bnapi[grp_idx]->rx_ring;
2811 bnxt_sched_reset_rxr(bp, rxr);
2812 goto async_event_process_exit;
2813 }
2814 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2815 struct bnxt_fw_health *fw_health = bp->fw_health;
2816
2817 netif_notice(bp, hw, bp->dev,
2818 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2819 data1, data2);
2820 if (fw_health) {
2821 fw_health->echo_req_data1 = data1;
2822 fw_health->echo_req_data2 = data2;
2823 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2824 break;
2825 }
2826 goto async_event_process_exit;
2827 }
2828 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2829 bnxt_ptp_pps_event(bp, data1, data2);
2830 goto async_event_process_exit;
2831 }
2832 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2833 if (bnxt_event_error_report(bp, data1, data2))
2834 break;
2835 goto async_event_process_exit;
2836 }
2837 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2838 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2839 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2840 if (BNXT_PTP_USE_RTC(bp)) {
2841 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2842 unsigned long flags;
2843 u64 ns;
2844
2845 if (!ptp)
2846 goto async_event_process_exit;
2847
2848 bnxt_ptp_update_current_time(bp);
2849 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2850 BNXT_PHC_BITS) | ptp->current_time);
2851 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2852 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2853 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2854 }
2855 break;
2856 }
2857 goto async_event_process_exit;
2858 }
2859 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2860 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2861
2862 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2863 goto async_event_process_exit;
2864 }
2865 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2866 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2867 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2868
2869 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2870 goto async_event_process_exit;
2871 }
2872 default:
2873 goto async_event_process_exit;
2874 }
2875 __bnxt_queue_sp_work(bp);
2876 async_event_process_exit:
2877 bnxt_ulp_async_events(bp, cmpl);
2878 return 0;
2879 }
2880
2881 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2882 {
2883 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2884 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2885 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2886 (struct hwrm_fwd_req_cmpl *)txcmp;
2887
2888 switch (cmpl_type) {
2889 case CMPL_BASE_TYPE_HWRM_DONE:
2890 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2891 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2892 break;
2893
2894 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2895 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2896
2897 if ((vf_id < bp->pf.first_vf_id) ||
2898 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2899 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2900 vf_id);
2901 return -EINVAL;
2902 }
2903
2904 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2905 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2906 break;
2907
2908 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2909 bnxt_async_event_process(bp,
2910 (struct hwrm_async_event_cmpl *)txcmp);
2911 break;
2912
2913 default:
2914 break;
2915 }
2916
2917 return 0;
2918 }
2919
2920 static bool bnxt_vnic_is_active(struct bnxt *bp)
2921 {
2922 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2923
2924 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2925 }
2926
2927 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2928 {
2929 struct bnxt_napi *bnapi = dev_instance;
2930 struct bnxt *bp = bnapi->bp;
2931 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2932 u32 cons = RING_CMP(cpr->cp_raw_cons);
2933
2934 cpr->event_ctr++;
2935 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2936 napi_schedule(&bnapi->napi);
2937 return IRQ_HANDLED;
2938 }
2939
2940 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2941 {
2942 u32 raw_cons = cpr->cp_raw_cons;
2943 u16 cons = RING_CMP(raw_cons);
2944 struct tx_cmp *txcmp;
2945
2946 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2947
2948 return TX_CMP_VALID(txcmp, raw_cons);
2949 }
2950
2951 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2952 int budget)
2953 {
2954 struct bnxt_napi *bnapi = cpr->bnapi;
2955 u32 raw_cons = cpr->cp_raw_cons;
2956 u32 cons;
2957 int rx_pkts = 0;
2958 u8 event = 0;
2959 struct tx_cmp *txcmp;
2960
2961 cpr->has_more_work = 0;
2962 cpr->had_work_done = 1;
2963 while (1) {
2964 u8 cmp_type;
2965 int rc;
2966
2967 cons = RING_CMP(raw_cons);
2968 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2969
2970 if (!TX_CMP_VALID(txcmp, raw_cons))
2971 break;
2972
2973 /* The valid test of the entry must be done first before
2974 * reading any further.
2975 */
2976 dma_rmb();
2977 cmp_type = TX_CMP_TYPE(txcmp);
2978 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2979 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
2980 u32 opaque = txcmp->tx_cmp_opaque;
2981 struct bnxt_tx_ring_info *txr;
2982 u16 tx_freed;
2983
2984 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
2985 event |= BNXT_TX_CMP_EVENT;
2986 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2987 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2988 else
2989 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2990 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
2991 bp->tx_ring_mask;
2992 /* return full budget so NAPI will complete. */
2993 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2994 rx_pkts = budget;
2995 raw_cons = NEXT_RAW_CMP(raw_cons);
2996 if (budget)
2997 cpr->has_more_work = 1;
2998 break;
2999 }
3000 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3001 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3002 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3003 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3004 if (likely(budget))
3005 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3006 else
3007 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3008 &event);
3009 if (likely(rc >= 0))
3010 rx_pkts += rc;
3011 /* Increment rx_pkts when rc is -ENOMEM to count towards
3012 * the NAPI budget. Otherwise, we may potentially loop
3013 * here forever if we consistently cannot allocate
3014 * buffers.
3015 */
3016 else if (rc == -ENOMEM && budget)
3017 rx_pkts++;
3018 else if (rc == -EBUSY) /* partial completion */
3019 break;
3020 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3021 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3022 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3023 bnxt_hwrm_handler(bp, txcmp);
3024 }
3025 raw_cons = NEXT_RAW_CMP(raw_cons);
3026
3027 if (rx_pkts && rx_pkts == budget) {
3028 cpr->has_more_work = 1;
3029 break;
3030 }
3031 }
3032
3033 if (event & BNXT_REDIRECT_EVENT) {
3034 xdp_do_flush();
3035 event &= ~BNXT_REDIRECT_EVENT;
3036 }
3037
3038 if (event & BNXT_TX_EVENT) {
3039 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3040 u16 prod = txr->tx_prod;
3041
3042 /* Sync BD data before updating doorbell */
3043 wmb();
3044
3045 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3046 event &= ~BNXT_TX_EVENT;
3047 }
3048
3049 cpr->cp_raw_cons = raw_cons;
3050 bnapi->events |= event;
3051 return rx_pkts;
3052 }
3053
3054 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3055 int budget)
3056 {
3057 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3058 bnapi->tx_int(bp, bnapi, budget);
3059
3060 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3061 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3062
3063 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3064 bnapi->events &= ~BNXT_RX_EVENT;
3065 }
3066 if (bnapi->events & BNXT_AGG_EVENT) {
3067 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3068
3069 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3070 bnapi->events &= ~BNXT_AGG_EVENT;
3071 }
3072 }
3073
3074 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3075 int budget)
3076 {
3077 struct bnxt_napi *bnapi = cpr->bnapi;
3078 int rx_pkts;
3079
3080 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3081
3082 /* ACK completion ring before freeing tx ring and producing new
3083 * buffers in rx/agg rings to prevent overflowing the completion
3084 * ring.
3085 */
3086 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3087
3088 __bnxt_poll_work_done(bp, bnapi, budget);
3089 return rx_pkts;
3090 }
3091
3092 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3093 {
3094 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3095 struct bnxt *bp = bnapi->bp;
3096 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3097 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3098 struct tx_cmp *txcmp;
3099 struct rx_cmp_ext *rxcmp1;
3100 u32 cp_cons, tmp_raw_cons;
3101 u32 raw_cons = cpr->cp_raw_cons;
3102 bool flush_xdp = false;
3103 u32 rx_pkts = 0;
3104 u8 event = 0;
3105
3106 while (1) {
3107 int rc;
3108
3109 cp_cons = RING_CMP(raw_cons);
3110 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3111
3112 if (!TX_CMP_VALID(txcmp, raw_cons))
3113 break;
3114
3115 /* The valid test of the entry must be done first before
3116 * reading any further.
3117 */
3118 dma_rmb();
3119 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3120 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3121 cp_cons = RING_CMP(tmp_raw_cons);
3122 rxcmp1 = (struct rx_cmp_ext *)
3123 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3124
3125 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3126 break;
3127
3128 /* force an error to recycle the buffer */
3129 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3130 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3131
3132 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3133 if (likely(rc == -EIO) && budget)
3134 rx_pkts++;
3135 else if (rc == -EBUSY) /* partial completion */
3136 break;
3137 if (event & BNXT_REDIRECT_EVENT)
3138 flush_xdp = true;
3139 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3140 CMPL_BASE_TYPE_HWRM_DONE)) {
3141 bnxt_hwrm_handler(bp, txcmp);
3142 } else {
3143 netdev_err(bp->dev,
3144 "Invalid completion received on special ring\n");
3145 }
3146 raw_cons = NEXT_RAW_CMP(raw_cons);
3147
3148 if (rx_pkts == budget)
3149 break;
3150 }
3151
3152 cpr->cp_raw_cons = raw_cons;
3153 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3154 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3155
3156 if (event & BNXT_AGG_EVENT)
3157 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3158 if (flush_xdp)
3159 xdp_do_flush();
3160
3161 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3162 napi_complete_done(napi, rx_pkts);
3163 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3164 }
3165 return rx_pkts;
3166 }
3167
3168 static int bnxt_poll(struct napi_struct *napi, int budget)
3169 {
3170 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3171 struct bnxt *bp = bnapi->bp;
3172 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3173 int work_done = 0;
3174
3175 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3176 napi_complete(napi);
3177 return 0;
3178 }
3179 while (1) {
3180 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3181
3182 if (work_done >= budget) {
3183 if (!budget)
3184 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3185 break;
3186 }
3187
3188 if (!bnxt_has_work(bp, cpr)) {
3189 if (napi_complete_done(napi, work_done))
3190 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3191 break;
3192 }
3193 }
3194 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3195 struct dim_sample dim_sample = {};
3196
3197 dim_update_sample(cpr->event_ctr,
3198 cpr->rx_packets,
3199 cpr->rx_bytes,
3200 &dim_sample);
3201 net_dim(&cpr->dim, &dim_sample);
3202 }
3203 return work_done;
3204 }
3205
3206 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3207 {
3208 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3209 int i, work_done = 0;
3210
3211 for (i = 0; i < cpr->cp_ring_count; i++) {
3212 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3213
3214 if (cpr2->had_nqe_notify) {
3215 work_done += __bnxt_poll_work(bp, cpr2,
3216 budget - work_done);
3217 cpr->has_more_work |= cpr2->has_more_work;
3218 }
3219 }
3220 return work_done;
3221 }
3222
3223 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3224 u64 dbr_type, int budget)
3225 {
3226 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3227 int i;
3228
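/* Ring the CQ doorbell for every sub-ring that did work;
 * DBR_TYPE_CQ_ARMALL additionally clears the pending NQE notification
 * and carries the completion ring's toggle bit.
 */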
3229 for (i = 0; i < cpr->cp_ring_count; i++) {
3230 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3231 struct bnxt_db_info *db;
3232
3233 if (cpr2->had_work_done) {
3234 u32 tgl = 0;
3235
3236 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3237 cpr2->had_nqe_notify = 0;
3238 tgl = cpr2->toggle;
3239 }
3240 db = &cpr2->cp_db;
3241 bnxt_writeq(bp,
3242 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3243 DB_RING_IDX(db, cpr2->cp_raw_cons),
3244 db->doorbell);
3245 cpr2->had_work_done = 0;
3246 }
3247 }
3248 __bnxt_poll_work_done(bp, bnapi, budget);
3249 }
3250
3251 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3252 {
3253 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3254 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3255 struct bnxt_cp_ring_info *cpr_rx;
3256 u32 raw_cons = cpr->cp_raw_cons;
3257 struct bnxt *bp = bnapi->bp;
3258 struct nqe_cn *nqcmp;
3259 int work_done = 0;
3260 u32 cons;
3261
3262 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3263 napi_complete(napi);
3264 return 0;
3265 }
3266 if (cpr->has_more_work) {
3267 cpr->has_more_work = 0;
3268 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3269 }
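/* P5+ chips use a notification queue (NQ): each NQE points at a
 * completion ring, which is then polled for the actual TX/RX work.
 */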
3270 while (1) {
3271 u16 type;
3272
3273 cons = RING_CMP(raw_cons);
3274 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3275
3276 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3277 if (cpr->has_more_work)
3278 break;
3279
3280 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3281 budget);
3282 cpr->cp_raw_cons = raw_cons;
3283 if (napi_complete_done(napi, work_done))
3284 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3285 cpr->cp_raw_cons);
3286 goto poll_done;
3287 }
3288
3289 /* The valid test of the entry must be done first before
3290 * reading any further.
3291 */
3292 dma_rmb();
3293
3294 type = le16_to_cpu(nqcmp->type);
3295 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3296 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3297 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3298 struct bnxt_cp_ring_info *cpr2;
3299
3300 /* No more budget for RX work */
3301 if (budget && work_done >= budget &&
3302 cq_type == BNXT_NQ_HDL_TYPE_RX)
3303 break;
3304
3305 idx = BNXT_NQ_HDL_IDX(idx);
3306 cpr2 = &cpr->cp_ring_arr[idx];
3307 cpr2->had_nqe_notify = 1;
3308 cpr2->toggle = NQE_CN_TOGGLE(type);
3309 work_done += __bnxt_poll_work(bp, cpr2,
3310 budget - work_done);
3311 cpr->has_more_work |= cpr2->has_more_work;
3312 } else {
3313 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3314 }
3315 raw_cons = NEXT_RAW_CMP(raw_cons);
3316 }
3317 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3318 if (raw_cons != cpr->cp_raw_cons) {
3319 cpr->cp_raw_cons = raw_cons;
3320 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3321 }
3322 poll_done:
3323 cpr_rx = &cpr->cp_ring_arr[0];
3324 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3325 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3326 struct dim_sample dim_sample = {};
3327
3328 dim_update_sample(cpr->event_ctr,
3329 cpr_rx->rx_packets,
3330 cpr_rx->rx_bytes,
3331 &dim_sample);
3332 net_dim(&cpr->dim, &dim_sample);
3333 }
3334 return work_done;
3335 }
3336
3337 static void bnxt_free_tx_skbs(struct bnxt *bp)
3338 {
3339 int i, max_idx;
3340 struct pci_dev *pdev = bp->pdev;
3341
3342 if (!bp->tx_ring)
3343 return;
3344
3345 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3346 for (i = 0; i < bp->tx_nr_rings; i++) {
3347 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3348 int j;
3349
3350 if (!txr->tx_buf_ring)
3351 continue;
3352
3353 for (j = 0; j < max_idx;) {
3354 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
3355 struct sk_buff *skb;
3356 int k, last;
3357
3358 if (i < bp->tx_nr_rings_xdp &&
3359 tx_buf->action == XDP_REDIRECT) {
3360 dma_unmap_single(&pdev->dev,
3361 dma_unmap_addr(tx_buf, mapping),
3362 dma_unmap_len(tx_buf, len),
3363 DMA_TO_DEVICE);
3364 xdp_return_frame(tx_buf->xdpf);
3365 tx_buf->action = 0;
3366 tx_buf->xdpf = NULL;
3367 j++;
3368 continue;
3369 }
3370
3371 skb = tx_buf->skb;
3372 if (!skb) {
3373 j++;
3374 continue;
3375 }
3376
3377 tx_buf->skb = NULL;
3378
3379 if (tx_buf->is_push) {
3380 dev_kfree_skb(skb);
3381 j += 2;
3382 continue;
3383 }
3384
3385 dma_unmap_single(&pdev->dev,
3386 dma_unmap_addr(tx_buf, mapping),
3387 skb_headlen(skb),
3388 DMA_TO_DEVICE);
3389
3390 last = tx_buf->nr_frags;
3391 j += 2;
3392 for (k = 0; k < last; k++, j++) {
3393 int ring_idx = j & bp->tx_ring_mask;
3394 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
3395
3396 tx_buf = &txr->tx_buf_ring[ring_idx];
3397 dma_unmap_page(
3398 &pdev->dev,
3399 dma_unmap_addr(tx_buf, mapping),
3400 skb_frag_size(frag), DMA_TO_DEVICE);
3401 }
3402 dev_kfree_skb(skb);
3403 }
3404 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3405 }
3406 }
3407
3408 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3409 {
3410 int i, max_idx;
3411
3412 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3413
3414 for (i = 0; i < max_idx; i++) {
3415 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3416 void *data = rx_buf->data;
3417
3418 if (!data)
3419 continue;
3420
3421 rx_buf->data = NULL;
3422 if (BNXT_RX_PAGE_MODE(bp))
3423 page_pool_recycle_direct(rxr->page_pool, data);
3424 else
3425 page_pool_free_va(rxr->head_pool, data, true);
3426 }
3427 }
3428
3429 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3430 {
3431 int i, max_idx;
3432
3433 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3434
3435 for (i = 0; i < max_idx; i++) {
3436 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3437 struct page *page = rx_agg_buf->page;
3438
3439 if (!page)
3440 continue;
3441
3442 rx_agg_buf->page = NULL;
3443 __clear_bit(i, rxr->rx_agg_bmap);
3444
3445 page_pool_recycle_direct(rxr->page_pool, page);
3446 }
3447 }
3448
3449 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3450 struct bnxt_rx_ring_info *rxr)
3451 {
3452 int i;
3453
3454 for (i = 0; i < bp->max_tpa; i++) {
3455 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3456 u8 *data = tpa_info->data;
3457
3458 if (!data)
3459 continue;
3460
3461 tpa_info->data = NULL;
3462 page_pool_free_va(rxr->head_pool, data, false);
3463 }
3464 }
3465
3466 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3467 struct bnxt_rx_ring_info *rxr)
3468 {
3469 struct bnxt_tpa_idx_map *map;
3470
3471 if (!rxr->rx_tpa)
3472 goto skip_rx_tpa_free;
3473
3474 bnxt_free_one_tpa_info_data(bp, rxr);
3475
3476 skip_rx_tpa_free:
3477 if (!rxr->rx_buf_ring)
3478 goto skip_rx_buf_free;
3479
3480 bnxt_free_one_rx_ring(bp, rxr);
3481
3482 skip_rx_buf_free:
3483 if (!rxr->rx_agg_ring)
3484 goto skip_rx_agg_free;
3485
3486 bnxt_free_one_rx_agg_ring(bp, rxr);
3487
3488 skip_rx_agg_free:
3489 map = rxr->rx_tpa_idx_map;
3490 if (map)
3491 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3492 }
3493
3494 static void bnxt_free_rx_skbs(struct bnxt *bp)
3495 {
3496 int i;
3497
3498 if (!bp->rx_ring)
3499 return;
3500
3501 for (i = 0; i < bp->rx_nr_rings; i++)
3502 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3503 }
3504
3505 static void bnxt_free_skbs(struct bnxt *bp)
3506 {
3507 bnxt_free_tx_skbs(bp);
3508 bnxt_free_rx_skbs(bp);
3509 }
3510
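/* Descriptive note (added): pre-fill newly allocated context memory with
 * the configured init value: either memset the whole block, or write
 * init_value at init_offset of every entry when a specific offset is given.
 */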
3511 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3512 {
3513 u8 init_val = ctxm->init_value;
3514 u16 offset = ctxm->init_offset;
3515 u8 *p2 = p;
3516 int i;
3517
3518 if (!init_val)
3519 return;
3520 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3521 memset(p, init_val, len);
3522 return;
3523 }
3524 for (i = 0; i < len; i += ctxm->entry_size)
3525 *(p2 + i + offset) = init_val;
3526 }
3527
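/* Descriptive note (added): copy up to (tail - head) bytes out of a paged
 * ring memory area into a flat buffer, starting at byte offset 'head' and
 * handling page boundaries.  If 'buf' is NULL only the length that would
 * have been copied is returned (useful, for example, when sizing a snapshot
 * of context memory).
 */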
3528 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3529 void *buf, size_t offset, size_t head,
3530 size_t tail)
3531 {
3532 int i, head_page, start_idx, source_offset;
3533 size_t len, rem_len, total_len, max_bytes;
3534
3535 head_page = head / rmem->page_size;
3536 source_offset = head % rmem->page_size;
3537 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3538 if (!total_len)
3539 total_len = MAX_CTX_BYTES;
3540 start_idx = head_page % MAX_CTX_PAGES;
3541 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3542 source_offset;
3543 total_len = min(total_len, max_bytes);
3544 rem_len = total_len;
3545
3546 for (i = start_idx; rem_len; i++, source_offset = 0) {
3547 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3548 if (buf)
3549 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3550 len);
3551 offset += len;
3552 rem_len -= len;
3553 }
3554 return total_len;
3555 }
3556
3557 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3558 {
3559 struct pci_dev *pdev = bp->pdev;
3560 int i;
3561
3562 if (!rmem->pg_arr)
3563 goto skip_pages;
3564
3565 for (i = 0; i < rmem->nr_pages; i++) {
3566 if (!rmem->pg_arr[i])
3567 continue;
3568
3569 dma_free_coherent(&pdev->dev, rmem->page_size,
3570 rmem->pg_arr[i], rmem->dma_arr[i]);
3571
3572 rmem->pg_arr[i] = NULL;
3573 }
3574 skip_pages:
3575 if (rmem->pg_tbl) {
3576 size_t pg_tbl_size = rmem->nr_pages * 8;
3577
3578 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3579 pg_tbl_size = rmem->page_size;
3580 dma_free_coherent(&pdev->dev, pg_tbl_size,
3581 rmem->pg_tbl, rmem->pg_tbl_map);
3582 rmem->pg_tbl = NULL;
3583 }
3584 if (rmem->vmem_size && *rmem->vmem) {
3585 vfree(*rmem->vmem);
3586 *rmem->vmem = NULL;
3587 }
3588 }
3589
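/* Descriptive note (added): allocate the DMA pages described by 'rmem' and,
 * when the ring spans more than one page (or uses an indirection level),
 * the page table that points to them.  Ring-type page tables also get the
 * PTU PTE valid/next-to-last/last bits set on their entries, and an optional
 * vzalloc'ed shadow buffer (rmem->vmem) is allocated last.
 */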
3590 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3591 {
3592 struct pci_dev *pdev = bp->pdev;
3593 u64 valid_bit = 0;
3594 int i;
3595
3596 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3597 valid_bit = PTU_PTE_VALID;
3598 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3599 size_t pg_tbl_size = rmem->nr_pages * 8;
3600
3601 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3602 pg_tbl_size = rmem->page_size;
3603 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3604 &rmem->pg_tbl_map,
3605 GFP_KERNEL);
3606 if (!rmem->pg_tbl)
3607 return -ENOMEM;
3608 }
3609
3610 for (i = 0; i < rmem->nr_pages; i++) {
3611 u64 extra_bits = valid_bit;
3612
3613 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3614 rmem->page_size,
3615 &rmem->dma_arr[i],
3616 GFP_KERNEL);
3617 if (!rmem->pg_arr[i])
3618 return -ENOMEM;
3619
3620 if (rmem->ctx_mem)
3621 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3622 rmem->page_size);
3623 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3624 if (i == rmem->nr_pages - 2 &&
3625 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3626 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3627 else if (i == rmem->nr_pages - 1 &&
3628 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3629 extra_bits |= PTU_PTE_LAST;
3630 rmem->pg_tbl[i] =
3631 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3632 }
3633 }
3634
3635 if (rmem->vmem_size) {
3636 *rmem->vmem = vzalloc(rmem->vmem_size);
3637 if (!(*rmem->vmem))
3638 return -ENOMEM;
3639 }
3640 return 0;
3641 }
3642
3643 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3644 struct bnxt_rx_ring_info *rxr)
3645 {
3646 int i;
3647
3648 kfree(rxr->rx_tpa_idx_map);
3649 rxr->rx_tpa_idx_map = NULL;
3650 if (rxr->rx_tpa) {
3651 for (i = 0; i < bp->max_tpa; i++) {
3652 kfree(rxr->rx_tpa[i].agg_arr);
3653 rxr->rx_tpa[i].agg_arr = NULL;
3654 }
3655 }
3656 kfree(rxr->rx_tpa);
3657 rxr->rx_tpa = NULL;
3658 }
3659
3660 static void bnxt_free_tpa_info(struct bnxt *bp)
3661 {
3662 int i;
3663
3664 for (i = 0; i < bp->rx_nr_rings; i++) {
3665 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3666
3667 bnxt_free_one_tpa_info(bp, rxr);
3668 }
3669 }
3670
3671 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3672 struct bnxt_rx_ring_info *rxr)
3673 {
3674 struct rx_agg_cmp *agg;
3675 int i;
3676
3677 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3678 GFP_KERNEL);
3679 if (!rxr->rx_tpa)
3680 return -ENOMEM;
3681
3682 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3683 return 0;
3684 for (i = 0; i < bp->max_tpa; i++) {
3685 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3686 if (!agg)
3687 return -ENOMEM;
3688 rxr->rx_tpa[i].agg_arr = agg;
3689 }
3690 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3691 GFP_KERNEL);
3692 if (!rxr->rx_tpa_idx_map)
3693 return -ENOMEM;
3694
3695 return 0;
3696 }
3697
3698 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3699 {
3700 int i, rc;
3701
3702 bp->max_tpa = MAX_TPA;
3703 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3704 if (!bp->max_tpa_v2)
3705 return 0;
3706 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3707 }
3708
3709 for (i = 0; i < bp->rx_nr_rings; i++) {
3710 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3711
3712 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3713 if (rc)
3714 return rc;
3715 }
3716 return 0;
3717 }
3718
3719 static void bnxt_free_rx_rings(struct bnxt *bp)
3720 {
3721 int i;
3722
3723 if (!bp->rx_ring)
3724 return;
3725
3726 bnxt_free_tpa_info(bp);
3727 for (i = 0; i < bp->rx_nr_rings; i++) {
3728 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3729 struct bnxt_ring_struct *ring;
3730
3731 if (rxr->xdp_prog)
3732 bpf_prog_put(rxr->xdp_prog);
3733
3734 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3735 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3736
3737 page_pool_destroy(rxr->page_pool);
3738 if (bnxt_separate_head_pool())
3739 page_pool_destroy(rxr->head_pool);
3740 rxr->page_pool = rxr->head_pool = NULL;
3741
3742 kfree(rxr->rx_agg_bmap);
3743 rxr->rx_agg_bmap = NULL;
3744
3745 ring = &rxr->rx_ring_struct;
3746 bnxt_free_ring(bp, &ring->ring_mem);
3747
3748 ring = &rxr->rx_agg_ring_struct;
3749 bnxt_free_ring(bp, &ring->ring_mem);
3750 }
3751 }
3752
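/* Descriptive note (added): create the page_pool(s) backing one RX ring: a
 * main pool sized for the aggregation ring (plus the RX ring in page mode),
 * and, when bnxt_separate_head_pool() says so, a smaller pool used for the
 * frag buffers allocated with __bnxt_alloc_rx_frag() (RX heads and TPA
 * buffers); otherwise the head pool aliases the main pool.
 */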
3753 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3754 struct bnxt_rx_ring_info *rxr,
3755 int numa_node)
3756 {
3757 struct page_pool_params pp = { 0 };
3758 struct page_pool *pool;
3759
3760 pp.pool_size = bp->rx_agg_ring_size;
3761 if (BNXT_RX_PAGE_MODE(bp))
3762 pp.pool_size += bp->rx_ring_size;
3763 pp.nid = numa_node;
3764 pp.napi = &rxr->bnapi->napi;
3765 pp.netdev = bp->dev;
3766 pp.dev = &bp->pdev->dev;
3767 pp.dma_dir = bp->rx_dir;
3768 pp.max_len = PAGE_SIZE;
3769 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3770
3771 pool = page_pool_create(&pp);
3772 if (IS_ERR(pool))
3773 return PTR_ERR(pool);
3774 rxr->page_pool = pool;
3775
3776 if (bnxt_separate_head_pool()) {
3777 pp.pool_size = max(bp->rx_ring_size, 1024);
3778 pool = page_pool_create(&pp);
3779 if (IS_ERR(pool))
3780 goto err_destroy_pp;
3781 }
3782 rxr->head_pool = pool;
3783
3784 return 0;
3785
3786 err_destroy_pp:
3787 page_pool_destroy(rxr->page_pool);
3788 rxr->page_pool = NULL;
3789 return PTR_ERR(pool);
3790 }
3791
3792 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3793 {
3794 u16 mem_size;
3795
3796 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3797 mem_size = rxr->rx_agg_bmap_size / 8;
3798 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3799 if (!rxr->rx_agg_bmap)
3800 return -ENOMEM;
3801
3802 return 0;
3803 }
3804
3805 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3806 {
3807 int numa_node = dev_to_node(&bp->pdev->dev);
3808 int i, rc = 0, agg_rings = 0, cpu;
3809
3810 if (!bp->rx_ring)
3811 return -ENOMEM;
3812
3813 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3814 agg_rings = 1;
3815
3816 for (i = 0; i < bp->rx_nr_rings; i++) {
3817 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3818 struct bnxt_ring_struct *ring;
3819 int cpu_node;
3820
3821 ring = &rxr->rx_ring_struct;
3822
3823 cpu = cpumask_local_spread(i, numa_node);
3824 cpu_node = cpu_to_node(cpu);
3825 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3826 i, cpu_node);
3827 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3828 if (rc)
3829 return rc;
3830
3831 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3832 if (rc < 0)
3833 return rc;
3834
3835 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3836 MEM_TYPE_PAGE_POOL,
3837 rxr->page_pool);
3838 if (rc) {
3839 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3840 return rc;
3841 }
3842
3843 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3844 if (rc)
3845 return rc;
3846
3847 ring->grp_idx = i;
3848 if (agg_rings) {
3849 ring = &rxr->rx_agg_ring_struct;
3850 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3851 if (rc)
3852 return rc;
3853
3854 ring->grp_idx = i;
3855 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3856 if (rc)
3857 return rc;
3858 }
3859 }
3860 if (bp->flags & BNXT_FLAG_TPA)
3861 rc = bnxt_alloc_tpa_info(bp);
3862 return rc;
3863 }
3864
3865 static void bnxt_free_tx_rings(struct bnxt *bp)
3866 {
3867 int i;
3868 struct pci_dev *pdev = bp->pdev;
3869
3870 if (!bp->tx_ring)
3871 return;
3872
3873 for (i = 0; i < bp->tx_nr_rings; i++) {
3874 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3875 struct bnxt_ring_struct *ring;
3876
3877 if (txr->tx_push) {
3878 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3879 txr->tx_push, txr->tx_push_mapping);
3880 txr->tx_push = NULL;
3881 }
3882
3883 ring = &txr->tx_ring_struct;
3884
3885 bnxt_free_ring(bp, &ring->ring_mem);
3886 }
3887 }
3888
3889 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3890 ((tc) * (bp)->tx_nr_rings_per_tc)
3891
3892 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3893 ((tx) % (bp)->tx_nr_rings_per_tc)
3894
3895 #define BNXT_RING_TO_TC(bp, tx) \
3896 ((tx) / (bp)->tx_nr_rings_per_tc)
3897
3898 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3899 {
3900 int i, j, rc;
3901 struct pci_dev *pdev = bp->pdev;
3902
3903 bp->tx_push_size = 0;
3904 if (bp->tx_push_thresh) {
3905 int push_size;
3906
3907 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3908 bp->tx_push_thresh);
3909
3910 if (push_size > 256) {
3911 push_size = 0;
3912 bp->tx_push_thresh = 0;
3913 }
3914
3915 bp->tx_push_size = push_size;
3916 }
3917
3918 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3919 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3920 struct bnxt_ring_struct *ring;
3921 u8 qidx;
3922
3923 ring = &txr->tx_ring_struct;
3924
3925 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3926 if (rc)
3927 return rc;
3928
3929 ring->grp_idx = txr->bnapi->index;
3930 if (bp->tx_push_size) {
3931 dma_addr_t mapping;
3932
3933 /* One pre-allocated DMA buffer to backup
3934 * TX push operation
3935 */
3936 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3937 bp->tx_push_size,
3938 &txr->tx_push_mapping,
3939 GFP_KERNEL);
3940
3941 if (!txr->tx_push)
3942 return -ENOMEM;
3943
3944 mapping = txr->tx_push_mapping +
3945 sizeof(struct tx_push_bd);
3946 txr->data_mapping = cpu_to_le64(mapping);
3947 }
3948 qidx = bp->tc_to_qidx[j];
3949 ring->queue_id = bp->q_info[qidx].queue_id;
3950 spin_lock_init(&txr->xdp_tx_lock);
3951 if (i < bp->tx_nr_rings_xdp)
3952 continue;
3953 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3954 j++;
3955 }
3956 return 0;
3957 }
3958
3959 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3960 {
3961 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3962
3963 kfree(cpr->cp_desc_ring);
3964 cpr->cp_desc_ring = NULL;
3965 ring->ring_mem.pg_arr = NULL;
3966 kfree(cpr->cp_desc_mapping);
3967 cpr->cp_desc_mapping = NULL;
3968 ring->ring_mem.dma_arr = NULL;
3969 }
3970
3971 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3972 {
3973 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3974 if (!cpr->cp_desc_ring)
3975 return -ENOMEM;
3976 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3977 GFP_KERNEL);
3978 if (!cpr->cp_desc_mapping)
3979 return -ENOMEM;
3980 return 0;
3981 }
3982
3983 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3984 {
3985 int i;
3986
3987 if (!bp->bnapi)
3988 return;
3989 for (i = 0; i < bp->cp_nr_rings; i++) {
3990 struct bnxt_napi *bnapi = bp->bnapi[i];
3991
3992 if (!bnapi)
3993 continue;
3994 bnxt_free_cp_arrays(&bnapi->cp_ring);
3995 }
3996 }
3997
3998 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3999 {
4000 int i, n = bp->cp_nr_pages;
4001
4002 for (i = 0; i < bp->cp_nr_rings; i++) {
4003 struct bnxt_napi *bnapi = bp->bnapi[i];
4004 int rc;
4005
4006 if (!bnapi)
4007 continue;
4008 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4009 if (rc)
4010 return rc;
4011 }
4012 return 0;
4013 }
4014
4015 static void bnxt_free_cp_rings(struct bnxt *bp)
4016 {
4017 int i;
4018
4019 if (!bp->bnapi)
4020 return;
4021
4022 for (i = 0; i < bp->cp_nr_rings; i++) {
4023 struct bnxt_napi *bnapi = bp->bnapi[i];
4024 struct bnxt_cp_ring_info *cpr;
4025 struct bnxt_ring_struct *ring;
4026 int j;
4027
4028 if (!bnapi)
4029 continue;
4030
4031 cpr = &bnapi->cp_ring;
4032 ring = &cpr->cp_ring_struct;
4033
4034 bnxt_free_ring(bp, &ring->ring_mem);
4035
4036 if (!cpr->cp_ring_arr)
4037 continue;
4038
4039 for (j = 0; j < cpr->cp_ring_count; j++) {
4040 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4041
4042 ring = &cpr2->cp_ring_struct;
4043 bnxt_free_ring(bp, &ring->ring_mem);
4044 bnxt_free_cp_arrays(cpr2);
4045 }
4046 kfree(cpr->cp_ring_arr);
4047 cpr->cp_ring_arr = NULL;
4048 cpr->cp_ring_count = 0;
4049 }
4050 }
4051
4052 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4053 struct bnxt_cp_ring_info *cpr)
4054 {
4055 struct bnxt_ring_mem_info *rmem;
4056 struct bnxt_ring_struct *ring;
4057 int rc;
4058
4059 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4060 if (rc) {
4061 bnxt_free_cp_arrays(cpr);
4062 return -ENOMEM;
4063 }
4064 ring = &cpr->cp_ring_struct;
4065 rmem = &ring->ring_mem;
4066 rmem->nr_pages = bp->cp_nr_pages;
4067 rmem->page_size = HW_CMPD_RING_SIZE;
4068 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4069 rmem->dma_arr = cpr->cp_desc_mapping;
4070 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4071 rc = bnxt_alloc_ring(bp, rmem);
4072 if (rc) {
4073 bnxt_free_ring(bp, rmem);
4074 bnxt_free_cp_arrays(cpr);
4075 }
4076 return rc;
4077 }
4078
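/* Descriptive note (added): allocate the per-NAPI completion/notification
 * rings.  On P5_PLUS chips each NAPI owns a notification queue (cpr) plus
 * an array of completion sub-rings: one for RX (if the NAPI handles an RX
 * ring) and one per TX ring/traffic class it serves; the sub-rings are then
 * linked back to their RX/TX rings.
 */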
4079 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4080 {
4081 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4082 int i, j, rc, ulp_msix;
4083 int tcs = bp->num_tc;
4084
4085 if (!tcs)
4086 tcs = 1;
4087 ulp_msix = bnxt_get_ulp_msix_num(bp);
4088 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4089 struct bnxt_napi *bnapi = bp->bnapi[i];
4090 struct bnxt_cp_ring_info *cpr, *cpr2;
4091 struct bnxt_ring_struct *ring;
4092 int cp_count = 0, k;
4093 int rx = 0, tx = 0;
4094
4095 if (!bnapi)
4096 continue;
4097
4098 cpr = &bnapi->cp_ring;
4099 cpr->bnapi = bnapi;
4100 ring = &cpr->cp_ring_struct;
4101
4102 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4103 if (rc)
4104 return rc;
4105
4106 ring->map_idx = ulp_msix + i;
4107
4108 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4109 continue;
4110
4111 if (i < bp->rx_nr_rings) {
4112 cp_count++;
4113 rx = 1;
4114 }
4115 if (i < bp->tx_nr_rings_xdp) {
4116 cp_count++;
4117 tx = 1;
4118 } else if ((sh && i < bp->tx_nr_rings) ||
4119 (!sh && i >= bp->rx_nr_rings)) {
4120 cp_count += tcs;
4121 tx = 1;
4122 }
4123
4124 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
4125 GFP_KERNEL);
4126 if (!cpr->cp_ring_arr)
4127 return -ENOMEM;
4128 cpr->cp_ring_count = cp_count;
4129
4130 for (k = 0; k < cp_count; k++) {
4131 cpr2 = &cpr->cp_ring_arr[k];
4132 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4133 if (rc)
4134 return rc;
4135 cpr2->bnapi = bnapi;
4136 cpr2->sw_stats = cpr->sw_stats;
4137 cpr2->cp_idx = k;
4138 if (!k && rx) {
4139 bp->rx_ring[i].rx_cpr = cpr2;
4140 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4141 } else {
4142 int n, tc = k - rx;
4143
4144 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4145 bp->tx_ring[n].tx_cpr = cpr2;
4146 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4147 }
4148 }
4149 if (tx)
4150 j++;
4151 }
4152 return 0;
4153 }
4154
4155 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4156 struct bnxt_rx_ring_info *rxr)
4157 {
4158 struct bnxt_ring_mem_info *rmem;
4159 struct bnxt_ring_struct *ring;
4160
4161 ring = &rxr->rx_ring_struct;
4162 rmem = &ring->ring_mem;
4163 rmem->nr_pages = bp->rx_nr_pages;
4164 rmem->page_size = HW_RXBD_RING_SIZE;
4165 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4166 rmem->dma_arr = rxr->rx_desc_mapping;
4167 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4168 rmem->vmem = (void **)&rxr->rx_buf_ring;
4169
4170 ring = &rxr->rx_agg_ring_struct;
4171 rmem = &ring->ring_mem;
4172 rmem->nr_pages = bp->rx_agg_nr_pages;
4173 rmem->page_size = HW_RXBD_RING_SIZE;
4174 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4175 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4176 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4177 rmem->vmem = (void **)&rxr->rx_agg_ring;
4178 }
4179
4180 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4181 struct bnxt_rx_ring_info *rxr)
4182 {
4183 struct bnxt_ring_mem_info *rmem;
4184 struct bnxt_ring_struct *ring;
4185 int i;
4186
4187 rxr->page_pool->p.napi = NULL;
4188 rxr->page_pool = NULL;
4189 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4190
4191 ring = &rxr->rx_ring_struct;
4192 rmem = &ring->ring_mem;
4193 rmem->pg_tbl = NULL;
4194 rmem->pg_tbl_map = 0;
4195 for (i = 0; i < rmem->nr_pages; i++) {
4196 rmem->pg_arr[i] = NULL;
4197 rmem->dma_arr[i] = 0;
4198 }
4199 *rmem->vmem = NULL;
4200
4201 ring = &rxr->rx_agg_ring_struct;
4202 rmem = &ring->ring_mem;
4203 rmem->pg_tbl = NULL;
4204 rmem->pg_tbl_map = 0;
4205 for (i = 0; i < rmem->nr_pages; i++) {
4206 rmem->pg_arr[i] = NULL;
4207 rmem->dma_arr[i] = 0;
4208 }
4209 *rmem->vmem = NULL;
4210 }
4211
4212 static void bnxt_init_ring_struct(struct bnxt *bp)
4213 {
4214 int i, j;
4215
4216 for (i = 0; i < bp->cp_nr_rings; i++) {
4217 struct bnxt_napi *bnapi = bp->bnapi[i];
4218 struct bnxt_ring_mem_info *rmem;
4219 struct bnxt_cp_ring_info *cpr;
4220 struct bnxt_rx_ring_info *rxr;
4221 struct bnxt_tx_ring_info *txr;
4222 struct bnxt_ring_struct *ring;
4223
4224 if (!bnapi)
4225 continue;
4226
4227 cpr = &bnapi->cp_ring;
4228 ring = &cpr->cp_ring_struct;
4229 rmem = &ring->ring_mem;
4230 rmem->nr_pages = bp->cp_nr_pages;
4231 rmem->page_size = HW_CMPD_RING_SIZE;
4232 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4233 rmem->dma_arr = cpr->cp_desc_mapping;
4234 rmem->vmem_size = 0;
4235
4236 rxr = bnapi->rx_ring;
4237 if (!rxr)
4238 goto skip_rx;
4239
4240 ring = &rxr->rx_ring_struct;
4241 rmem = &ring->ring_mem;
4242 rmem->nr_pages = bp->rx_nr_pages;
4243 rmem->page_size = HW_RXBD_RING_SIZE;
4244 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4245 rmem->dma_arr = rxr->rx_desc_mapping;
4246 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4247 rmem->vmem = (void **)&rxr->rx_buf_ring;
4248
4249 ring = &rxr->rx_agg_ring_struct;
4250 rmem = &ring->ring_mem;
4251 rmem->nr_pages = bp->rx_agg_nr_pages;
4252 rmem->page_size = HW_RXBD_RING_SIZE;
4253 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4254 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4255 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4256 rmem->vmem = (void **)&rxr->rx_agg_ring;
4257
4258 skip_rx:
4259 bnxt_for_each_napi_tx(j, bnapi, txr) {
4260 ring = &txr->tx_ring_struct;
4261 rmem = &ring->ring_mem;
4262 rmem->nr_pages = bp->tx_nr_pages;
4263 rmem->page_size = HW_TXBD_RING_SIZE;
4264 rmem->pg_arr = (void **)txr->tx_desc_ring;
4265 rmem->dma_arr = txr->tx_desc_mapping;
4266 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4267 rmem->vmem = (void **)&txr->tx_buf_ring;
4268 }
4269 }
4270 }
4271
4272 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4273 {
4274 int i;
4275 u32 prod;
4276 struct rx_bd **rx_buf_ring;
4277
4278 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4279 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4280 int j;
4281 struct rx_bd *rxbd;
4282
4283 rxbd = rx_buf_ring[i];
4284 if (!rxbd)
4285 continue;
4286
4287 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4288 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4289 rxbd->rx_bd_opaque = prod;
4290 }
4291 }
4292 }
4293
4294 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4295 struct bnxt_rx_ring_info *rxr,
4296 int ring_nr)
4297 {
4298 u32 prod;
4299 int i;
4300
4301 prod = rxr->rx_prod;
4302 for (i = 0; i < bp->rx_ring_size; i++) {
4303 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4304 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4305 ring_nr, i, bp->rx_ring_size);
4306 break;
4307 }
4308 prod = NEXT_RX(prod);
4309 }
4310 rxr->rx_prod = prod;
4311 }
4312
4313 static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
4314 struct bnxt_rx_ring_info *rxr,
4315 int ring_nr)
4316 {
4317 u32 prod;
4318 int i;
4319
4320 prod = rxr->rx_agg_prod;
4321 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4322 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4323 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4324 ring_nr, i, bp->rx_ring_size);
4325 break;
4326 }
4327 prod = NEXT_RX_AGG(prod);
4328 }
4329 rxr->rx_agg_prod = prod;
4330 }
4331
4332 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4333 struct bnxt_rx_ring_info *rxr)
4334 {
4335 dma_addr_t mapping;
4336 u8 *data;
4337 int i;
4338
4339 for (i = 0; i < bp->max_tpa; i++) {
4340 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4341 GFP_KERNEL);
4342 if (!data)
4343 return -ENOMEM;
4344
4345 rxr->rx_tpa[i].data = data;
4346 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4347 rxr->rx_tpa[i].mapping = mapping;
4348 }
4349
4350 return 0;
4351 }
4352
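/* Descriptive note (added): fill one RX ring with buffers: normal RX
 * buffers first, then (when aggregation rings are enabled) aggregation
 * pages and, if TPA is active on this ring, the TPA header buffers.
 */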
4353 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4354 {
4355 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4356 int rc;
4357
4358 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4359
4360 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4361 return 0;
4362
4363 bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
4364
4365 if (rxr->rx_tpa) {
4366 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4367 if (rc)
4368 return rc;
4369 }
4370 return 0;
4371 }
4372
4373 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4374 struct bnxt_rx_ring_info *rxr)
4375 {
4376 struct bnxt_ring_struct *ring;
4377 u32 type;
4378
4379 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4380 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4381
4382 if (NET_IP_ALIGN == 2)
4383 type |= RX_BD_FLAGS_SOP;
4384
4385 ring = &rxr->rx_ring_struct;
4386 bnxt_init_rxbd_pages(ring, type);
4387 ring->fw_ring_id = INVALID_HW_RING_ID;
4388 }
4389
4390 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4391 struct bnxt_rx_ring_info *rxr)
4392 {
4393 struct bnxt_ring_struct *ring;
4394 u32 type;
4395
4396 ring = &rxr->rx_agg_ring_struct;
4397 ring->fw_ring_id = INVALID_HW_RING_ID;
4398 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4399 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4400 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4401
4402 bnxt_init_rxbd_pages(ring, type);
4403 }
4404 }
4405
4406 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4407 {
4408 struct bnxt_rx_ring_info *rxr;
4409
4410 rxr = &bp->rx_ring[ring_nr];
4411 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4412
4413 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4414 &rxr->bnapi->napi);
4415
4416 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4417 bpf_prog_add(bp->xdp_prog, 1);
4418 rxr->xdp_prog = bp->xdp_prog;
4419 }
4420
4421 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4422
4423 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4424 }
4425
4426 static void bnxt_init_cp_rings(struct bnxt *bp)
4427 {
4428 int i, j;
4429
4430 for (i = 0; i < bp->cp_nr_rings; i++) {
4431 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4432 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4433
4434 ring->fw_ring_id = INVALID_HW_RING_ID;
4435 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4436 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4437 if (!cpr->cp_ring_arr)
4438 continue;
4439 for (j = 0; j < cpr->cp_ring_count; j++) {
4440 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4441
4442 ring = &cpr2->cp_ring_struct;
4443 ring->fw_ring_id = INVALID_HW_RING_ID;
4444 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4445 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4446 }
4447 }
4448 }
4449
4450 static int bnxt_init_rx_rings(struct bnxt *bp)
4451 {
4452 int i, rc = 0;
4453
4454 if (BNXT_RX_PAGE_MODE(bp)) {
4455 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4456 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4457 } else {
4458 bp->rx_offset = BNXT_RX_OFFSET;
4459 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4460 }
4461
4462 for (i = 0; i < bp->rx_nr_rings; i++) {
4463 rc = bnxt_init_one_rx_ring(bp, i);
4464 if (rc)
4465 break;
4466 }
4467
4468 return rc;
4469 }
4470
4471 static int bnxt_init_tx_rings(struct bnxt *bp)
4472 {
4473 u16 i;
4474
4475 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4476 BNXT_MIN_TX_DESC_CNT);
4477
4478 for (i = 0; i < bp->tx_nr_rings; i++) {
4479 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4480 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4481
4482 ring->fw_ring_id = INVALID_HW_RING_ID;
4483
4484 if (i >= bp->tx_nr_rings_xdp)
4485 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4486 NETDEV_QUEUE_TYPE_TX,
4487 &txr->bnapi->napi);
4488 }
4489
4490 return 0;
4491 }
4492
4493 static void bnxt_free_ring_grps(struct bnxt *bp)
4494 {
4495 kfree(bp->grp_info);
4496 bp->grp_info = NULL;
4497 }
4498
4499 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4500 {
4501 int i;
4502
4503 if (irq_re_init) {
4504 bp->grp_info = kcalloc(bp->cp_nr_rings,
4505 sizeof(struct bnxt_ring_grp_info),
4506 GFP_KERNEL);
4507 if (!bp->grp_info)
4508 return -ENOMEM;
4509 }
4510 for (i = 0; i < bp->cp_nr_rings; i++) {
4511 if (irq_re_init)
4512 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4513 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4514 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4515 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4516 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4517 }
4518 return 0;
4519 }
4520
4521 static void bnxt_free_vnics(struct bnxt *bp)
4522 {
4523 kfree(bp->vnic_info);
4524 bp->vnic_info = NULL;
4525 bp->nr_vnics = 0;
4526 }
4527
4528 static int bnxt_alloc_vnics(struct bnxt *bp)
4529 {
4530 int num_vnics = 1;
4531
4532 #ifdef CONFIG_RFS_ACCEL
4533 if (bp->flags & BNXT_FLAG_RFS) {
4534 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4535 num_vnics++;
4536 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4537 num_vnics += bp->rx_nr_rings;
4538 }
4539 #endif
4540
4541 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4542 num_vnics++;
4543
4544 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4545 GFP_KERNEL);
4546 if (!bp->vnic_info)
4547 return -ENOMEM;
4548
4549 bp->nr_vnics = num_vnics;
4550 return 0;
4551 }
4552
4553 static void bnxt_init_vnics(struct bnxt *bp)
4554 {
4555 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4556 int i;
4557
4558 for (i = 0; i < bp->nr_vnics; i++) {
4559 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4560 int j;
4561
4562 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4563 vnic->vnic_id = i;
4564 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4565 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4566
4567 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4568
4569 if (bp->vnic_info[i].rss_hash_key) {
4570 if (i == BNXT_VNIC_DEFAULT) {
4571 u8 *key = (void *)vnic->rss_hash_key;
4572 int k;
4573
4574 if (!bp->rss_hash_key_valid &&
4575 !bp->rss_hash_key_updated) {
4576 get_random_bytes(bp->rss_hash_key,
4577 HW_HASH_KEY_SIZE);
4578 bp->rss_hash_key_updated = true;
4579 }
4580
4581 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4582 HW_HASH_KEY_SIZE);
4583
4584 if (!bp->rss_hash_key_updated)
4585 continue;
4586
4587 bp->rss_hash_key_updated = false;
4588 bp->rss_hash_key_valid = true;
4589
4590 bp->toeplitz_prefix = 0;
4591 for (k = 0; k < 8; k++) {
4592 bp->toeplitz_prefix <<= 8;
4593 bp->toeplitz_prefix |= key[k];
4594 }
4595 } else {
4596 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4597 HW_HASH_KEY_SIZE);
4598 }
4599 }
4600 }
4601 }
4602
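/* Descriptive note (added): number of ring pages needed for 'ring_size'
 * descriptors: divide by the descriptors per page, add one page of slack
 * (also covering any remainder), then round up to the next power of two,
 * since the ring masks derived from the page count assume a power of two.
 * E.g. ring_size / desc_per_pg = 6 -> 7 -> 8 pages.
 */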
4603 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4604 {
4605 int pages;
4606
4607 pages = ring_size / desc_per_pg;
4608
4609 if (!pages)
4610 return 1;
4611
4612 pages++;
4613
4614 while (pages & (pages - 1))
4615 pages++;
4616
4617 return pages;
4618 }
4619
4620 void bnxt_set_tpa_flags(struct bnxt *bp)
4621 {
4622 bp->flags &= ~BNXT_FLAG_TPA;
4623 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4624 return;
4625 if (bp->dev->features & NETIF_F_LRO)
4626 bp->flags |= BNXT_FLAG_LRO;
4627 else if (bp->dev->features & NETIF_F_GRO_HW)
4628 bp->flags |= BNXT_FLAG_GRO;
4629 }
4630
4631 static void bnxt_init_ring_params(struct bnxt *bp)
4632 {
4633 unsigned int rx_size;
4634
4635 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4636 /* Try to fit 4 chunks into a 4k page */
4637 rx_size = SZ_1K -
4638 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4639 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4640 }
4641
4642 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4643 * be set on entry.
4644 */
4645 void bnxt_set_ring_params(struct bnxt *bp)
4646 {
4647 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4648 u32 agg_factor = 0, agg_ring_size = 0;
4649
4650 /* 8 for CRC and VLAN */
4651 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4652
4653 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4654 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4655
4656 ring_size = bp->rx_ring_size;
4657 bp->rx_agg_ring_size = 0;
4658 bp->rx_agg_nr_pages = 0;
4659
4660 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4661 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4662
4663 bp->flags &= ~BNXT_FLAG_JUMBO;
4664 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4665 u32 jumbo_factor;
4666
4667 bp->flags |= BNXT_FLAG_JUMBO;
4668 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4669 if (jumbo_factor > agg_factor)
4670 agg_factor = jumbo_factor;
4671 }
4672 if (agg_factor) {
4673 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4674 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4675 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4676 bp->rx_ring_size, ring_size);
4677 bp->rx_ring_size = ring_size;
4678 }
4679 agg_ring_size = ring_size * agg_factor;
4680
4681 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4682 RX_DESC_CNT);
4683 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4684 u32 tmp = agg_ring_size;
4685
4686 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4687 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4688 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4689 tmp, agg_ring_size);
4690 }
4691 bp->rx_agg_ring_size = agg_ring_size;
4692 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4693
4694 if (BNXT_RX_PAGE_MODE(bp)) {
4695 rx_space = PAGE_SIZE;
4696 rx_size = PAGE_SIZE -
4697 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4698 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4699 } else {
4700 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4701 bp->rx_copybreak,
4702 bp->dev->cfg_pending->hds_thresh);
4703 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4704 rx_space = rx_size + NET_SKB_PAD +
4705 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4706 }
4707 }
4708
4709 bp->rx_buf_use_size = rx_size;
4710 bp->rx_buf_size = rx_space;
4711
4712 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4713 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4714
4715 ring_size = bp->tx_ring_size;
4716 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4717 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4718
4719 max_rx_cmpl = bp->rx_ring_size;
4720 /* MAX TPA needs to be added because TPA_START completions are
4721 * immediately recycled, so the TPA completions are not bound by
4722 * the RX ring size.
4723 */
4724 if (bp->flags & BNXT_FLAG_TPA)
4725 max_rx_cmpl += bp->max_tpa;
4726 /* RX and TPA completions are 32-byte, all others are 16-byte */
4727 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4728 bp->cp_ring_size = ring_size;
4729
4730 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4731 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4732 bp->cp_nr_pages = MAX_CP_PAGES;
4733 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4734 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4735 ring_size, bp->cp_ring_size);
4736 }
4737 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4738 bp->cp_ring_mask = bp->cp_bit - 1;
4739 }
4740
4741 /* Changing allocation mode of RX rings.
4742 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4743 */
4744 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4745 {
4746 struct net_device *dev = bp->dev;
4747
4748 if (page_mode) {
4749 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4750 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4751
4752 if (bp->xdp_prog->aux->xdp_has_frags)
4753 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4754 else
4755 dev->max_mtu =
4756 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4757 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4758 bp->flags |= BNXT_FLAG_JUMBO;
4759 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4760 } else {
4761 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4762 bp->rx_skb_func = bnxt_rx_page_skb;
4763 }
4764 bp->rx_dir = DMA_BIDIRECTIONAL;
4765 } else {
4766 dev->max_mtu = bp->max_mtu;
4767 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4768 bp->rx_dir = DMA_FROM_DEVICE;
4769 bp->rx_skb_func = bnxt_rx_skb;
4770 }
4771 }
4772
4773 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4774 {
4775 __bnxt_set_rx_skb_mode(bp, page_mode);
4776
4777 if (!page_mode) {
4778 int rx, tx;
4779
4780 bnxt_get_max_rings(bp, &rx, &tx, true);
4781 if (rx > 1) {
4782 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4783 bp->dev->hw_features |= NETIF_F_LRO;
4784 }
4785 }
4786
4787 /* Update LRO and GRO_HW availability */
4788 netdev_update_features(bp->dev);
4789 }
4790
4791 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4792 {
4793 int i;
4794 struct bnxt_vnic_info *vnic;
4795 struct pci_dev *pdev = bp->pdev;
4796
4797 if (!bp->vnic_info)
4798 return;
4799
4800 for (i = 0; i < bp->nr_vnics; i++) {
4801 vnic = &bp->vnic_info[i];
4802
4803 kfree(vnic->fw_grp_ids);
4804 vnic->fw_grp_ids = NULL;
4805
4806 kfree(vnic->uc_list);
4807 vnic->uc_list = NULL;
4808
4809 if (vnic->mc_list) {
4810 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4811 vnic->mc_list, vnic->mc_list_mapping);
4812 vnic->mc_list = NULL;
4813 }
4814
4815 if (vnic->rss_table) {
4816 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4817 vnic->rss_table,
4818 vnic->rss_table_dma_addr);
4819 vnic->rss_table = NULL;
4820 }
4821
4822 vnic->rss_hash_key = NULL;
4823 vnic->flags = 0;
4824 }
4825 }
4826
4827 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4828 {
4829 int i, rc = 0, size;
4830 struct bnxt_vnic_info *vnic;
4831 struct pci_dev *pdev = bp->pdev;
4832 int max_rings;
4833
4834 for (i = 0; i < bp->nr_vnics; i++) {
4835 vnic = &bp->vnic_info[i];
4836
4837 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4838 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4839
4840 if (mem_size > 0) {
4841 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4842 if (!vnic->uc_list) {
4843 rc = -ENOMEM;
4844 goto out;
4845 }
4846 }
4847 }
4848
4849 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4850 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4851 vnic->mc_list =
4852 dma_alloc_coherent(&pdev->dev,
4853 vnic->mc_list_size,
4854 &vnic->mc_list_mapping,
4855 GFP_KERNEL);
4856 if (!vnic->mc_list) {
4857 rc = -ENOMEM;
4858 goto out;
4859 }
4860 }
4861
4862 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4863 goto vnic_skip_grps;
4864
4865 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4866 max_rings = bp->rx_nr_rings;
4867 else
4868 max_rings = 1;
4869
4870 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4871 if (!vnic->fw_grp_ids) {
4872 rc = -ENOMEM;
4873 goto out;
4874 }
4875 vnic_skip_grps:
4876 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4877 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4878 continue;
4879
4880 /* Allocate rss table and hash key */
4881 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4882 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4883 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4884
4885 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4886 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4887 vnic->rss_table_size,
4888 &vnic->rss_table_dma_addr,
4889 GFP_KERNEL);
4890 if (!vnic->rss_table) {
4891 rc = -ENOMEM;
4892 goto out;
4893 }
4894
4895 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4896 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4897 }
4898 return 0;
4899
4900 out:
4901 return rc;
4902 }
4903
4904 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4905 {
4906 struct bnxt_hwrm_wait_token *token;
4907
4908 dma_pool_destroy(bp->hwrm_dma_pool);
4909 bp->hwrm_dma_pool = NULL;
4910
4911 rcu_read_lock();
4912 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4913 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4914 rcu_read_unlock();
4915 }
4916
4917 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4918 {
4919 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4920 BNXT_HWRM_DMA_SIZE,
4921 BNXT_HWRM_DMA_ALIGN, 0);
4922 if (!bp->hwrm_dma_pool)
4923 return -ENOMEM;
4924
4925 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4926
4927 return 0;
4928 }
4929
4930 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4931 {
4932 kfree(stats->hw_masks);
4933 stats->hw_masks = NULL;
4934 kfree(stats->sw_stats);
4935 stats->sw_stats = NULL;
4936 if (stats->hw_stats) {
4937 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4938 stats->hw_stats_map);
4939 stats->hw_stats = NULL;
4940 }
4941 }
4942
4943 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4944 bool alloc_masks)
4945 {
4946 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4947 &stats->hw_stats_map, GFP_KERNEL);
4948 if (!stats->hw_stats)
4949 return -ENOMEM;
4950
4951 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4952 if (!stats->sw_stats)
4953 goto stats_mem_err;
4954
4955 if (alloc_masks) {
4956 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4957 if (!stats->hw_masks)
4958 goto stats_mem_err;
4959 }
4960 return 0;
4961
4962 stats_mem_err:
4963 bnxt_free_stats_mem(bp, stats);
4964 return -ENOMEM;
4965 }
4966
4967 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4968 {
4969 int i;
4970
4971 for (i = 0; i < count; i++)
4972 mask_arr[i] = mask;
4973 }
4974
4975 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4976 {
4977 int i;
4978
4979 for (i = 0; i < count; i++)
4980 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4981 }
4982
4983 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4984 struct bnxt_stats_mem *stats)
4985 {
4986 struct hwrm_func_qstats_ext_output *resp;
4987 struct hwrm_func_qstats_ext_input *req;
4988 __le64 *hw_masks;
4989 int rc;
4990
4991 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4992 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4993 return -EOPNOTSUPP;
4994
4995 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4996 if (rc)
4997 return rc;
4998
4999 req->fid = cpu_to_le16(0xffff);
5000 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5001
5002 resp = hwrm_req_hold(bp, req);
5003 rc = hwrm_req_send(bp, req);
5004 if (!rc) {
5005 hw_masks = &resp->rx_ucast_pkts;
5006 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5007 }
5008 hwrm_req_drop(bp, req);
5009 return rc;
5010 }
5011
5012 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5013 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5014
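/* Descriptive note (added): determine the width of each hardware counter by
 * querying the firmware counter masks (HWRM_FUNC_QSTATS_EXT and the port
 * qstats calls with the COUNTER_MASK flag).  If the query is not supported,
 * fall back to 48-bit ring counter masks on P5_PLUS (full 64-bit otherwise)
 * and 40-bit port counter masks.
 */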
5015 static void bnxt_init_stats(struct bnxt *bp)
5016 {
5017 struct bnxt_napi *bnapi = bp->bnapi[0];
5018 struct bnxt_cp_ring_info *cpr;
5019 struct bnxt_stats_mem *stats;
5020 __le64 *rx_stats, *tx_stats;
5021 int rc, rx_count, tx_count;
5022 u64 *rx_masks, *tx_masks;
5023 u64 mask;
5024 u8 flags;
5025
5026 cpr = &bnapi->cp_ring;
5027 stats = &cpr->stats;
5028 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5029 if (rc) {
5030 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5031 mask = (1ULL << 48) - 1;
5032 else
5033 mask = -1ULL;
5034 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5035 }
5036 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5037 stats = &bp->port_stats;
5038 rx_stats = stats->hw_stats;
5039 rx_masks = stats->hw_masks;
5040 rx_count = sizeof(struct rx_port_stats) / 8;
5041 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5042 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5043 tx_count = sizeof(struct tx_port_stats) / 8;
5044
5045 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5046 rc = bnxt_hwrm_port_qstats(bp, flags);
5047 if (rc) {
5048 mask = (1ULL << 40) - 1;
5049
5050 bnxt_fill_masks(rx_masks, mask, rx_count);
5051 bnxt_fill_masks(tx_masks, mask, tx_count);
5052 } else {
5053 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5054 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5055 bnxt_hwrm_port_qstats(bp, 0);
5056 }
5057 }
5058 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5059 stats = &bp->rx_port_stats_ext;
5060 rx_stats = stats->hw_stats;
5061 rx_masks = stats->hw_masks;
5062 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5063 stats = &bp->tx_port_stats_ext;
5064 tx_stats = stats->hw_stats;
5065 tx_masks = stats->hw_masks;
5066 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5067
5068 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5069 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5070 if (rc) {
5071 mask = (1ULL << 40) - 1;
5072
5073 bnxt_fill_masks(rx_masks, mask, rx_count);
5074 if (tx_stats)
5075 bnxt_fill_masks(tx_masks, mask, tx_count);
5076 } else {
5077 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5078 if (tx_stats)
5079 bnxt_copy_hw_masks(tx_masks, tx_stats,
5080 tx_count);
5081 bnxt_hwrm_port_qstats_ext(bp, 0);
5082 }
5083 }
5084 }
5085
5086 static void bnxt_free_port_stats(struct bnxt *bp)
5087 {
5088 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5089 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5090
5091 bnxt_free_stats_mem(bp, &bp->port_stats);
5092 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5093 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5094 }
5095
5096 static void bnxt_free_ring_stats(struct bnxt *bp)
5097 {
5098 int i;
5099
5100 if (!bp->bnapi)
5101 return;
5102
5103 for (i = 0; i < bp->cp_nr_rings; i++) {
5104 struct bnxt_napi *bnapi = bp->bnapi[i];
5105 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5106
5107 bnxt_free_stats_mem(bp, &cpr->stats);
5108
5109 kfree(cpr->sw_stats);
5110 cpr->sw_stats = NULL;
5111 }
5112 }
5113
5114 static int bnxt_alloc_stats(struct bnxt *bp)
5115 {
5116 u32 size, i;
5117 int rc;
5118
5119 size = bp->hw_ring_stats_size;
5120
5121 for (i = 0; i < bp->cp_nr_rings; i++) {
5122 struct bnxt_napi *bnapi = bp->bnapi[i];
5123 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5124
5125 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
5126 if (!cpr->sw_stats)
5127 return -ENOMEM;
5128
5129 cpr->stats.len = size;
5130 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5131 if (rc)
5132 return rc;
5133
5134 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5135 }
5136
5137 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5138 return 0;
5139
5140 if (bp->port_stats.hw_stats)
5141 goto alloc_ext_stats;
5142
5143 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5144 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5145 if (rc)
5146 return rc;
5147
5148 bp->flags |= BNXT_FLAG_PORT_STATS;
5149
5150 alloc_ext_stats:
5151 /* Display extended statistics only if FW supports it */
5152 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5153 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5154 return 0;
5155
5156 if (bp->rx_port_stats_ext.hw_stats)
5157 goto alloc_tx_ext_stats;
5158
5159 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5160 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5161 /* Extended stats are optional */
5162 if (rc)
5163 return 0;
5164
5165 alloc_tx_ext_stats:
5166 if (bp->tx_port_stats_ext.hw_stats)
5167 return 0;
5168
5169 if (bp->hwrm_spec_code >= 0x10902 ||
5170 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5171 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5172 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5173 /* Extended stats are optional */
5174 if (rc)
5175 return 0;
5176 }
5177 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5178 return 0;
5179 }
5180
5181 static void bnxt_clear_ring_indices(struct bnxt *bp)
5182 {
5183 int i, j;
5184
5185 if (!bp->bnapi)
5186 return;
5187
5188 for (i = 0; i < bp->cp_nr_rings; i++) {
5189 struct bnxt_napi *bnapi = bp->bnapi[i];
5190 struct bnxt_cp_ring_info *cpr;
5191 struct bnxt_rx_ring_info *rxr;
5192 struct bnxt_tx_ring_info *txr;
5193
5194 if (!bnapi)
5195 continue;
5196
5197 cpr = &bnapi->cp_ring;
5198 cpr->cp_raw_cons = 0;
5199
5200 bnxt_for_each_napi_tx(j, bnapi, txr) {
5201 txr->tx_prod = 0;
5202 txr->tx_cons = 0;
5203 txr->tx_hw_cons = 0;
5204 }
5205
5206 rxr = bnapi->rx_ring;
5207 if (rxr) {
5208 rxr->rx_prod = 0;
5209 rxr->rx_agg_prod = 0;
5210 rxr->rx_sw_agg_prod = 0;
5211 rxr->rx_next_cons = 0;
5212 }
5213 bnapi->events = 0;
5214 }
5215 }
5216
5217 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5218 {
5219 u8 type = fltr->type, flags = fltr->flags;
5220
5221 INIT_LIST_HEAD(&fltr->list);
5222 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5223 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5224 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5225 }
5226
5227 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5228 {
5229 if (!list_empty(&fltr->list))
5230 list_del_init(&fltr->list);
5231 }
5232
5233 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5234 {
5235 struct bnxt_filter_base *usr_fltr, *tmp;
5236
5237 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5238 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5239 continue;
5240 bnxt_del_one_usr_fltr(bp, usr_fltr);
5241 }
5242 }
5243
5244 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5245 {
5246 hlist_del(&fltr->hash);
5247 bnxt_del_one_usr_fltr(bp, fltr);
5248 if (fltr->flags) {
5249 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5250 bp->ntp_fltr_count--;
5251 }
5252 kfree(fltr);
5253 }
5254
5255 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5256 {
5257 int i;
5258
5259 /* Under rtnl_lock and all our NAPIs have been disabled. It's
5260 * safe to delete the hash table.
5261 */
5262 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5263 struct hlist_head *head;
5264 struct hlist_node *tmp;
5265 struct bnxt_ntuple_filter *fltr;
5266
5267 head = &bp->ntp_fltr_hash_tbl[i];
5268 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5269 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5270 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5271 !list_empty(&fltr->base.list)))
5272 continue;
5273 bnxt_del_fltr(bp, &fltr->base);
5274 }
5275 }
5276 if (!all)
5277 return;
5278
5279 bitmap_free(bp->ntp_fltr_bmap);
5280 bp->ntp_fltr_bmap = NULL;
5281 bp->ntp_fltr_count = 0;
5282 }
5283
5284 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5285 {
5286 int i, rc = 0;
5287
5288 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5289 return 0;
5290
5291 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5292 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5293
5294 bp->ntp_fltr_count = 0;
5295 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5296
5297 if (!bp->ntp_fltr_bmap)
5298 rc = -ENOMEM;
5299
5300 return rc;
5301 }
5302
5303 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5304 {
5305 int i;
5306
5307 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5308 struct hlist_head *head;
5309 struct hlist_node *tmp;
5310 struct bnxt_l2_filter *fltr;
5311
5312 head = &bp->l2_fltr_hash_tbl[i];
5313 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5314 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5315 !list_empty(&fltr->base.list)))
5316 continue;
5317 bnxt_del_fltr(bp, &fltr->base);
5318 }
5319 }
5320 }
5321
5322 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5323 {
5324 int i;
5325
5326 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5327 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5328 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5329 }
5330
5331 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5332 {
5333 bnxt_free_vnic_attributes(bp);
5334 bnxt_free_tx_rings(bp);
5335 bnxt_free_rx_rings(bp);
5336 bnxt_free_cp_rings(bp);
5337 bnxt_free_all_cp_arrays(bp);
5338 bnxt_free_ntp_fltrs(bp, false);
5339 bnxt_free_l2_filters(bp, false);
5340 if (irq_re_init) {
5341 bnxt_free_ring_stats(bp);
5342 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5343 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5344 bnxt_free_port_stats(bp);
5345 bnxt_free_ring_grps(bp);
5346 bnxt_free_vnics(bp);
5347 kfree(bp->tx_ring_map);
5348 bp->tx_ring_map = NULL;
5349 kfree(bp->tx_ring);
5350 bp->tx_ring = NULL;
5351 kfree(bp->rx_ring);
5352 bp->rx_ring = NULL;
5353 kfree(bp->bnapi);
5354 bp->bnapi = NULL;
5355 } else {
5356 bnxt_clear_ring_indices(bp);
5357 }
5358 }
5359
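/* Allocate the per-ring software state.  When irq_re_init is true, the
 * bnxt_napi pointer array and the cache-aligned bnxt_napi structures
 * are carved out of one allocation, followed by the RX/TX ring arrays
 * and the TX ring map.  The remainder of the function allocates the
 * ring memory and VNIC attributes that are needed on every open.
 */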
5360 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5361 {
5362 int i, j, rc, size, arr_size;
5363 void *bnapi;
5364
5365 if (irq_re_init) {
5366 /* Allocate bnapi mem pointer array and mem block for
5367 * all queues
5368 */
5369 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5370 bp->cp_nr_rings);
5371 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5372 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5373 if (!bnapi)
5374 return -ENOMEM;
5375
5376 bp->bnapi = bnapi;
5377 bnapi += arr_size;
5378 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5379 bp->bnapi[i] = bnapi;
5380 bp->bnapi[i]->index = i;
5381 bp->bnapi[i]->bp = bp;
5382 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5383 struct bnxt_cp_ring_info *cpr =
5384 &bp->bnapi[i]->cp_ring;
5385
5386 cpr->cp_ring_struct.ring_mem.flags =
5387 BNXT_RMEM_RING_PTE_FLAG;
5388 }
5389 }
5390
5391 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5392 sizeof(struct bnxt_rx_ring_info),
5393 GFP_KERNEL);
5394 if (!bp->rx_ring)
5395 return -ENOMEM;
5396
5397 for (i = 0; i < bp->rx_nr_rings; i++) {
5398 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5399
5400 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5401 rxr->rx_ring_struct.ring_mem.flags =
5402 BNXT_RMEM_RING_PTE_FLAG;
5403 rxr->rx_agg_ring_struct.ring_mem.flags =
5404 BNXT_RMEM_RING_PTE_FLAG;
5405 } else {
5406 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5407 }
5408 rxr->bnapi = bp->bnapi[i];
5409 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5410 }
5411
5412 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5413 sizeof(struct bnxt_tx_ring_info),
5414 GFP_KERNEL);
5415 if (!bp->tx_ring)
5416 return -ENOMEM;
5417
5418 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5419 GFP_KERNEL);
5420
5421 if (!bp->tx_ring_map)
5422 return -ENOMEM;
5423
5424 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5425 j = 0;
5426 else
5427 j = bp->rx_nr_rings;
5428
5429 for (i = 0; i < bp->tx_nr_rings; i++) {
5430 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5431 struct bnxt_napi *bnapi2;
5432
5433 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5434 txr->tx_ring_struct.ring_mem.flags =
5435 BNXT_RMEM_RING_PTE_FLAG;
5436 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5437 if (i >= bp->tx_nr_rings_xdp) {
5438 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5439
5440 bnapi2 = bp->bnapi[k];
5441 txr->txq_index = i - bp->tx_nr_rings_xdp;
5442 txr->tx_napi_idx =
5443 BNXT_RING_TO_TC(bp, txr->txq_index);
5444 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5445 bnapi2->tx_int = bnxt_tx_int;
5446 } else {
5447 bnapi2 = bp->bnapi[j];
5448 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5449 bnapi2->tx_ring[0] = txr;
5450 bnapi2->tx_int = bnxt_tx_int_xdp;
5451 j++;
5452 }
5453 txr->bnapi = bnapi2;
5454 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5455 txr->tx_cpr = &bnapi2->cp_ring;
5456 }
5457
5458 rc = bnxt_alloc_stats(bp);
5459 if (rc)
5460 goto alloc_mem_err;
5461 bnxt_init_stats(bp);
5462
5463 rc = bnxt_alloc_ntp_fltrs(bp);
5464 if (rc)
5465 goto alloc_mem_err;
5466
5467 rc = bnxt_alloc_vnics(bp);
5468 if (rc)
5469 goto alloc_mem_err;
5470 }
5471
5472 rc = bnxt_alloc_all_cp_arrays(bp);
5473 if (rc)
5474 goto alloc_mem_err;
5475
5476 bnxt_init_ring_struct(bp);
5477
5478 rc = bnxt_alloc_rx_rings(bp);
5479 if (rc)
5480 goto alloc_mem_err;
5481
5482 rc = bnxt_alloc_tx_rings(bp);
5483 if (rc)
5484 goto alloc_mem_err;
5485
5486 rc = bnxt_alloc_cp_rings(bp);
5487 if (rc)
5488 goto alloc_mem_err;
5489
5490 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5491 BNXT_VNIC_MCAST_FLAG |
5492 BNXT_VNIC_UCAST_FLAG;
5493 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5494 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5495 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5496
5497 rc = bnxt_alloc_vnic_attributes(bp);
5498 if (rc)
5499 goto alloc_mem_err;
5500 return 0;
5501
5502 alloc_mem_err:
5503 bnxt_free_mem(bp, true);
5504 return rc;
5505 }
5506
5507 static void bnxt_disable_int(struct bnxt *bp)
5508 {
5509 int i;
5510
5511 if (!bp->bnapi)
5512 return;
5513
5514 for (i = 0; i < bp->cp_nr_rings; i++) {
5515 struct bnxt_napi *bnapi = bp->bnapi[i];
5516 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5517 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5518
5519 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5520 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5521 }
5522 }
5523
5524 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5525 {
5526 struct bnxt_napi *bnapi = bp->bnapi[n];
5527 struct bnxt_cp_ring_info *cpr;
5528
5529 cpr = &bnapi->cp_ring;
5530 return cpr->cp_ring_struct.map_idx;
5531 }
5532
5533 static void bnxt_disable_int_sync(struct bnxt *bp)
5534 {
5535 int i;
5536
5537 if (!bp->irq_tbl)
5538 return;
5539
5540 atomic_inc(&bp->intr_sem);
5541
5542 bnxt_disable_int(bp);
5543 for (i = 0; i < bp->cp_nr_rings; i++) {
5544 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5545
5546 synchronize_irq(bp->irq_tbl[map_idx].vector);
5547 }
5548 }
5549
5550 static void bnxt_enable_int(struct bnxt *bp)
5551 {
5552 int i;
5553
5554 atomic_set(&bp->intr_sem, 0);
5555 for (i = 0; i < bp->cp_nr_rings; i++) {
5556 struct bnxt_napi *bnapi = bp->bnapi[i];
5557 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5558
5559 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5560 }
5561 }
5562
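/* Register the driver with firmware.  The request carries the driver
 * version, the set of async event IDs we want forwarded (built from
 * bnxt_async_events_arr plus the optional caller-supplied bitmap) and,
 * on the PF, the list of VF commands to be forwarded to the driver.
 */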
5563 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5564 bool async_only)
5565 {
5566 DECLARE_BITMAP(async_events_bmap, 256);
5567 u32 *events = (u32 *)async_events_bmap;
5568 struct hwrm_func_drv_rgtr_output *resp;
5569 struct hwrm_func_drv_rgtr_input *req;
5570 u32 flags;
5571 int rc, i;
5572
5573 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5574 if (rc)
5575 return rc;
5576
5577 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5578 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5579 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5580
5581 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5582 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5583 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5584 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5585 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5586 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5587 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5588 req->flags = cpu_to_le32(flags);
5589 req->ver_maj_8b = DRV_VER_MAJ;
5590 req->ver_min_8b = DRV_VER_MIN;
5591 req->ver_upd_8b = DRV_VER_UPD;
5592 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5593 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5594 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5595
5596 if (BNXT_PF(bp)) {
5597 u32 data[8];
5598 int i;
5599
5600 memset(data, 0, sizeof(data));
5601 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5602 u16 cmd = bnxt_vf_req_snif[i];
5603 unsigned int bit, idx;
5604
5605 idx = cmd / 32;
5606 bit = cmd % 32;
5607 data[idx] |= 1 << bit;
5608 }
5609
5610 for (i = 0; i < 8; i++)
5611 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5612
5613 req->enables |=
5614 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5615 }
5616
5617 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5618 req->flags |= cpu_to_le32(
5619 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5620
5621 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5622 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5623 u16 event_id = bnxt_async_events_arr[i];
5624
5625 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5626 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5627 continue;
5628 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5629 !bp->ptp_cfg)
5630 continue;
5631 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5632 }
5633 if (bmap && bmap_size) {
5634 for (i = 0; i < bmap_size; i++) {
5635 if (test_bit(i, bmap))
5636 __set_bit(i, async_events_bmap);
5637 }
5638 }
5639 for (i = 0; i < 8; i++)
5640 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5641
5642 if (async_only)
5643 req->enables =
5644 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5645
5646 resp = hwrm_req_hold(bp, req);
5647 rc = hwrm_req_send(bp, req);
5648 if (!rc) {
5649 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5650 if (resp->flags &
5651 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5652 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5653 }
5654 hwrm_req_drop(bp, req);
5655 return rc;
5656 }
5657
5658 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5659 {
5660 struct hwrm_func_drv_unrgtr_input *req;
5661 int rc;
5662
5663 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5664 return 0;
5665
5666 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5667 if (rc)
5668 return rc;
5669 return hwrm_req_send(bp, req);
5670 }
5671
5672 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5673
5674 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5675 {
5676 struct hwrm_tunnel_dst_port_free_input *req;
5677 int rc;
5678
5679 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5680 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5681 return 0;
5682 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5683 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5684 return 0;
5685
5686 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5687 if (rc)
5688 return rc;
5689
5690 req->tunnel_type = tunnel_type;
5691
5692 switch (tunnel_type) {
5693 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5694 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5695 bp->vxlan_port = 0;
5696 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5697 break;
5698 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5699 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5700 bp->nge_port = 0;
5701 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5702 break;
5703 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5704 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5705 bp->vxlan_gpe_port = 0;
5706 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5707 break;
5708 default:
5709 break;
5710 }
5711
5712 rc = hwrm_req_send(bp, req);
5713 if (rc)
5714 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5715 rc);
5716 if (bp->flags & BNXT_FLAG_TPA)
5717 bnxt_set_tpa(bp, true);
5718 return rc;
5719 }
5720
5721 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5722 u8 tunnel_type)
5723 {
5724 struct hwrm_tunnel_dst_port_alloc_output *resp;
5725 struct hwrm_tunnel_dst_port_alloc_input *req;
5726 int rc;
5727
5728 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5729 if (rc)
5730 return rc;
5731
5732 req->tunnel_type = tunnel_type;
5733 req->tunnel_dst_port_val = port;
5734
5735 resp = hwrm_req_hold(bp, req);
5736 rc = hwrm_req_send(bp, req);
5737 if (rc) {
5738 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5739 rc);
5740 goto err_out;
5741 }
5742
5743 switch (tunnel_type) {
5744 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5745 bp->vxlan_port = port;
5746 bp->vxlan_fw_dst_port_id =
5747 le16_to_cpu(resp->tunnel_dst_port_id);
5748 break;
5749 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5750 bp->nge_port = port;
5751 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5752 break;
5753 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5754 bp->vxlan_gpe_port = port;
5755 bp->vxlan_gpe_fw_dst_port_id =
5756 le16_to_cpu(resp->tunnel_dst_port_id);
5757 break;
5758 default:
5759 break;
5760 }
5761 if (bp->flags & BNXT_FLAG_TPA)
5762 bnxt_set_tpa(bp, true);
5763
5764 err_out:
5765 hwrm_req_drop(bp, req);
5766 return rc;
5767 }
5768
5769 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5770 {
5771 struct hwrm_cfa_l2_set_rx_mask_input *req;
5772 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5773 int rc;
5774
5775 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5776 if (rc)
5777 return rc;
5778
5779 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5780 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5781 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5782 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5783 }
5784 req->mask = cpu_to_le32(vnic->rx_mask);
5785 return hwrm_req_send_silent(bp, req);
5786 }
5787
5788 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5789 {
5790 if (!atomic_dec_and_test(&fltr->refcnt))
5791 return;
5792 spin_lock_bh(&bp->ntp_fltr_lock);
5793 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5794 spin_unlock_bh(&bp->ntp_fltr_lock);
5795 return;
5796 }
5797 hlist_del_rcu(&fltr->base.hash);
5798 bnxt_del_one_usr_fltr(bp, &fltr->base);
5799 if (fltr->base.flags) {
5800 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5801 bp->ntp_fltr_count--;
5802 }
5803 spin_unlock_bh(&bp->ntp_fltr_lock);
5804 kfree_rcu(fltr, base.rcu);
5805 }
5806
5807 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5808 struct bnxt_l2_key *key,
5809 u32 idx)
5810 {
5811 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5812 struct bnxt_l2_filter *fltr;
5813
5814 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5815 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5816
5817 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5818 l2_key->vlan == key->vlan)
5819 return fltr;
5820 }
5821 return NULL;
5822 }
5823
5824 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5825 struct bnxt_l2_key *key,
5826 u32 idx)
5827 {
5828 struct bnxt_l2_filter *fltr = NULL;
5829
5830 rcu_read_lock();
5831 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5832 if (fltr)
5833 atomic_inc(&fltr->refcnt);
5834 rcu_read_unlock();
5835 return fltr;
5836 }
5837
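/* A flow hashes on the 4-tuple only if the current RSS configuration
 * enables TCP or UDP hashing for that IP version; otherwise the hash
 * falls back to the 2-tuple of IP addresses (see
 * bnxt_get_rss_flow_tuple_len() below).
 */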
5838 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5839 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5840 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5841 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5842 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5843
5844 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5845 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5846 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5847 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5848 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5849
5850 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5851 {
5852 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5853 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5854 return sizeof(fkeys->addrs.v4addrs) +
5855 sizeof(fkeys->ports);
5856
5857 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5858 return sizeof(fkeys->addrs.v4addrs);
5859 }
5860
5861 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5862 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5863 return sizeof(fkeys->addrs.v6addrs) +
5864 sizeof(fkeys->ports);
5865
5866 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5867 return sizeof(fkeys->addrs.v6addrs);
5868 }
5869
5870 return 0;
5871 }
5872
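/* Compute the Toeplitz hash of a flow in software, starting from the
 * prefix cached in bp->toeplitz_prefix and shifting in further bytes
 * of the caller's hash key.  Only the upper 32 bits of the 64-bit
 * running value are meaningful; the result is masked down to the
 * ntuple filter hash table size.
 */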
5873 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5874 const unsigned char *key)
5875 {
5876 u64 prefix = bp->toeplitz_prefix, hash = 0;
5877 struct bnxt_ipv4_tuple tuple4;
5878 struct bnxt_ipv6_tuple tuple6;
5879 int i, j, len = 0;
5880 u8 *four_tuple;
5881
5882 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5883 if (!len)
5884 return 0;
5885
5886 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5887 tuple4.v4addrs = fkeys->addrs.v4addrs;
5888 tuple4.ports = fkeys->ports;
5889 four_tuple = (unsigned char *)&tuple4;
5890 } else {
5891 tuple6.v6addrs = fkeys->addrs.v6addrs;
5892 tuple6.ports = fkeys->ports;
5893 four_tuple = (unsigned char *)&tuple6;
5894 }
5895
5896 for (i = 0, j = 8; i < len; i++, j++) {
5897 u8 byte = four_tuple[i];
5898 int bit;
5899
5900 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5901 if (byte & 0x80)
5902 hash ^= prefix;
5903 }
5904 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5905 }
5906
5907 /* The valid part of the hash is in the upper 32 bits. */
5908 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5909 }
5910
5911 #ifdef CONFIG_RFS_ACCEL
5912 static struct bnxt_l2_filter *
5913 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5914 {
5915 struct bnxt_l2_filter *fltr;
5916 u32 idx;
5917
5918 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5919 BNXT_L2_FLTR_HASH_MASK;
5920 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5921 return fltr;
5922 }
5923 #endif
5924
5925 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5926 struct bnxt_l2_key *key, u32 idx)
5927 {
5928 struct hlist_head *head;
5929
5930 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5931 fltr->l2_key.vlan = key->vlan;
5932 fltr->base.type = BNXT_FLTR_TYPE_L2;
5933 if (fltr->base.flags) {
5934 int bit_id;
5935
5936 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5937 bp->max_fltr, 0);
5938 if (bit_id < 0)
5939 return -ENOMEM;
5940 fltr->base.sw_id = (u16)bit_id;
5941 bp->ntp_fltr_count++;
5942 }
5943 head = &bp->l2_fltr_hash_tbl[idx];
5944 hlist_add_head_rcu(&fltr->base.hash, head);
5945 bnxt_insert_usr_fltr(bp, &fltr->base);
5946 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
5947 atomic_set(&fltr->refcnt, 1);
5948 return 0;
5949 }
5950
5951 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5952 struct bnxt_l2_key *key,
5953 gfp_t gfp)
5954 {
5955 struct bnxt_l2_filter *fltr;
5956 u32 idx;
5957 int rc;
5958
5959 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5960 BNXT_L2_FLTR_HASH_MASK;
5961 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5962 if (fltr)
5963 return fltr;
5964
5965 fltr = kzalloc(sizeof(*fltr), gfp);
5966 if (!fltr)
5967 return ERR_PTR(-ENOMEM);
5968 spin_lock_bh(&bp->ntp_fltr_lock);
5969 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5970 spin_unlock_bh(&bp->ntp_fltr_lock);
5971 if (rc) {
5972 bnxt_del_l2_filter(bp, fltr);
5973 fltr = ERR_PTR(rc);
5974 }
5975 return fltr;
5976 }
5977
5978 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
5979 struct bnxt_l2_key *key,
5980 u16 flags)
5981 {
5982 struct bnxt_l2_filter *fltr;
5983 u32 idx;
5984 int rc;
5985
5986 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5987 BNXT_L2_FLTR_HASH_MASK;
5988 spin_lock_bh(&bp->ntp_fltr_lock);
5989 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5990 if (fltr) {
5991 fltr = ERR_PTR(-EEXIST);
5992 goto l2_filter_exit;
5993 }
5994 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
5995 if (!fltr) {
5996 fltr = ERR_PTR(-ENOMEM);
5997 goto l2_filter_exit;
5998 }
5999 fltr->base.flags = flags;
6000 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6001 if (rc) {
6002 spin_unlock_bh(&bp->ntp_fltr_lock);
6003 bnxt_del_l2_filter(bp, fltr);
6004 return ERR_PTR(rc);
6005 }
6006
6007 l2_filter_exit:
6008 spin_unlock_bh(&bp->ntp_fltr_lock);
6009 return fltr;
6010 }
6011
6012 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6013 {
6014 #ifdef CONFIG_BNXT_SRIOV
6015 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6016
6017 return vf->fw_fid;
6018 #else
6019 return INVALID_HW_RING_ID;
6020 #endif
6021 }
6022
6023 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6024 {
6025 struct hwrm_cfa_l2_filter_free_input *req;
6026 u16 target_id = 0xffff;
6027 int rc;
6028
6029 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6030 struct bnxt_pf_info *pf = &bp->pf;
6031
6032 if (fltr->base.vf_idx >= pf->active_vfs)
6033 return -EINVAL;
6034
6035 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6036 if (target_id == INVALID_HW_RING_ID)
6037 return -EINVAL;
6038 }
6039
6040 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6041 if (rc)
6042 return rc;
6043
6044 req->target_id = cpu_to_le16(target_id);
6045 req->l2_filter_id = fltr->base.filter_id;
6046 return hwrm_req_send(bp, req);
6047 }
6048
6049 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6050 {
6051 struct hwrm_cfa_l2_filter_alloc_output *resp;
6052 struct hwrm_cfa_l2_filter_alloc_input *req;
6053 u16 target_id = 0xffff;
6054 int rc;
6055
6056 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6057 struct bnxt_pf_info *pf = &bp->pf;
6058
6059 if (fltr->base.vf_idx >= pf->active_vfs)
6060 return -EINVAL;
6061
6062 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6063 }
6064 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6065 if (rc)
6066 return rc;
6067
6068 req->target_id = cpu_to_le16(target_id);
6069 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6070
6071 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6072 req->flags |=
6073 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6074 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6075 req->enables =
6076 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6077 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6078 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6079 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6080 eth_broadcast_addr(req->l2_addr_mask);
6081
6082 if (fltr->l2_key.vlan) {
6083 req->enables |=
6084 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6085 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6086 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6087 req->num_vlans = 1;
6088 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6089 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6090 }
6091
6092 resp = hwrm_req_hold(bp, req);
6093 rc = hwrm_req_send(bp, req);
6094 if (!rc) {
6095 fltr->base.filter_id = resp->l2_filter_id;
6096 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6097 }
6098 hwrm_req_drop(bp, req);
6099 return rc;
6100 }
6101
6102 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6103 struct bnxt_ntuple_filter *fltr)
6104 {
6105 struct hwrm_cfa_ntuple_filter_free_input *req;
6106 int rc;
6107
6108 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6109 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6110 if (rc)
6111 return rc;
6112
6113 req->ntuple_filter_id = fltr->base.filter_id;
6114 return hwrm_req_send(bp, req);
6115 }
6116
6117 #define BNXT_NTP_FLTR_FLAGS \
6118 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6119 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6120 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6121 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6122 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6123 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6124 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6125 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6126 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6127 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6128 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6129 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6130 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6131
6132 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6133 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6134
6135 void bnxt_fill_ipv6_mask(__be32 mask[4])
6136 {
6137 int i;
6138
6139 for (i = 0; i < 4; i++)
6140 mask[i] = cpu_to_be32(~0);
6141 }
6142
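/* Steer an RFS/ntuple filter to its destination.  Depending on
 * capabilities this is either the VNIC of a custom RSS context, the
 * dedicated ntuple VNIC with an RFS ring table index, or (on older
 * firmware) the destination addressed directly by RX ring index.
 */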
6143 static void
6144 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6145 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6146 struct bnxt_ntuple_filter *fltr)
6147 {
6148 u16 rxq = fltr->base.rxq;
6149
6150 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6151 struct ethtool_rxfh_context *ctx;
6152 struct bnxt_rss_ctx *rss_ctx;
6153 struct bnxt_vnic_info *vnic;
6154
6155 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6156 fltr->base.fw_vnic_id);
6157 if (ctx) {
6158 rss_ctx = ethtool_rxfh_context_priv(ctx);
6159 vnic = &rss_ctx->vnic;
6160
6161 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6162 }
6163 return;
6164 }
6165 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6166 struct bnxt_vnic_info *vnic;
6167 u32 enables;
6168
6169 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6170 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6171 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6172 req->enables |= cpu_to_le32(enables);
6173 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6174 } else {
6175 u32 flags;
6176
6177 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6178 req->flags |= cpu_to_le32(flags);
6179 req->dst_id = cpu_to_le16(rxq);
6180 }
6181 }
6182
6183 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6184 struct bnxt_ntuple_filter *fltr)
6185 {
6186 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6187 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6188 struct bnxt_flow_masks *masks = &fltr->fmasks;
6189 struct flow_keys *keys = &fltr->fkeys;
6190 struct bnxt_l2_filter *l2_fltr;
6191 struct bnxt_vnic_info *vnic;
6192 int rc;
6193
6194 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6195 if (rc)
6196 return rc;
6197
6198 l2_fltr = fltr->l2_fltr;
6199 req->l2_filter_id = l2_fltr->base.filter_id;
6200
6201 if (fltr->base.flags & BNXT_ACT_DROP) {
6202 req->flags =
6203 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6204 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6205 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6206 } else {
6207 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6208 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6209 }
6210 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6211
6212 req->ethertype = htons(ETH_P_IP);
6213 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6214 req->ip_protocol = keys->basic.ip_proto;
6215
6216 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6217 req->ethertype = htons(ETH_P_IPV6);
6218 req->ip_addr_type =
6219 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6220 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6221 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6222 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6223 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6224 } else {
6225 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6226 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6227 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6228 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6229 }
6230 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6231 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6232 req->tunnel_type =
6233 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6234 }
6235
6236 req->src_port = keys->ports.src;
6237 req->src_port_mask = masks->ports.src;
6238 req->dst_port = keys->ports.dst;
6239 req->dst_port_mask = masks->ports.dst;
6240
6241 resp = hwrm_req_hold(bp, req);
6242 rc = hwrm_req_send(bp, req);
6243 if (!rc)
6244 fltr->base.filter_id = resp->ntuple_filter_id;
6245 hwrm_req_drop(bp, req);
6246 return rc;
6247 }
6248
6249 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6250 const u8 *mac_addr)
6251 {
6252 struct bnxt_l2_filter *fltr;
6253 struct bnxt_l2_key key;
6254 int rc;
6255
6256 ether_addr_copy(key.dst_mac_addr, mac_addr);
6257 key.vlan = 0;
6258 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6259 if (IS_ERR(fltr))
6260 return PTR_ERR(fltr);
6261
6262 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6263 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6264 if (rc)
6265 bnxt_del_l2_filter(bp, fltr);
6266 else
6267 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6268 return rc;
6269 }
6270
6271 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6272 {
6273 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6274
6275 /* Any associated ntuple filters will also be cleared by firmware. */
6276 for (i = 0; i < num_of_vnics; i++) {
6277 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6278
6279 for (j = 0; j < vnic->uc_filter_count; j++) {
6280 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6281
6282 bnxt_hwrm_l2_filter_free(bp, fltr);
6283 bnxt_del_l2_filter(bp, fltr);
6284 }
6285 vnic->uc_filter_count = 0;
6286 }
6287 }
6288
6289 #define BNXT_DFLT_TUNL_TPA_BMAP \
6290 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6291 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6292 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6293
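/* If firmware supports per-tunnel-type TPA, enable aggregation for the
 * default tunnel types plus any UDP tunnel types whose destination
 * ports are currently configured (VXLAN, VXLAN-GPE, Geneve).
 */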
6294 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6295 struct hwrm_vnic_tpa_cfg_input *req)
6296 {
6297 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6298
6299 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6300 return;
6301
6302 if (bp->vxlan_port)
6303 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6304 if (bp->vxlan_gpe_port)
6305 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6306 if (bp->nge_port)
6307 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6308
6309 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6310 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6311 }
6312
6313 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6314 u32 tpa_flags)
6315 {
6316 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6317 struct hwrm_vnic_tpa_cfg_input *req;
6318 int rc;
6319
6320 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6321 return 0;
6322
6323 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6324 if (rc)
6325 return rc;
6326
6327 if (tpa_flags) {
6328 u16 mss = bp->dev->mtu - 40;
6329 u32 nsegs, n, segs = 0, flags;
6330
6331 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6332 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6333 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6334 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6335 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6336 if (tpa_flags & BNXT_FLAG_GRO)
6337 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6338
6339 req->flags = cpu_to_le32(flags);
6340
6341 req->enables =
6342 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6343 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6344 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6345
6346 		/* The number of segs is in log2 units, and the first
6347 		 * packet is not included as part of these units.
6348 		 */
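		/* For example, assuming a 4K BNXT_RX_PAGE_SIZE and the
		 * default MAX_SKB_FRAGS of 17, a 1500-byte MTU gives
		 * mss = 1460, n = 4096 / 1460 = 2, nsegs = 16 * 2 = 32,
		 * so max_agg_segs = ilog2(32) = 5 on pre-P5 chips.
		 */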
6349 if (mss <= BNXT_RX_PAGE_SIZE) {
6350 n = BNXT_RX_PAGE_SIZE / mss;
6351 nsegs = (MAX_SKB_FRAGS - 1) * n;
6352 } else {
6353 n = mss / BNXT_RX_PAGE_SIZE;
6354 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6355 n++;
6356 nsegs = (MAX_SKB_FRAGS - n) / n;
6357 }
6358
6359 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6360 segs = MAX_TPA_SEGS_P5;
6361 max_aggs = bp->max_tpa;
6362 } else {
6363 segs = ilog2(nsegs);
6364 }
6365 req->max_agg_segs = cpu_to_le16(segs);
6366 req->max_aggs = cpu_to_le16(max_aggs);
6367
6368 req->min_agg_len = cpu_to_le32(512);
6369 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6370 }
6371 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6372
6373 return hwrm_req_send(bp, req);
6374 }
6375
6376 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6377 {
6378 struct bnxt_ring_grp_info *grp_info;
6379
6380 grp_info = &bp->grp_info[ring->grp_idx];
6381 return grp_info->cp_fw_ring_id;
6382 }
6383
6384 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6385 {
6386 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6387 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6388 else
6389 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6390 }
6391
6392 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6393 {
6394 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6395 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6396 else
6397 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6398 }
6399
6400 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6401 {
6402 int entries;
6403
6404 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6405 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6406 else
6407 entries = HW_HASH_INDEX_SIZE;
6408
6409 bp->rss_indir_tbl_entries = entries;
6410 bp->rss_indir_tbl =
6411 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6412 if (!bp->rss_indir_tbl)
6413 return -ENOMEM;
6414
6415 return 0;
6416 }
6417
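/* Populate an RSS indirection table with the default round-robin
 * spread across the active RX rings (Nitro A0 reserves the last ring),
 * writing either into the given ethtool RSS context or into the
 * device-wide table, and zero any unused tail entries.
 */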
6418 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6419 struct ethtool_rxfh_context *rss_ctx)
6420 {
6421 u16 max_rings, max_entries, pad, i;
6422 u32 *rss_indir_tbl;
6423
6424 if (!bp->rx_nr_rings)
6425 return;
6426
6427 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6428 max_rings = bp->rx_nr_rings - 1;
6429 else
6430 max_rings = bp->rx_nr_rings;
6431
6432 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6433 if (rss_ctx)
6434 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6435 else
6436 rss_indir_tbl = &bp->rss_indir_tbl[0];
6437
6438 for (i = 0; i < max_entries; i++)
6439 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6440
6441 pad = bp->rss_indir_tbl_entries - max_entries;
6442 if (pad)
6443 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6444 }
6445
6446 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6447 {
6448 u32 i, tbl_size, max_ring = 0;
6449
6450 if (!bp->rss_indir_tbl)
6451 return 0;
6452
6453 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6454 for (i = 0; i < tbl_size; i++)
6455 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6456 return max_ring;
6457 }
6458
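/* Return the number of RSS contexts needed for the given RX ring
 * count: P5+ chips need one context per BNXT_RSS_TABLE_ENTRIES_P5
 * rings (rounded up), Nitro A0 needs two, and all other chips need
 * one.
 */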
6459 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6460 {
6461 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6462 if (!rx_rings)
6463 return 0;
6464 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6465 BNXT_RSS_TABLE_ENTRIES_P5);
6466 }
6467 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6468 return 2;
6469 return 1;
6470 }
6471
6472 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6473 {
6474 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6475 u16 i, j;
6476
6477 /* Fill the RSS indirection table with ring group ids */
6478 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6479 if (!no_rss)
6480 j = bp->rss_indir_tbl[i];
6481 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6482 }
6483 }
6484
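/* On P5+ chips each indirection table entry is a pair of ring IDs:
 * the RX ring followed by its completion ring.  The ring index comes
 * from the default ntuple spread, the ethtool RSS context, or the
 * device-wide indirection table, depending on the VNIC flags.
 */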
6485 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6486 struct bnxt_vnic_info *vnic)
6487 {
6488 __le16 *ring_tbl = vnic->rss_table;
6489 struct bnxt_rx_ring_info *rxr;
6490 u16 tbl_size, i;
6491
6492 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6493
6494 for (i = 0; i < tbl_size; i++) {
6495 u16 ring_id, j;
6496
6497 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6498 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6499 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6500 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6501 else
6502 j = bp->rss_indir_tbl[i];
6503 rxr = &bp->rx_ring[j];
6504
6505 ring_id = rxr->rx_ring_struct.fw_ring_id;
6506 *ring_tbl++ = cpu_to_le16(ring_id);
6507 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6508 *ring_tbl++ = cpu_to_le16(ring_id);
6509 }
6510 }
6511
6512 static void
6513 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6514 struct bnxt_vnic_info *vnic)
6515 {
6516 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6517 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6518 if (bp->flags & BNXT_FLAG_CHIP_P7)
6519 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6520 } else {
6521 bnxt_fill_hw_rss_tbl(bp, vnic);
6522 }
6523
6524 if (bp->rss_hash_delta) {
6525 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6526 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6527 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6528 else
6529 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6530 } else {
6531 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6532 }
6533 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6534 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6535 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6536 }
6537
6538 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6539 bool set_rss)
6540 {
6541 struct hwrm_vnic_rss_cfg_input *req;
6542 int rc;
6543
6544 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6545 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6546 return 0;
6547
6548 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6549 if (rc)
6550 return rc;
6551
6552 if (set_rss)
6553 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6554 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6555 return hwrm_req_send(bp, req);
6556 }
6557
6558 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6559 struct bnxt_vnic_info *vnic, bool set_rss)
6560 {
6561 struct hwrm_vnic_rss_cfg_input *req;
6562 dma_addr_t ring_tbl_map;
6563 u32 i, nr_ctxs;
6564 int rc;
6565
6566 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6567 if (rc)
6568 return rc;
6569
6570 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6571 if (!set_rss)
6572 return hwrm_req_send(bp, req);
6573
6574 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6575 ring_tbl_map = vnic->rss_table_dma_addr;
6576 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6577
6578 hwrm_req_hold(bp, req);
6579 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6580 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6581 req->ring_table_pair_index = i;
6582 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6583 rc = hwrm_req_send(bp, req);
6584 if (rc)
6585 goto exit;
6586 }
6587
6588 exit:
6589 hwrm_req_drop(bp, req);
6590 return rc;
6591 }
6592
6593 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6594 {
6595 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6596 struct hwrm_vnic_rss_qcfg_output *resp;
6597 struct hwrm_vnic_rss_qcfg_input *req;
6598
6599 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6600 return;
6601
6602 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6603 	/* All contexts are configured to the same hash_type; context zero always exists */
6604 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6605 resp = hwrm_req_hold(bp, req);
6606 if (!hwrm_req_send(bp, req)) {
6607 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6608 bp->rss_hash_delta = 0;
6609 }
6610 hwrm_req_drop(bp, req);
6611 }
6612
6613 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6614 {
6615 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6616 struct hwrm_vnic_plcmodes_cfg_input *req;
6617 int rc;
6618
6619 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6620 if (rc)
6621 return rc;
6622
6623 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6624 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6625 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6626
6627 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6628 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6629 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6630 req->enables |=
6631 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6632 req->hds_threshold = cpu_to_le16(hds_thresh);
6633 }
6634 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6635 return hwrm_req_send(bp, req);
6636 }
6637
6638 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6639 struct bnxt_vnic_info *vnic,
6640 u16 ctx_idx)
6641 {
6642 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6643
6644 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6645 return;
6646
6647 req->rss_cos_lb_ctx_id =
6648 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6649
6650 hwrm_req_send(bp, req);
6651 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6652 }
6653
6654 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6655 {
6656 int i, j;
6657
6658 for (i = 0; i < bp->nr_vnics; i++) {
6659 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6660
6661 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6662 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6663 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6664 }
6665 }
6666 bp->rsscos_nr_ctxs = 0;
6667 }
6668
6669 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6670 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6671 {
6672 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6673 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6674 int rc;
6675
6676 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6677 if (rc)
6678 return rc;
6679
6680 resp = hwrm_req_hold(bp, req);
6681 rc = hwrm_req_send(bp, req);
6682 if (!rc)
6683 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6684 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6685 hwrm_req_drop(bp, req);
6686
6687 return rc;
6688 }
6689
6690 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6691 {
6692 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6693 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6694 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6695 }
6696
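/* Configure a VNIC in firmware.  P5+ chips only need the default RX
 * and completion ring IDs plus the MRU; older chips also program the
 * default ring group, the RSS/COS/LB rules, VLAN stripping and, when
 * a ULP (RoCE) driver is registered, the RoCE VNIC mode for the
 * default VNIC.
 */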
6697 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6698 {
6699 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6700 struct hwrm_vnic_cfg_input *req;
6701 unsigned int ring = 0, grp_idx;
6702 u16 def_vlan = 0;
6703 int rc;
6704
6705 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6706 if (rc)
6707 return rc;
6708
6709 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6710 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6711
6712 req->default_rx_ring_id =
6713 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6714 req->default_cmpl_ring_id =
6715 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6716 req->enables =
6717 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6718 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6719 goto vnic_mru;
6720 }
6721 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6722 	/* Only RSS is supported for now; COS & LB are TBD */
6723 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6724 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6725 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6726 VNIC_CFG_REQ_ENABLES_MRU);
6727 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6728 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6729 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6730 VNIC_CFG_REQ_ENABLES_MRU);
6731 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6732 } else {
6733 req->rss_rule = cpu_to_le16(0xffff);
6734 }
6735
6736 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6737 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6738 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6739 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6740 } else {
6741 req->cos_rule = cpu_to_le16(0xffff);
6742 }
6743
6744 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6745 ring = 0;
6746 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6747 ring = vnic->vnic_id - 1;
6748 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6749 ring = bp->rx_nr_rings - 1;
6750
6751 grp_idx = bp->rx_ring[ring].bnapi->index;
6752 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6753 req->lb_rule = cpu_to_le16(0xffff);
6754 vnic_mru:
6755 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
6756 req->mru = cpu_to_le16(vnic->mru);
6757
6758 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6759 #ifdef CONFIG_BNXT_SRIOV
6760 if (BNXT_VF(bp))
6761 def_vlan = bp->vf.vlan;
6762 #endif
6763 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6764 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6765 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6766 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6767
6768 return hwrm_req_send(bp, req);
6769 }
6770
6771 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6772 struct bnxt_vnic_info *vnic)
6773 {
6774 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6775 struct hwrm_vnic_free_input *req;
6776
6777 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6778 return;
6779
6780 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6781
6782 hwrm_req_send(bp, req);
6783 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6784 }
6785 }
6786
6787 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6788 {
6789 u16 i;
6790
6791 for (i = 0; i < bp->nr_vnics; i++)
6792 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6793 }
6794
6795 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6796 unsigned int start_rx_ring_idx,
6797 unsigned int nr_rings)
6798 {
6799 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6800 struct hwrm_vnic_alloc_output *resp;
6801 struct hwrm_vnic_alloc_input *req;
6802 int rc;
6803
6804 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6805 if (rc)
6806 return rc;
6807
6808 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6809 goto vnic_no_ring_grps;
6810
6811 /* map ring groups to this vnic */
6812 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6813 grp_idx = bp->rx_ring[i].bnapi->index;
6814 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6815 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6816 j, nr_rings);
6817 break;
6818 }
6819 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6820 }
6821
6822 vnic_no_ring_grps:
6823 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6824 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6825 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6826 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6827
6828 resp = hwrm_req_hold(bp, req);
6829 rc = hwrm_req_send(bp, req);
6830 if (!rc)
6831 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6832 hwrm_req_drop(bp, req);
6833 return rc;
6834 }
6835
6836 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6837 {
6838 struct hwrm_vnic_qcaps_output *resp;
6839 struct hwrm_vnic_qcaps_input *req;
6840 int rc;
6841
6842 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6843 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6844 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6845 if (bp->hwrm_spec_code < 0x10600)
6846 return 0;
6847
6848 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6849 if (rc)
6850 return rc;
6851
6852 resp = hwrm_req_hold(bp, req);
6853 rc = hwrm_req_send(bp, req);
6854 if (!rc) {
6855 u32 flags = le32_to_cpu(resp->flags);
6856
6857 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6858 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6859 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6860 if (flags &
6861 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6862 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6863
6864 /* Older P5 fw before EXT_HW_STATS support did not set
6865 * VLAN_STRIP_CAP properly.
6866 */
6867 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6868 (BNXT_CHIP_P5(bp) &&
6869 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6870 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6871 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6872 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6873 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6874 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6875 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6876 if (bp->max_tpa_v2) {
6877 if (BNXT_CHIP_P5(bp))
6878 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6879 else
6880 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6881 }
6882 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6883 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6884 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6885 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6886 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6887 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6888 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6889 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6890 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6891 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6892 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
6893 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
6894 }
6895 hwrm_req_drop(bp, req);
6896 return rc;
6897 }
6898
6899 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6900 {
6901 struct hwrm_ring_grp_alloc_output *resp;
6902 struct hwrm_ring_grp_alloc_input *req;
6903 int rc;
6904 u16 i;
6905
6906 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6907 return 0;
6908
6909 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6910 if (rc)
6911 return rc;
6912
6913 resp = hwrm_req_hold(bp, req);
6914 for (i = 0; i < bp->rx_nr_rings; i++) {
6915 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6916
6917 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6918 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6919 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6920 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6921
6922 rc = hwrm_req_send(bp, req);
6923
6924 if (rc)
6925 break;
6926
6927 bp->grp_info[grp_idx].fw_grp_id =
6928 le32_to_cpu(resp->ring_group_id);
6929 }
6930 hwrm_req_drop(bp, req);
6931 return rc;
6932 }
6933
6934 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6935 {
6936 struct hwrm_ring_grp_free_input *req;
6937 u16 i;
6938
6939 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6940 return;
6941
6942 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6943 return;
6944
6945 hwrm_req_hold(bp, req);
6946 for (i = 0; i < bp->cp_nr_rings; i++) {
6947 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6948 continue;
6949 req->ring_group_id =
6950 cpu_to_le32(bp->grp_info[i].fw_grp_id);
6951
6952 hwrm_req_send(bp, req);
6953 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6954 }
6955 hwrm_req_drop(bp, req);
6956 }
6957
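/* Send HWRM_RING_ALLOC for one ring.  The request encodes the ring
 * type specific associations (TX rings to their completion ring and
 * stats context, RX/AGG rings to stats context and buffer sizes on
 * P5+, completion rings to their NQ) and stores the returned firmware
 * ring ID in the ring structure.
 */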
6958 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6959 struct bnxt_ring_struct *ring,
6960 u32 ring_type, u32 map_index)
6961 {
6962 struct hwrm_ring_alloc_output *resp;
6963 struct hwrm_ring_alloc_input *req;
6964 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
6965 struct bnxt_ring_grp_info *grp_info;
6966 int rc, err = 0;
6967 u16 ring_id;
6968
6969 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6970 if (rc)
6971 goto exit;
6972
6973 req->enables = 0;
6974 if (rmem->nr_pages > 1) {
6975 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
6976 /* Page size is in log2 units */
6977 req->page_size = BNXT_PAGE_SHIFT;
6978 req->page_tbl_depth = 1;
6979 } else {
6980 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
6981 }
6982 req->fbo = 0;
6983 /* Association of ring index with doorbell index and MSIX number */
6984 req->logical_id = cpu_to_le16(map_index);
6985
6986 switch (ring_type) {
6987 case HWRM_RING_ALLOC_TX: {
6988 struct bnxt_tx_ring_info *txr;
6989 u16 flags = 0;
6990
6991 txr = container_of(ring, struct bnxt_tx_ring_info,
6992 tx_ring_struct);
6993 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
6994 /* Association of transmit ring with completion ring */
6995 grp_info = &bp->grp_info[ring->grp_idx];
6996 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6997 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6998 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6999 req->queue_id = cpu_to_le16(ring->queue_id);
7000 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7001 req->cmpl_coal_cnt =
7002 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7003 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7004 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7005 req->flags = cpu_to_le16(flags);
7006 break;
7007 }
7008 case HWRM_RING_ALLOC_RX:
7009 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7010 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
7011 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7012 u16 flags = 0;
7013
7014 /* Association of rx ring with stats context */
7015 grp_info = &bp->grp_info[ring->grp_idx];
7016 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7017 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7018 req->enables |= cpu_to_le32(
7019 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
7020 if (NET_IP_ALIGN == 2)
7021 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
7022 req->flags = cpu_to_le16(flags);
7023 }
7024 break;
7025 case HWRM_RING_ALLOC_AGG:
7026 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7027 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7028 /* Association of agg ring with rx ring */
7029 grp_info = &bp->grp_info[ring->grp_idx];
7030 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7031 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
7032 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7033 req->enables |= cpu_to_le32(
7034 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
7035 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
7036 } else {
7037 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7038 }
7039 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
7040 break;
7041 case HWRM_RING_ALLOC_CMPL:
7042 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7043 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7044 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7045 /* Association of cp ring with nq */
7046 grp_info = &bp->grp_info[map_index];
7047 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7048 req->cq_handle = cpu_to_le64(ring->handle);
7049 req->enables |= cpu_to_le32(
7050 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7051 } else {
7052 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7053 }
7054 break;
7055 case HWRM_RING_ALLOC_NQ:
7056 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7057 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7058 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7059 break;
7060 default:
7061 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7062 ring_type);
7063 return -1;
7064 }
7065
7066 resp = hwrm_req_hold(bp, req);
7067 rc = hwrm_req_send(bp, req);
7068 err = le16_to_cpu(resp->error_code);
7069 ring_id = le16_to_cpu(resp->ring_id);
7070 hwrm_req_drop(bp, req);
7071
7072 exit:
7073 if (rc || err) {
7074 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7075 ring_type, rc, err);
7076 return -EIO;
7077 }
7078 ring->fw_ring_id = ring_id;
7079 return rc;
7080 }
7081
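/* Tell the firmware which completion ring should receive asynchronous
 * event notifications.  The PF uses HWRM_FUNC_CFG with its own fid,
 * while a VF must use HWRM_FUNC_VF_CFG instead.
 */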
7082 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7083 {
7084 int rc;
7085
7086 if (BNXT_PF(bp)) {
7087 struct hwrm_func_cfg_input *req;
7088
7089 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7090 if (rc)
7091 return rc;
7092
7093 req->fid = cpu_to_le16(0xffff);
7094 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7095 req->async_event_cr = cpu_to_le16(idx);
7096 return hwrm_req_send(bp, req);
7097 } else {
7098 struct hwrm_func_vf_cfg_input *req;
7099
7100 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7101 if (rc)
7102 return rc;
7103
7104 req->enables =
7105 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7106 req->async_event_cr = cpu_to_le16(idx);
7107 return hwrm_req_send(bp, req);
7108 }
7109 }
7110
7111 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7112 u32 ring_type)
7113 {
7114 switch (ring_type) {
7115 case HWRM_RING_ALLOC_TX:
7116 db->db_ring_mask = bp->tx_ring_mask;
7117 break;
7118 case HWRM_RING_ALLOC_RX:
7119 db->db_ring_mask = bp->rx_ring_mask;
7120 break;
7121 case HWRM_RING_ALLOC_AGG:
7122 db->db_ring_mask = bp->rx_agg_ring_mask;
7123 break;
7124 case HWRM_RING_ALLOC_CMPL:
7125 case HWRM_RING_ALLOC_NQ:
7126 db->db_ring_mask = bp->cp_ring_mask;
7127 break;
7128 }
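	/* On P7 chips the doorbell also carries an epoch bit that toggles
	 * each time the producer index wraps.  For example, with a ring of
	 * 2048 entries (db_ring_mask = 2047), db_epoch_mask is 2048, so the
	 * epoch comes from bit 11 of the index and db_epoch_shift moves it
	 * up to the DBR_EPOCH_SFT position.
	 */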
7129 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7130 db->db_epoch_mask = db->db_ring_mask + 1;
7131 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7132 }
7133 }
7134
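/* Set up the doorbell for a ring.  P5 and newer chips use a 64-bit
 * doorbell key (path, type and ring xid) written at a fixed offset in
 * BAR 1; legacy chips use a 32-bit key with the doorbell located at
 * map_idx * 0x80 within BAR 1.
 */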
7135 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7136 u32 map_idx, u32 xid)
7137 {
7138 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7139 switch (ring_type) {
7140 case HWRM_RING_ALLOC_TX:
7141 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7142 break;
7143 case HWRM_RING_ALLOC_RX:
7144 case HWRM_RING_ALLOC_AGG:
7145 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7146 break;
7147 case HWRM_RING_ALLOC_CMPL:
7148 db->db_key64 = DBR_PATH_L2;
7149 break;
7150 case HWRM_RING_ALLOC_NQ:
7151 db->db_key64 = DBR_PATH_L2;
7152 break;
7153 }
7154 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7155
7156 if (bp->flags & BNXT_FLAG_CHIP_P7)
7157 db->db_key64 |= DBR_VALID;
7158
7159 db->doorbell = bp->bar1 + bp->db_offset;
7160 } else {
7161 db->doorbell = bp->bar1 + map_idx * 0x80;
7162 switch (ring_type) {
7163 case HWRM_RING_ALLOC_TX:
7164 db->db_key32 = DB_KEY_TX;
7165 break;
7166 case HWRM_RING_ALLOC_RX:
7167 case HWRM_RING_ALLOC_AGG:
7168 db->db_key32 = DB_KEY_RX;
7169 break;
7170 case HWRM_RING_ALLOC_CMPL:
7171 db->db_key32 = DB_KEY_CP;
7172 break;
7173 }
7174 }
7175 bnxt_set_db_mask(bp, db, ring_type);
7176 }
7177
7178 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7179 struct bnxt_rx_ring_info *rxr)
7180 {
7181 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7182 struct bnxt_napi *bnapi = rxr->bnapi;
7183 u32 type = HWRM_RING_ALLOC_RX;
7184 u32 map_idx = bnapi->index;
7185 int rc;
7186
7187 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7188 if (rc)
7189 return rc;
7190
7191 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7192 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7193
7194 return 0;
7195 }
7196
7197 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7198 struct bnxt_rx_ring_info *rxr)
7199 {
7200 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7201 u32 type = HWRM_RING_ALLOC_AGG;
7202 u32 grp_idx = ring->grp_idx;
7203 u32 map_idx;
7204 int rc;
7205
7206 map_idx = grp_idx + bp->rx_nr_rings;
7207 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7208 if (rc)
7209 return rc;
7210
7211 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7212 ring->fw_ring_id);
7213 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7214 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7215 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7216
7217 return 0;
7218 }
7219
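/* Allocate all firmware rings for the device.  NQ (or legacy completion)
 * rings are created first so the async event ring can be set, then the
 * TX rings (each preceded by its own completion ring on P5+ chips), the
 * RX rings, and finally the RX aggregation rings when aggregation is
 * enabled.
 */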
7220 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7221 {
7222 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7223 int i, rc = 0;
7224 u32 type;
7225
7226 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7227 type = HWRM_RING_ALLOC_NQ;
7228 else
7229 type = HWRM_RING_ALLOC_CMPL;
7230 for (i = 0; i < bp->cp_nr_rings; i++) {
7231 struct bnxt_napi *bnapi = bp->bnapi[i];
7232 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7233 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7234 u32 map_idx = ring->map_idx;
7235 unsigned int vector;
7236
7237 vector = bp->irq_tbl[map_idx].vector;
7238 disable_irq_nosync(vector);
7239 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7240 if (rc) {
7241 enable_irq(vector);
7242 goto err_out;
7243 }
7244 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7245 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7246 enable_irq(vector);
7247 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7248
7249 if (!i) {
7250 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7251 if (rc)
7252 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7253 }
7254 }
7255
7256 type = HWRM_RING_ALLOC_TX;
7257 for (i = 0; i < bp->tx_nr_rings; i++) {
7258 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7259 struct bnxt_ring_struct *ring;
7260 u32 map_idx;
7261
7262 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7263 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
7264 struct bnxt_napi *bnapi = txr->bnapi;
7265 u32 type2 = HWRM_RING_ALLOC_CMPL;
7266
7267 ring = &cpr2->cp_ring_struct;
7268 ring->handle = BNXT_SET_NQ_HDL(cpr2);
7269 map_idx = bnapi->index;
7270 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
7271 if (rc)
7272 goto err_out;
7273 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
7274 ring->fw_ring_id);
7275 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
7276 }
7277 ring = &txr->tx_ring_struct;
7278 map_idx = i;
7279 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7280 if (rc)
7281 goto err_out;
7282 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
7283 }
7284
7285 for (i = 0; i < bp->rx_nr_rings; i++) {
7286 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7287
7288 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7289 if (rc)
7290 goto err_out;
7291 /* If we have agg rings, post agg buffers first. */
7292 if (!agg_rings)
7293 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7294 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7295 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
7296 struct bnxt_napi *bnapi = rxr->bnapi;
7297 u32 type2 = HWRM_RING_ALLOC_CMPL;
7298 struct bnxt_ring_struct *ring;
7299 u32 map_idx = bnapi->index;
7300
7301 ring = &cpr2->cp_ring_struct;
7302 ring->handle = BNXT_SET_NQ_HDL(cpr2);
7303 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
7304 if (rc)
7305 goto err_out;
7306 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
7307 ring->fw_ring_id);
7308 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
7309 }
7310 }
7311
7312 if (agg_rings) {
7313 for (i = 0; i < bp->rx_nr_rings; i++) {
7314 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7315 if (rc)
7316 goto err_out;
7317 }
7318 }
7319 err_out:
7320 return rc;
7321 }
7322
7323 static void bnxt_cancel_dim(struct bnxt *bp)
7324 {
7325 int i;
7326
7327 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7328 * if NAPI is enabled.
7329 */
7330 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7331 return;
7332
7333 /* Make sure NAPI sees that the VNIC is disabled */
7334 synchronize_net();
7335 for (i = 0; i < bp->rx_nr_rings; i++) {
7336 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7337 struct bnxt_napi *bnapi = rxr->bnapi;
7338
7339 cancel_work_sync(&bnapi->cp_ring.dim.work);
7340 }
7341 }
7342
7343 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7344 struct bnxt_ring_struct *ring,
7345 u32 ring_type, int cmpl_ring_id)
7346 {
7347 struct hwrm_ring_free_output *resp;
7348 struct hwrm_ring_free_input *req;
7349 u16 error_code = 0;
7350 int rc;
7351
7352 if (BNXT_NO_FW_ACCESS(bp))
7353 return 0;
7354
7355 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7356 if (rc)
7357 goto exit;
7358
7359 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7360 req->ring_type = ring_type;
7361 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7362
7363 resp = hwrm_req_hold(bp, req);
7364 rc = hwrm_req_send(bp, req);
7365 error_code = le16_to_cpu(resp->error_code);
7366 hwrm_req_drop(bp, req);
7367 exit:
7368 if (rc || error_code) {
7369 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7370 ring_type, rc, error_code);
7371 return -EIO;
7372 }
7373 return 0;
7374 }
7375
7376 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7377 struct bnxt_rx_ring_info *rxr,
7378 bool close_path)
7379 {
7380 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7381 u32 grp_idx = rxr->bnapi->index;
7382 u32 cmpl_ring_id;
7383
7384 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7385 return;
7386
7387 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7388 hwrm_ring_free_send_msg(bp, ring,
7389 RING_FREE_REQ_RING_TYPE_RX,
7390 close_path ? cmpl_ring_id :
7391 INVALID_HW_RING_ID);
7392 ring->fw_ring_id = INVALID_HW_RING_ID;
7393 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7394 }
7395
7396 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7397 struct bnxt_rx_ring_info *rxr,
7398 bool close_path)
7399 {
7400 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7401 u32 grp_idx = rxr->bnapi->index;
7402 u32 type, cmpl_ring_id;
7403
7404 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7405 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7406 else
7407 type = RING_FREE_REQ_RING_TYPE_RX;
7408
7409 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7410 return;
7411
7412 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7413 hwrm_ring_free_send_msg(bp, ring, type,
7414 close_path ? cmpl_ring_id :
7415 INVALID_HW_RING_ID);
7416 ring->fw_ring_id = INVALID_HW_RING_ID;
7417 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7418 }
7419
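/* Free all firmware rings in roughly the reverse order of allocation:
 * TX rings first, then RX and aggregation rings, and the completion/NQ
 * rings last after interrupts have been disabled.
 */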
7420 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7421 {
7422 u32 type;
7423 int i;
7424
7425 if (!bp->bnapi)
7426 return;
7427
7428 for (i = 0; i < bp->tx_nr_rings; i++) {
7429 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7430 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7431
7432 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7433 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
7434
7435 hwrm_ring_free_send_msg(bp, ring,
7436 RING_FREE_REQ_RING_TYPE_TX,
7437 close_path ? cmpl_ring_id :
7438 INVALID_HW_RING_ID);
7439 ring->fw_ring_id = INVALID_HW_RING_ID;
7440 }
7441 }
7442
7443 bnxt_cancel_dim(bp);
7444 for (i = 0; i < bp->rx_nr_rings; i++) {
7445 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7446 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7447 }
7448
7449 /* The completion rings are about to be freed. After that the
7450 * IRQ doorbell will not work anymore. So we need to disable
7451 * IRQ here.
7452 */
7453 bnxt_disable_int_sync(bp);
7454
7455 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7456 type = RING_FREE_REQ_RING_TYPE_NQ;
7457 else
7458 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7459 for (i = 0; i < bp->cp_nr_rings; i++) {
7460 struct bnxt_napi *bnapi = bp->bnapi[i];
7461 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7462 struct bnxt_ring_struct *ring;
7463 int j;
7464
7465 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
7466 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
7467
7468 ring = &cpr2->cp_ring_struct;
7469 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7470 continue;
7471 hwrm_ring_free_send_msg(bp, ring,
7472 RING_FREE_REQ_RING_TYPE_L2_CMPL,
7473 INVALID_HW_RING_ID);
7474 ring->fw_ring_id = INVALID_HW_RING_ID;
7475 }
7476 ring = &cpr->cp_ring_struct;
7477 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7478 hwrm_ring_free_send_msg(bp, ring, type,
7479 INVALID_HW_RING_ID);
7480 ring->fw_ring_id = INVALID_HW_RING_ID;
7481 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7482 }
7483 }
7484 }
7485
7486 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7487 bool shared);
7488 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7489 bool shared);
7490
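/* Query the firmware (HWRM_FUNC_QCFG) for the resources currently
 * reserved for this function and cache them in bp->hw_resc.  On P5+
 * chips the reserved RX/TX counts may be trimmed so that they fit
 * within the number of reserved completion rings.
 */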
7491 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7492 {
7493 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7494 struct hwrm_func_qcfg_output *resp;
7495 struct hwrm_func_qcfg_input *req;
7496 int rc;
7497
7498 if (bp->hwrm_spec_code < 0x10601)
7499 return 0;
7500
7501 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7502 if (rc)
7503 return rc;
7504
7505 req->fid = cpu_to_le16(0xffff);
7506 resp = hwrm_req_hold(bp, req);
7507 rc = hwrm_req_send(bp, req);
7508 if (rc) {
7509 hwrm_req_drop(bp, req);
7510 return rc;
7511 }
7512
7513 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7514 if (BNXT_NEW_RM(bp)) {
7515 u16 cp, stats;
7516
7517 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7518 hw_resc->resv_hw_ring_grps =
7519 le32_to_cpu(resp->alloc_hw_ring_grps);
7520 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7521 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7522 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7523 stats = le16_to_cpu(resp->alloc_stat_ctx);
7524 hw_resc->resv_irqs = cp;
7525 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7526 int rx = hw_resc->resv_rx_rings;
7527 int tx = hw_resc->resv_tx_rings;
7528
7529 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7530 rx >>= 1;
7531 if (cp < (rx + tx)) {
7532 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7533 if (rc)
7534 goto get_rings_exit;
7535 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7536 rx <<= 1;
7537 hw_resc->resv_rx_rings = rx;
7538 hw_resc->resv_tx_rings = tx;
7539 }
7540 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7541 hw_resc->resv_hw_ring_grps = rx;
7542 }
7543 hw_resc->resv_cp_rings = cp;
7544 hw_resc->resv_stat_ctxs = stats;
7545 }
7546 get_rings_exit:
7547 hwrm_req_drop(bp, req);
7548 return rc;
7549 }
7550
7551 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7552 {
7553 struct hwrm_func_qcfg_output *resp;
7554 struct hwrm_func_qcfg_input *req;
7555 int rc;
7556
7557 if (bp->hwrm_spec_code < 0x10601)
7558 return 0;
7559
7560 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7561 if (rc)
7562 return rc;
7563
7564 req->fid = cpu_to_le16(fid);
7565 resp = hwrm_req_hold(bp, req);
7566 rc = hwrm_req_send(bp, req);
7567 if (!rc)
7568 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7569
7570 hwrm_req_drop(bp, req);
7571 return rc;
7572 }
7573
7574 static bool bnxt_rfs_supported(struct bnxt *bp);
7575
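/* Build (but do not send) a HWRM_FUNC_CFG request describing the ring,
 * VNIC, stat context and MSI-X counts in @hwr that the PF wants the
 * firmware to reserve.  Fields are only acted on by the firmware when
 * the corresponding enables bit is set.
 */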
7576 static struct hwrm_func_cfg_input *
7577 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7578 {
7579 struct hwrm_func_cfg_input *req;
7580 u32 enables = 0;
7581
7582 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7583 return NULL;
7584
7585 req->fid = cpu_to_le16(0xffff);
7586 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7587 req->num_tx_rings = cpu_to_le16(hwr->tx);
7588 if (BNXT_NEW_RM(bp)) {
7589 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7590 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7591 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7592 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7593 enables |= hwr->cp_p5 ?
7594 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7595 } else {
7596 enables |= hwr->cp ?
7597 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7598 enables |= hwr->grp ?
7599 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7600 }
7601 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7602 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7603 0;
7604 req->num_rx_rings = cpu_to_le16(hwr->rx);
7605 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7606 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7607 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7608 req->num_msix = cpu_to_le16(hwr->cp);
7609 } else {
7610 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7611 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7612 }
7613 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7614 req->num_vnics = cpu_to_le16(hwr->vnic);
7615 }
7616 req->enables = cpu_to_le32(enables);
7617 return req;
7618 }
7619
7620 static struct hwrm_func_vf_cfg_input *
7621 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7622 {
7623 struct hwrm_func_vf_cfg_input *req;
7624 u32 enables = 0;
7625
7626 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7627 return NULL;
7628
7629 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7630 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7631 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7632 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7633 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7634 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7635 enables |= hwr->cp_p5 ?
7636 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7637 } else {
7638 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7639 enables |= hwr->grp ?
7640 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7641 }
7642 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7643 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7644
7645 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7646 req->num_tx_rings = cpu_to_le16(hwr->tx);
7647 req->num_rx_rings = cpu_to_le16(hwr->rx);
7648 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7649 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7650 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7651 } else {
7652 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7653 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7654 }
7655 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7656 req->num_vnics = cpu_to_le16(hwr->vnic);
7657
7658 req->enables = cpu_to_le32(enables);
7659 return req;
7660 }
7661
7662 static int
7663 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7664 {
7665 struct hwrm_func_cfg_input *req;
7666 int rc;
7667
7668 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7669 if (!req)
7670 return -ENOMEM;
7671
7672 if (!req->enables) {
7673 hwrm_req_drop(bp, req);
7674 return 0;
7675 }
7676
7677 rc = hwrm_req_send(bp, req);
7678 if (rc)
7679 return rc;
7680
7681 if (bp->hwrm_spec_code < 0x10601)
7682 bp->hw_resc.resv_tx_rings = hwr->tx;
7683
7684 return bnxt_hwrm_get_rings(bp);
7685 }
7686
7687 static int
7688 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7689 {
7690 struct hwrm_func_vf_cfg_input *req;
7691 int rc;
7692
7693 if (!BNXT_NEW_RM(bp)) {
7694 bp->hw_resc.resv_tx_rings = hwr->tx;
7695 return 0;
7696 }
7697
7698 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7699 if (!req)
7700 return -ENOMEM;
7701
7702 rc = hwrm_req_send(bp, req);
7703 if (rc)
7704 return rc;
7705
7706 return bnxt_hwrm_get_rings(bp);
7707 }
7708
7709 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7710 {
7711 if (BNXT_PF(bp))
7712 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7713 else
7714 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7715 }
7716
7717 int bnxt_nq_rings_in_use(struct bnxt *bp)
7718 {
7719 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7720 }
7721
7722 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7723 {
7724 int cp;
7725
7726 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7727 return bnxt_nq_rings_in_use(bp);
7728
7729 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7730 return cp;
7731 }
7732
7733 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7734 {
7735 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7736 }
7737
7738 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7739 {
7740 if (!hwr->grp)
7741 return 0;
7742 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7743 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7744
7745 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7746 rss_ctx *= hwr->vnic;
7747 return rss_ctx;
7748 }
7749 if (BNXT_VF(bp))
7750 return BNXT_VF_MAX_RSS_CTX;
7751 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7752 return hwr->grp + 1;
7753 return 1;
7754 }
7755
7756 /* Check if a default RSS map needs to be set up. This function is only
7757 * used on older firmware that does not require reserving RX rings.
7758 */
7759 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7760 {
7761 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7762
7763 /* The RSS map is valid for RX rings set to resv_rx_rings */
7764 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7765 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7766 if (!netif_is_rxfh_configured(bp->dev))
7767 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7768 }
7769 }
7770
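/* Number of VNICs needed: one default VNIC always; with RFS enabled,
 * chips that support ntuple filters in separate VNICs add one ntuple
 * VNIC plus any user-created RSS contexts, while older chips use one
 * VNIC per RX ring in addition to the default.
 */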
7771 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7772 {
7773 if (bp->flags & BNXT_FLAG_RFS) {
7774 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7775 return 2 + bp->num_rss_ctx;
7776 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7777 return rx_rings + 1;
7778 }
7779 return 1;
7780 }
7781
7782 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7783 {
7784 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7785 int cp = bnxt_cp_rings_in_use(bp);
7786 int nq = bnxt_nq_rings_in_use(bp);
7787 int rx = bp->rx_nr_rings, stat;
7788 int vnic, grp = rx;
7789
7790 /* Old firmware does not need RX ring reservations but we still
7791 * need to setup a default RSS map when needed. With new firmware
7792 * we go through RX ring reservations first and then set up the
7793 * RSS map for the successfully reserved RX rings when needed.
7794 */
7795 if (!BNXT_NEW_RM(bp))
7796 bnxt_check_rss_tbl_no_rmgr(bp);
7797
7798 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7799 bp->hwrm_spec_code >= 0x10601)
7800 return true;
7801
7802 if (!BNXT_NEW_RM(bp))
7803 return false;
7804
7805 vnic = bnxt_get_total_vnics(bp, rx);
7806
7807 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7808 rx <<= 1;
7809 stat = bnxt_get_func_stat_ctxs(bp);
7810 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7811 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7812 (hw_resc->resv_hw_ring_grps != grp &&
7813 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7814 return true;
7815 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7816 hw_resc->resv_irqs != nq)
7817 return true;
7818 return false;
7819 }
7820
7821 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7822 {
7823 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7824
7825 hwr->tx = hw_resc->resv_tx_rings;
7826 if (BNXT_NEW_RM(bp)) {
7827 hwr->rx = hw_resc->resv_rx_rings;
7828 hwr->cp = hw_resc->resv_irqs;
7829 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7830 hwr->cp_p5 = hw_resc->resv_cp_rings;
7831 hwr->grp = hw_resc->resv_hw_ring_grps;
7832 hwr->vnic = hw_resc->resv_vnics;
7833 hwr->stat = hw_resc->resv_stat_ctxs;
7834 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7835 }
7836 }
7837
7838 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7839 {
7840 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7841 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7842 }
7843
7844 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7845
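/* Reserve rings, completion rings, VNICs and stat contexts with the
 * firmware based on the current bp->*_nr_rings values, then trim the
 * driver's ring counts down to whatever the firmware actually granted.
 * When the ULP (RDMA driver) is not yet registered, leftover MSI-X
 * vectors and stat contexts are set aside for it.
 */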
7846 static int __bnxt_reserve_rings(struct bnxt *bp)
7847 {
7848 struct bnxt_hw_rings hwr = {0};
7849 int rx_rings, old_rx_rings, rc;
7850 int cp = bp->cp_nr_rings;
7851 int ulp_msix = 0;
7852 bool sh = false;
7853 int tx_cp;
7854
7855 if (!bnxt_need_reserve_rings(bp))
7856 return 0;
7857
7858 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7859 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7860 if (!ulp_msix)
7861 bnxt_set_ulp_stat_ctxs(bp, 0);
7862
7863 if (ulp_msix > bp->ulp_num_msix_want)
7864 ulp_msix = bp->ulp_num_msix_want;
7865 hwr.cp = cp + ulp_msix;
7866 } else {
7867 hwr.cp = bnxt_nq_rings_in_use(bp);
7868 }
7869
7870 hwr.tx = bp->tx_nr_rings;
7871 hwr.rx = bp->rx_nr_rings;
7872 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7873 sh = true;
7874 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7875 hwr.cp_p5 = hwr.rx + hwr.tx;
7876
7877 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7878
7879 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7880 hwr.rx <<= 1;
7881 hwr.grp = bp->rx_nr_rings;
7882 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7883 hwr.stat = bnxt_get_func_stat_ctxs(bp);
7884 old_rx_rings = bp->hw_resc.resv_rx_rings;
7885
7886 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7887 if (rc)
7888 return rc;
7889
7890 bnxt_copy_reserved_rings(bp, &hwr);
7891
7892 rx_rings = hwr.rx;
7893 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7894 if (hwr.rx >= 2) {
7895 rx_rings = hwr.rx >> 1;
7896 } else {
7897 if (netif_running(bp->dev))
7898 return -ENOMEM;
7899
7900 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7901 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7902 bp->dev->hw_features &= ~NETIF_F_LRO;
7903 bp->dev->features &= ~NETIF_F_LRO;
7904 bnxt_set_ring_params(bp);
7905 }
7906 }
7907 rx_rings = min_t(int, rx_rings, hwr.grp);
7908 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
7909 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
7910 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
7911 hwr.cp = min_t(int, hwr.cp, hwr.stat);
7912 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7913 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7914 hwr.rx = rx_rings << 1;
7915 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
7916 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
7917 bp->tx_nr_rings = hwr.tx;
7918
7919 /* If we cannot reserve all the RX rings, reset the RSS map only
7920 * if absolutely necessary
7921 */
7922 if (rx_rings != bp->rx_nr_rings) {
7923 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7924 rx_rings, bp->rx_nr_rings);
7925 if (netif_is_rxfh_configured(bp->dev) &&
7926 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7927 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7928 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7929 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7930 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7931 }
7932 }
7933 bp->rx_nr_rings = rx_rings;
7934 bp->cp_nr_rings = hwr.cp;
7935
7936 if (!bnxt_rings_ok(bp, &hwr))
7937 return -ENOMEM;
7938
7939 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
7940 !netif_is_rxfh_configured(bp->dev))
7941 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7942
7943 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
7944 int resv_msix, resv_ctx, ulp_ctxs;
7945 struct bnxt_hw_resc *hw_resc;
7946
7947 hw_resc = &bp->hw_resc;
7948 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
7949 ulp_msix = min_t(int, resv_msix, ulp_msix);
7950 bnxt_set_ulp_msix_num(bp, ulp_msix);
7951 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
7952 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
7953 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
7954 }
7955
7956 return rc;
7957 }
7958
7959 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7960 {
7961 struct hwrm_func_vf_cfg_input *req;
7962 u32 flags;
7963
7964 if (!BNXT_NEW_RM(bp))
7965 return 0;
7966
7967 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7968 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
7969 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7970 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7971 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7972 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
7973 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
7974 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7975 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7976
7977 req->flags = cpu_to_le32(flags);
7978 return hwrm_req_send_silent(bp, req);
7979 }
7980
7981 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7982 {
7983 struct hwrm_func_cfg_input *req;
7984 u32 flags;
7985
7986 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7987 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
7988 if (BNXT_NEW_RM(bp)) {
7989 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7990 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7991 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7992 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
7993 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7994 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
7995 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
7996 else
7997 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7998 }
7999
8000 req->flags = cpu_to_le32(flags);
8001 return hwrm_req_send_silent(bp, req);
8002 }
8003
8004 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8005 {
8006 if (bp->hwrm_spec_code < 0x10801)
8007 return 0;
8008
8009 if (BNXT_PF(bp))
8010 return bnxt_hwrm_check_pf_rings(bp, hwr);
8011
8012 return bnxt_hwrm_check_vf_rings(bp, hwr);
8013 }
8014
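/* Query the interrupt coalescing capabilities of the firmware.  The
 * legacy defaults set below are kept when the firmware is too old to
 * support HWRM_RING_AGGINT_QCAPS or when the query fails.
 */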
8015 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8016 {
8017 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8018 struct hwrm_ring_aggint_qcaps_output *resp;
8019 struct hwrm_ring_aggint_qcaps_input *req;
8020 int rc;
8021
8022 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8023 coal_cap->num_cmpl_dma_aggr_max = 63;
8024 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8025 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8026 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8027 coal_cap->int_lat_tmr_min_max = 65535;
8028 coal_cap->int_lat_tmr_max_max = 65535;
8029 coal_cap->num_cmpl_aggr_int_max = 65535;
8030 coal_cap->timer_units = 80;
8031
8032 if (bp->hwrm_spec_code < 0x10902)
8033 return;
8034
8035 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8036 return;
8037
8038 resp = hwrm_req_hold(bp, req);
8039 rc = hwrm_req_send_silent(bp, req);
8040 if (!rc) {
8041 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8042 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8043 coal_cap->num_cmpl_dma_aggr_max =
8044 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8045 coal_cap->num_cmpl_dma_aggr_during_int_max =
8046 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8047 coal_cap->cmpl_aggr_dma_tmr_max =
8048 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8049 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8050 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8051 coal_cap->int_lat_tmr_min_max =
8052 le16_to_cpu(resp->int_lat_tmr_min_max);
8053 coal_cap->int_lat_tmr_max_max =
8054 le16_to_cpu(resp->int_lat_tmr_max_max);
8055 coal_cap->num_cmpl_aggr_int_max =
8056 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8057 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8058 }
8059 hwrm_req_drop(bp, req);
8060 }
8061
8062 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8063 {
8064 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8065
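	/* Convert microseconds to device timer ticks.  For example, with
	 * the default timer_units of 80 set in bnxt_hwrm_coal_params_qcaps(),
	 * 100 usec becomes 100 * 1000 / 80 = 1250 ticks.
	 */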
8066 return usec * 1000 / coal_cap->timer_units;
8067 }
8068
8069 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8070 struct bnxt_coal *hw_coal,
8071 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8072 {
8073 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8074 u16 val, tmr, max, flags = hw_coal->flags;
8075 u32 cmpl_params = coal_cap->cmpl_params;
8076
8077 max = hw_coal->bufs_per_record * 128;
8078 if (hw_coal->budget)
8079 max = hw_coal->bufs_per_record * hw_coal->budget;
8080 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8081
8082 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8083 req->num_cmpl_aggr_int = cpu_to_le16(val);
8084
8085 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8086 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8087
8088 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8089 coal_cap->num_cmpl_dma_aggr_during_int_max);
8090 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8091
8092 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8093 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8094 req->int_lat_tmr_max = cpu_to_le16(tmr);
8095
8096 /* min timer set to 1/2 of interrupt timer */
8097 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8098 val = tmr / 2;
8099 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8100 req->int_lat_tmr_min = cpu_to_le16(val);
8101 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8102 }
8103
8104 /* buf timer set to 1/4 of interrupt timer */
8105 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8106 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8107
8108 if (cmpl_params &
8109 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8110 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8111 val = clamp_t(u16, tmr, 1,
8112 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8113 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8114 req->enables |=
8115 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8116 }
8117
8118 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8119 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8120 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8121 req->flags = cpu_to_le16(flags);
8122 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8123 }
8124
8125 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8126 struct bnxt_coal *hw_coal)
8127 {
8128 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8129 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8130 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8131 u32 nq_params = coal_cap->nq_params;
8132 u16 tmr;
8133 int rc;
8134
8135 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8136 return 0;
8137
8138 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8139 if (rc)
8140 return rc;
8141
8142 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8143 req->flags =
8144 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8145
8146 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8147 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8148 req->int_lat_tmr_min = cpu_to_le16(tmr);
8149 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8150 return hwrm_req_send(bp, req);
8151 }
8152
8153 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8154 {
8155 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8156 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8157 struct bnxt_coal coal;
8158 int rc;
8159
8160 	/* Tick values in microseconds.
8161 * 1 coal_buf x bufs_per_record = 1 completion record.
8162 */
8163 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8164
8165 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8166 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8167
8168 if (!bnapi->rx_ring)
8169 return -ENODEV;
8170
8171 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8172 if (rc)
8173 return rc;
8174
8175 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8176
8177 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8178
8179 return hwrm_req_send(bp, req_rx);
8180 }
8181
8182 static int
8183 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8184 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8185 {
8186 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8187
8188 req->ring_id = cpu_to_le16(ring_id);
8189 return hwrm_req_send(bp, req);
8190 }
8191
8192 static int
8193 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8194 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8195 {
8196 struct bnxt_tx_ring_info *txr;
8197 int i, rc;
8198
8199 bnxt_for_each_napi_tx(i, bnapi, txr) {
8200 u16 ring_id;
8201
8202 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8203 req->ring_id = cpu_to_le16(ring_id);
8204 rc = hwrm_req_send(bp, req);
8205 if (rc)
8206 return rc;
8207 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8208 return 0;
8209 }
8210 return 0;
8211 }
8212
8213 int bnxt_hwrm_set_coal(struct bnxt *bp)
8214 {
8215 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8216 int i, rc;
8217
8218 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8219 if (rc)
8220 return rc;
8221
8222 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8223 if (rc) {
8224 hwrm_req_drop(bp, req_rx);
8225 return rc;
8226 }
8227
8228 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8229 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8230
8231 hwrm_req_hold(bp, req_rx);
8232 hwrm_req_hold(bp, req_tx);
8233 for (i = 0; i < bp->cp_nr_rings; i++) {
8234 struct bnxt_napi *bnapi = bp->bnapi[i];
8235 struct bnxt_coal *hw_coal;
8236
8237 if (!bnapi->rx_ring)
8238 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8239 else
8240 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8241 if (rc)
8242 break;
8243
8244 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8245 continue;
8246
8247 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8248 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8249 if (rc)
8250 break;
8251 }
8252 if (bnapi->rx_ring)
8253 hw_coal = &bp->rx_coal;
8254 else
8255 hw_coal = &bp->tx_coal;
8256 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8257 }
8258 hwrm_req_drop(bp, req_rx);
8259 hwrm_req_drop(bp, req_tx);
8260 return rc;
8261 }
8262
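/* Free all statistics contexts.  For older firmware (major version 20
 * or less) an explicit HWRM_STAT_CTX_CLR_STATS is sent for each context
 * before it is freed.
 */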
8263 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8264 {
8265 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8266 struct hwrm_stat_ctx_free_input *req;
8267 int i;
8268
8269 if (!bp->bnapi)
8270 return;
8271
8272 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8273 return;
8274
8275 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8276 return;
8277 if (BNXT_FW_MAJ(bp) <= 20) {
8278 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8279 hwrm_req_drop(bp, req);
8280 return;
8281 }
8282 hwrm_req_hold(bp, req0);
8283 }
8284 hwrm_req_hold(bp, req);
8285 for (i = 0; i < bp->cp_nr_rings; i++) {
8286 struct bnxt_napi *bnapi = bp->bnapi[i];
8287 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8288
8289 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8290 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8291 if (req0) {
8292 req0->stat_ctx_id = req->stat_ctx_id;
8293 hwrm_req_send(bp, req0);
8294 }
8295 hwrm_req_send(bp, req);
8296
8297 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8298 }
8299 }
8300 hwrm_req_drop(bp, req);
8301 if (req0)
8302 hwrm_req_drop(bp, req0);
8303 }
8304
8305 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8306 {
8307 struct hwrm_stat_ctx_alloc_output *resp;
8308 struct hwrm_stat_ctx_alloc_input *req;
8309 int rc, i;
8310
8311 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8312 return 0;
8313
8314 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8315 if (rc)
8316 return rc;
8317
8318 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8319 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8320
8321 resp = hwrm_req_hold(bp, req);
8322 for (i = 0; i < bp->cp_nr_rings; i++) {
8323 struct bnxt_napi *bnapi = bp->bnapi[i];
8324 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8325
8326 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8327
8328 rc = hwrm_req_send(bp, req);
8329 if (rc)
8330 break;
8331
8332 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8333
8334 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8335 }
8336 hwrm_req_drop(bp, req);
8337 return rc;
8338 }
8339
8340 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8341 {
8342 struct hwrm_func_qcfg_output *resp;
8343 struct hwrm_func_qcfg_input *req;
8344 u16 flags;
8345 int rc;
8346
8347 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8348 if (rc)
8349 return rc;
8350
8351 req->fid = cpu_to_le16(0xffff);
8352 resp = hwrm_req_hold(bp, req);
8353 rc = hwrm_req_send(bp, req);
8354 if (rc)
8355 goto func_qcfg_exit;
8356
8357 flags = le16_to_cpu(resp->flags);
8358 #ifdef CONFIG_BNXT_SRIOV
8359 if (BNXT_VF(bp)) {
8360 struct bnxt_vf_info *vf = &bp->vf;
8361
8362 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8363 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8364 vf->flags |= BNXT_VF_TRUST;
8365 else
8366 vf->flags &= ~BNXT_VF_TRUST;
8367 } else {
8368 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8369 }
8370 #endif
8371 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8372 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8373 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8374 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8375 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8376 }
8377 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8378 bp->flags |= BNXT_FLAG_MULTI_HOST;
8379
8380 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8381 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8382
8383 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8384 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8385
8386 switch (resp->port_partition_type) {
8387 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8388 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8389 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8390 bp->port_partition_type = resp->port_partition_type;
8391 break;
8392 }
8393 if (bp->hwrm_spec_code < 0x10707 ||
8394 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8395 bp->br_mode = BRIDGE_MODE_VEB;
8396 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8397 bp->br_mode = BRIDGE_MODE_VEPA;
8398 else
8399 bp->br_mode = BRIDGE_MODE_UNDEF;
8400
8401 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8402 if (!bp->max_mtu)
8403 bp->max_mtu = BNXT_MAX_MTU;
8404
8405 if (bp->db_size)
8406 goto func_qcfg_exit;
8407
8408 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8409 if (BNXT_CHIP_P5(bp)) {
8410 if (BNXT_PF(bp))
8411 bp->db_offset = DB_PF_OFFSET_P5;
8412 else
8413 bp->db_offset = DB_VF_OFFSET_P5;
8414 }
8415 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8416 1024);
8417 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8418 bp->db_size <= bp->db_offset)
8419 bp->db_size = pci_resource_len(bp->pdev, 2);
8420
8421 func_qcfg_exit:
8422 hwrm_req_drop(bp, req);
8423 return rc;
8424 }
8425
8426 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8427 u8 init_val, u8 init_offset,
8428 bool init_mask_set)
8429 {
8430 ctxm->init_value = init_val;
8431 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8432 if (init_mask_set)
8433 ctxm->init_offset = init_offset * 4;
8434 else
8435 ctxm->init_value = 0;
8436 }
8437
8438 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8439 {
8440 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8441 u16 type;
8442
8443 for (type = 0; type < ctx_max; type++) {
8444 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8445 int n = 1;
8446
8447 if (!ctxm->max_entries || ctxm->pg_info)
8448 continue;
8449
8450 if (ctxm->instance_bmap)
8451 n = hweight32(ctxm->instance_bmap);
8452 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8453 if (!ctxm->pg_info)
8454 return -ENOMEM;
8455 }
8456 return 0;
8457 }
8458
8459 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8460 struct bnxt_ctx_mem_type *ctxm, bool force);
8461
8462 #define BNXT_CTX_INIT_VALID(flags) \
8463 (!!((flags) & \
8464 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8465
8466 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8467 {
8468 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8469 struct hwrm_func_backing_store_qcaps_v2_input *req;
8470 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8471 u16 type;
8472 int rc;
8473
8474 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8475 if (rc)
8476 return rc;
8477
8478 if (!ctx) {
8479 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8480 if (!ctx)
8481 return -ENOMEM;
8482 bp->ctx = ctx;
8483 }
8484
8485 resp = hwrm_req_hold(bp, req);
8486
8487 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8488 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8489 u8 init_val, init_off, i;
8490 u32 max_entries;
8491 u16 entry_size;
8492 __le32 *p;
8493 u32 flags;
8494
8495 req->type = cpu_to_le16(type);
8496 rc = hwrm_req_send(bp, req);
8497 if (rc)
8498 goto ctx_done;
8499 flags = le32_to_cpu(resp->flags);
8500 type = le16_to_cpu(resp->next_valid_type);
8501 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8502 bnxt_free_one_ctx_mem(bp, ctxm, true);
8503 continue;
8504 }
8505 entry_size = le16_to_cpu(resp->entry_size);
8506 max_entries = le32_to_cpu(resp->max_num_entries);
8507 if (ctxm->mem_valid) {
8508 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8509 ctxm->entry_size != entry_size ||
8510 ctxm->max_entries != max_entries)
8511 bnxt_free_one_ctx_mem(bp, ctxm, true);
8512 else
8513 continue;
8514 }
8515 ctxm->type = le16_to_cpu(resp->type);
8516 ctxm->entry_size = entry_size;
8517 ctxm->flags = flags;
8518 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8519 ctxm->entry_multiple = resp->entry_multiple;
8520 ctxm->max_entries = max_entries;
8521 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8522 init_val = resp->ctx_init_value;
8523 init_off = resp->ctx_init_offset;
8524 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8525 BNXT_CTX_INIT_VALID(flags));
8526 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8527 BNXT_MAX_SPLIT_ENTRY);
8528 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8529 i++, p++)
8530 ctxm->split[i] = le32_to_cpu(*p);
8531 }
8532 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8533
8534 ctx_done:
8535 hwrm_req_drop(bp, req);
8536 return rc;
8537 }
8538
8539 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8540 {
8541 struct hwrm_func_backing_store_qcaps_output *resp;
8542 struct hwrm_func_backing_store_qcaps_input *req;
8543 int rc;
8544
8545 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8546 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8547 return 0;
8548
8549 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8550 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8551
8552 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8553 if (rc)
8554 return rc;
8555
8556 resp = hwrm_req_hold(bp, req);
8557 rc = hwrm_req_send_silent(bp, req);
8558 if (!rc) {
8559 struct bnxt_ctx_mem_type *ctxm;
8560 struct bnxt_ctx_mem_info *ctx;
8561 u8 init_val, init_idx = 0;
8562 u16 init_mask;
8563
8564 ctx = bp->ctx;
8565 if (!ctx) {
8566 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8567 if (!ctx) {
8568 rc = -ENOMEM;
8569 goto ctx_err;
8570 }
8571 bp->ctx = ctx;
8572 }
8573 init_val = resp->ctx_kind_initializer;
8574 init_mask = le16_to_cpu(resp->ctx_init_mask);
8575
8576 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8577 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8578 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8579 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8580 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8581 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8582 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8583 (init_mask & (1 << init_idx++)) != 0);
8584
8585 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8586 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8587 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8588 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8589 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8590 (init_mask & (1 << init_idx++)) != 0);
8591
8592 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8593 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8594 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8595 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8596 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8597 (init_mask & (1 << init_idx++)) != 0);
8598
8599 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8600 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8601 ctxm->max_entries = ctxm->vnic_entries +
8602 le16_to_cpu(resp->vnic_max_ring_table_entries);
8603 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8604 bnxt_init_ctx_initializer(ctxm, init_val,
8605 resp->vnic_init_offset,
8606 (init_mask & (1 << init_idx++)) != 0);
8607
8608 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8609 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8610 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8611 bnxt_init_ctx_initializer(ctxm, init_val,
8612 resp->stat_init_offset,
8613 (init_mask & (1 << init_idx++)) != 0);
8614
8615 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8616 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8617 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8618 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8619 ctxm->entry_multiple = resp->tqm_entries_multiple;
8620 if (!ctxm->entry_multiple)
8621 ctxm->entry_multiple = 1;
8622
8623 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8624
8625 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8626 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8627 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8628 ctxm->mrav_num_entries_units =
8629 le16_to_cpu(resp->mrav_num_entries_units);
8630 bnxt_init_ctx_initializer(ctxm, init_val,
8631 resp->mrav_init_offset,
8632 (init_mask & (1 << init_idx++)) != 0);
8633
8634 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8635 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8636 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8637
8638 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8639 if (!ctx->tqm_fp_rings_count)
8640 ctx->tqm_fp_rings_count = bp->max_q;
8641 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8642 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8643
8644 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8645 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8646 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8647
8648 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8649 } else {
8650 rc = 0;
8651 }
8652 ctx_err:
8653 hwrm_req_drop(bp, req);
8654 return rc;
8655 }
8656
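/* Fill in the page size/level attribute and page directory address for
 * one backing store region: level 0 points directly at a single page,
 * while levels 1 and 2 point at a one- or two-level page table.
 */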
8657 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8658 __le64 *pg_dir)
8659 {
8660 if (!rmem->nr_pages)
8661 return;
8662
8663 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8664 if (rmem->depth >= 1) {
8665 if (rmem->depth == 2)
8666 *pg_attr |= 2;
8667 else
8668 *pg_attr |= 1;
8669 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8670 } else {
8671 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8672 }
8673 }
8674
8675 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8676 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8677 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8678 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8679 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8680 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8681
8682 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8683 {
8684 struct hwrm_func_backing_store_cfg_input *req;
8685 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8686 struct bnxt_ctx_pg_info *ctx_pg;
8687 struct bnxt_ctx_mem_type *ctxm;
8688 void **__req = (void **)&req;
8689 u32 req_len = sizeof(*req);
8690 __le32 *num_entries;
8691 __le64 *pg_dir;
8692 u32 flags = 0;
8693 u8 *pg_attr;
8694 u32 ena;
8695 int rc;
8696 int i;
8697
8698 if (!ctx)
8699 return 0;
8700
8701 if (req_len > bp->hwrm_max_ext_req_len)
8702 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8703 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8704 if (rc)
8705 return rc;
8706
8707 req->enables = cpu_to_le32(enables);
8708 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8709 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8710 ctx_pg = ctxm->pg_info;
8711 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8712 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8713 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8714 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8715 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8716 &req->qpc_pg_size_qpc_lvl,
8717 &req->qpc_page_dir);
8718
8719 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8720 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8721 }
8722 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8723 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8724 ctx_pg = ctxm->pg_info;
8725 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8726 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8727 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8728 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8729 &req->srq_pg_size_srq_lvl,
8730 &req->srq_page_dir);
8731 }
8732 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8733 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8734 ctx_pg = ctxm->pg_info;
8735 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8736 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8737 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8738 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8739 &req->cq_pg_size_cq_lvl,
8740 &req->cq_page_dir);
8741 }
8742 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8743 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8744 ctx_pg = ctxm->pg_info;
8745 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8746 req->vnic_num_ring_table_entries =
8747 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8748 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8749 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8750 &req->vnic_pg_size_vnic_lvl,
8751 &req->vnic_page_dir);
8752 }
8753 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8754 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8755 ctx_pg = ctxm->pg_info;
8756 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8757 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8758 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8759 &req->stat_pg_size_stat_lvl,
8760 &req->stat_page_dir);
8761 }
8762 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8763 u32 units;
8764
8765 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8766 ctx_pg = ctxm->pg_info;
8767 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8768 units = ctxm->mrav_num_entries_units;
8769 if (units) {
8770 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8771 u32 entries;
8772
8773 num_mr = ctx_pg->entries - num_ah;
8774 entries = ((num_mr / units) << 16) | (num_ah / units);
8775 req->mrav_num_entries = cpu_to_le32(entries);
8776 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8777 }
8778 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8779 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8780 &req->mrav_pg_size_mrav_lvl,
8781 &req->mrav_page_dir);
8782 }
8783 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8784 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8785 ctx_pg = ctxm->pg_info;
8786 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8787 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8788 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8789 &req->tim_pg_size_tim_lvl,
8790 &req->tim_page_dir);
8791 }
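	/* TQM rings: the first pass covers the slow-path ring using the
	 * BNXT_CTX_STQM page info; subsequent passes walk the fast-path
	 * rings in BNXT_CTX_FTQM.  The per-ring request fields are laid
	 * out consecutively, so the field pointers are simply advanced
	 * and the enable bit shifted left on each iteration.
	 */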
8792 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8793 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8794 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8795 pg_dir = &req->tqm_sp_page_dir,
8796 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8797 ctx_pg = ctxm->pg_info;
8798 i < BNXT_MAX_TQM_RINGS;
8799 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8800 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8801 if (!(enables & ena))
8802 continue;
8803
8804 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8805 *num_entries = cpu_to_le32(ctx_pg->entries);
8806 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8807 }
8808 req->flags = cpu_to_le32(flags);
8809 return hwrm_req_send(bp, req);
8810 }
8811
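/* Allocate one block of context memory pages: point the embedded
 * bnxt_ring_mem_info at this block's page and DMA address arrays,
 * mark the PTEs valid, and reuse the generic ring allocator.
 */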
8812 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8813 struct bnxt_ctx_pg_info *ctx_pg)
8814 {
8815 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8816
8817 rmem->page_size = BNXT_PAGE_SIZE;
8818 rmem->pg_arr = ctx_pg->ctx_pg_arr;
8819 rmem->dma_arr = ctx_pg->ctx_dma_arr;
8820 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8821 if (rmem->depth >= 1)
8822 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8823 return bnxt_alloc_ring(bp, rmem);
8824 }
8825
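/* Allocate the pages (and page tables) backing one context memory
 * instance.  Small regions use a single level; regions larger than
 * MAX_CTX_PAGES pages, or callers requesting depth > 1, get a two
 * level layout where each entry of ctx_pg_tbl[] is itself a
 * bnxt_ctx_pg_info describing up to MAX_CTX_PAGES data pages.
 */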
8826 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8827 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8828 u8 depth, struct bnxt_ctx_mem_type *ctxm)
8829 {
8830 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8831 int rc;
8832
8833 if (!mem_size)
8834 return -EINVAL;
8835
8836 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8837 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8838 ctx_pg->nr_pages = 0;
8839 return -EINVAL;
8840 }
8841 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8842 int nr_tbls, i;
8843
8844 rmem->depth = 2;
8845 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8846 GFP_KERNEL);
8847 if (!ctx_pg->ctx_pg_tbl)
8848 return -ENOMEM;
8849 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8850 rmem->nr_pages = nr_tbls;
8851 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8852 if (rc)
8853 return rc;
8854 for (i = 0; i < nr_tbls; i++) {
8855 struct bnxt_ctx_pg_info *pg_tbl;
8856
8857 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8858 if (!pg_tbl)
8859 return -ENOMEM;
8860 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8861 rmem = &pg_tbl->ring_mem;
8862 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8863 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8864 rmem->depth = 1;
8865 rmem->nr_pages = MAX_CTX_PAGES;
8866 rmem->ctx_mem = ctxm;
8867 if (i == (nr_tbls - 1)) {
8868 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8869
8870 if (rem)
8871 rmem->nr_pages = rem;
8872 }
8873 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8874 if (rc)
8875 break;
8876 }
8877 } else {
8878 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8879 if (rmem->nr_pages > 1 || depth)
8880 rmem->depth = 1;
8881 rmem->ctx_mem = ctxm;
8882 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8883 }
8884 return rc;
8885 }
8886
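/* Copy a [head, tail) window of one context memory instance into @buf
 * (or just measure it when @buf is NULL).  The region is treated as a
 * circular buffer: the copy wraps back to offset 0 when head reaches
 * the end of the allocated pages.  For two-level layouts, the page
 * table covering the current head offset is selected on each pass.
 */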
8887 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
8888 struct bnxt_ctx_pg_info *ctx_pg,
8889 void *buf, size_t offset, size_t head,
8890 size_t tail)
8891 {
8892 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8893 size_t nr_pages = ctx_pg->nr_pages;
8894 int page_size = rmem->page_size;
8895 size_t len = 0, total_len = 0;
8896 u16 depth = rmem->depth;
8897
8898 tail %= nr_pages * page_size;
8899 do {
8900 if (depth > 1) {
8901 int i = head / (page_size * MAX_CTX_PAGES);
8902 struct bnxt_ctx_pg_info *pg_tbl;
8903
8904 pg_tbl = ctx_pg->ctx_pg_tbl[i];
8905 rmem = &pg_tbl->ring_mem;
8906 }
8907 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
8908 head += len;
8909 offset += len;
8910 total_len += len;
8911 if (head >= nr_pages * page_size)
8912 head = 0;
8913 } while (head != tail);
8914 return total_len;
8915 }
8916
8917 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8918 struct bnxt_ctx_pg_info *ctx_pg)
8919 {
8920 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8921
8922 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
8923 ctx_pg->ctx_pg_tbl) {
8924 int i, nr_tbls = rmem->nr_pages;
8925
8926 for (i = 0; i < nr_tbls; i++) {
8927 struct bnxt_ctx_pg_info *pg_tbl;
8928 struct bnxt_ring_mem_info *rmem2;
8929
8930 pg_tbl = ctx_pg->ctx_pg_tbl[i];
8931 if (!pg_tbl)
8932 continue;
8933 rmem2 = &pg_tbl->ring_mem;
8934 bnxt_free_ring(bp, rmem2);
8935 ctx_pg->ctx_pg_arr[i] = NULL;
8936 kfree(pg_tbl);
8937 ctx_pg->ctx_pg_tbl[i] = NULL;
8938 }
8939 kfree(ctx_pg->ctx_pg_tbl);
8940 ctx_pg->ctx_pg_tbl = NULL;
8941 }
8942 bnxt_free_ring(bp, rmem);
8943 ctx_pg->nr_pages = 0;
8944 }
8945
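/* Size and allocate the backing store pages for one context type:
 * round the requested entry count up to the type's entry_multiple,
 * clamp it to [min_entries, max_entries], and allocate one page table
 * set per instance in instance_bmap (or a single instance when the
 * bitmap is empty).
 */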
8946 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
8947 struct bnxt_ctx_mem_type *ctxm, u32 entries,
8948 u8 pg_lvl)
8949 {
8950 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8951 int i, rc = 0, n = 1;
8952 u32 mem_size;
8953
8954 if (!ctxm->entry_size || !ctx_pg)
8955 return -EINVAL;
8956 if (ctxm->instance_bmap)
8957 n = hweight32(ctxm->instance_bmap);
8958 if (ctxm->entry_multiple)
8959 entries = roundup(entries, ctxm->entry_multiple);
8960 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
8961 mem_size = entries * ctxm->entry_size;
8962 for (i = 0; i < n && !rc; i++) {
8963 ctx_pg[i].entries = entries;
8964 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8965 ctxm->init_value ? ctxm : NULL);
8966 }
8967 if (!rc)
8968 ctxm->mem_valid = 1;
8969 return rc;
8970 }
8971
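/* Configure one context type with the V2 backing store interface.
 * A separate HWRM_FUNC_BACKING_STORE_CFG_V2 request is sent for every
 * instance in instance_bmap; the BS_CFG_ALL_DONE flag is set on the
 * final request of the final type to indicate that the whole
 * configuration is complete.
 */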
8972 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8973 struct bnxt_ctx_mem_type *ctxm,
8974 bool last)
8975 {
8976 struct hwrm_func_backing_store_cfg_v2_input *req;
8977 u32 instance_bmap = ctxm->instance_bmap;
8978 int i, j, rc = 0, n = 1;
8979 __le32 *p;
8980
8981 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
8982 return 0;
8983
8984 if (instance_bmap)
8985 n = hweight32(ctxm->instance_bmap);
8986 else
8987 instance_bmap = 1;
8988
8989 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8990 if (rc)
8991 return rc;
8992 hwrm_req_hold(bp, req);
8993 req->type = cpu_to_le16(ctxm->type);
8994 req->entry_size = cpu_to_le16(ctxm->entry_size);
8995 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
8996 bnxt_bs_trace_avail(bp, ctxm->type)) {
8997 struct bnxt_bs_trace_info *bs_trace;
8998 u32 enables;
8999
9000 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9001 req->enables = cpu_to_le32(enables);
9002 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9003 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9004 }
9005 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9006 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9007 p[i] = cpu_to_le32(ctxm->split[i]);
9008 for (i = 0, j = 0; j < n && !rc; i++) {
9009 struct bnxt_ctx_pg_info *ctx_pg;
9010
9011 if (!(instance_bmap & (1 << i)))
9012 continue;
9013 req->instance = cpu_to_le16(i);
9014 ctx_pg = &ctxm->pg_info[j++];
9015 if (!ctx_pg->entries)
9016 continue;
9017 req->num_entries = cpu_to_le32(ctx_pg->entries);
9018 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9019 &req->page_size_pbl_level,
9020 &req->page_dir);
9021 if (last && j == n)
9022 req->flags =
9023 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9024 rc = hwrm_req_send(bp, req);
9025 }
9026 hwrm_req_drop(bp, req);
9027 return rc;
9028 }
9029
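/* Walk all context types and configure each valid one via the V2
 * interface.  Trace buffer context types (BNXT_CTX_SRT..BNXT_CTX_RIGP1)
 * that the firmware supports are allocated and initialized here first
 * if not already set up, and the last valid type is flagged so that
 * BS_CFG_ALL_DONE is sent with its final request.
 */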
9030 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
9031 {
9032 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9033 struct bnxt_ctx_mem_type *ctxm;
9034 u16 last_type = BNXT_CTX_INV;
9035 int rc = 0;
9036 u16 type;
9037
9038 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
9039 ctxm = &ctx->ctx_arr[type];
9040 if (!bnxt_bs_trace_avail(bp, type))
9041 continue;
9042 if (!ctxm->mem_valid) {
9043 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9044 ctxm->max_entries, 1);
9045 if (rc) {
9046 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9047 type);
9048 continue;
9049 }
9050 bnxt_bs_trace_init(bp, ctxm);
9051 }
9052 last_type = type;
9053 }
9054
9055 if (last_type == BNXT_CTX_INV) {
9056 if (!ena)
9057 return 0;
9058 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
9059 last_type = BNXT_CTX_MAX - 1;
9060 else
9061 last_type = BNXT_CTX_L2_MAX - 1;
9062 }
9063 ctx->ctx_arr[last_type].last = 1;
9064
9065 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9066 ctxm = &ctx->ctx_arr[type];
9067
9068 if (!ctxm->mem_valid)
9069 continue;
9070 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9071 if (rc)
9072 return rc;
9073 }
9074 return 0;
9075 }
9076
9077 /**
9078 * __bnxt_copy_ctx_mem - copy host context memory
9079 * @bp: The driver context
9080 * @ctxm: The pointer to the context memory type
9081 * @buf: The destination buffer or NULL to just obtain the length
9082 * @offset: The buffer offset to copy the data to
9083 * @head: The head offset of context memory to copy from
9084 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9085 *
9086 * This function is called for debugging purposes to dump the host context
9087 * used by the chip.
9088 *
9089 * Return: Length of memory copied
9090 */
9091 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9092 struct bnxt_ctx_mem_type *ctxm, void *buf,
9093 size_t offset, size_t head, size_t tail)
9094 {
9095 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9096 size_t len = 0, total_len = 0;
9097 int i, n = 1;
9098
9099 if (!ctx_pg)
9100 return 0;
9101
9102 if (ctxm->instance_bmap)
9103 n = hweight32(ctxm->instance_bmap);
9104 for (i = 0; i < n; i++) {
9105 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9106 tail);
9107 offset += len;
9108 total_len += len;
9109 }
9110 return total_len;
9111 }
9112
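/* Convenience wrapper that copies the entire context memory for a type,
 * i.e. from offset 0 to max_entries * entry_size.
 */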
9113 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9114 void *buf, size_t offset)
9115 {
9116 size_t tail = ctxm->max_entries * ctxm->entry_size;
9117
9118 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9119 }
9120
9121 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9122 struct bnxt_ctx_mem_type *ctxm, bool force)
9123 {
9124 struct bnxt_ctx_pg_info *ctx_pg;
9125 int i, n = 1;
9126
9127 ctxm->last = 0;
9128
9129 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9130 return;
9131
9132 ctx_pg = ctxm->pg_info;
9133 if (ctx_pg) {
9134 if (ctxm->instance_bmap)
9135 n = hweight32(ctxm->instance_bmap);
9136 for (i = 0; i < n; i++)
9137 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9138
9139 kfree(ctx_pg);
9140 ctxm->pg_info = NULL;
9141 ctxm->mem_valid = 0;
9142 }
9143 memset(ctxm, 0, sizeof(*ctxm));
9144 }
9145
9146 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9147 {
9148 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9149 u16 type;
9150
9151 if (!ctx)
9152 return;
9153
9154 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9155 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9156
9157 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9158 if (force) {
9159 kfree(ctx);
9160 bp->ctx = NULL;
9161 }
9162 }
9163
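/* Top level context memory setup: query the firmware's backing store
 * requirements, size the QP, SRQ, CQ, VNIC and STAT regions (plus MRAV
 * and TIM when RoCE is supported), set up the TQM rings, and finally
 * push the configuration to the firmware using either the V2 or the
 * legacy backing store interface.
 */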
9164 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9165 {
9166 struct bnxt_ctx_mem_type *ctxm;
9167 struct bnxt_ctx_mem_info *ctx;
9168 u32 l2_qps, qp1_qps, max_qps;
9169 u32 ena, entries_sp, entries;
9170 u32 srqs, max_srqs, min;
9171 u32 num_mr, num_ah;
9172 u32 extra_srqs = 0;
9173 u32 extra_qps = 0;
9174 u32 fast_qpmd_qps;
9175 u8 pg_lvl = 1;
9176 int i, rc;
9177
9178 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9179 if (rc) {
9180 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9181 rc);
9182 return rc;
9183 }
9184 ctx = bp->ctx;
9185 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9186 return 0;
9187
9188 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9189 l2_qps = ctxm->qp_l2_entries;
9190 qp1_qps = ctxm->qp_qp1_entries;
9191 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9192 max_qps = ctxm->max_entries;
9193 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9194 srqs = ctxm->srq_l2_entries;
9195 max_srqs = ctxm->max_entries;
9196 ena = 0;
9197 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9198 pg_lvl = 2;
9199 if (BNXT_SW_RES_LMT(bp)) {
9200 extra_qps = max_qps - l2_qps - qp1_qps;
9201 extra_srqs = max_srqs - srqs;
9202 } else {
9203 extra_qps = min_t(u32, 65536,
9204 max_qps - l2_qps - qp1_qps);
9205 /* allocate extra qps if fw supports RoCE fast qp
9206 * destroy feature
9207 */
9208 extra_qps += fast_qpmd_qps;
9209 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9210 }
9211 if (fast_qpmd_qps)
9212 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9213 }
9214
9215 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9216 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9217 pg_lvl);
9218 if (rc)
9219 return rc;
9220
9221 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9222 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9223 if (rc)
9224 return rc;
9225
9226 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9227 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9228 extra_qps * 2, pg_lvl);
9229 if (rc)
9230 return rc;
9231
9232 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9233 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9234 if (rc)
9235 return rc;
9236
9237 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9238 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9239 if (rc)
9240 return rc;
9241
9242 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9243 goto skip_rdma;
9244
9245 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9246 if (BNXT_SW_RES_LMT(bp) &&
9247 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9248 num_ah = ctxm->mrav_av_entries;
9249 num_mr = ctxm->max_entries - num_ah;
9250 } else {
9251 /* 128K extra is needed to accommodate static AH context
9252 * allocation by f/w.
9253 */
9254 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9255 num_ah = min_t(u32, num_mr, 1024 * 128);
9256 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9257 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9258 ctxm->mrav_av_entries = num_ah;
9259 }
9260
9261 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9262 if (rc)
9263 return rc;
9264 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9265
9266 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9267 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9268 if (rc)
9269 return rc;
9270 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9271
9272 skip_rdma:
9273 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9274 min = ctxm->min_entries;
9275 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9276 2 * (extra_qps + qp1_qps) + min;
9277 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9278 if (rc)
9279 return rc;
9280
9281 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9282 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9283 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9284 if (rc)
9285 return rc;
9286 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9287 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9288 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9289
9290 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9291 rc = bnxt_backing_store_cfg_v2(bp, ena);
9292 else
9293 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9294 if (rc) {
9295 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9296 rc);
9297 return rc;
9298 }
9299 ctx->flags |= BNXT_CTX_FLAG_INITED;
9300 return 0;
9301 }
9302
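/* Tell the firmware where the host-resident crash dump buffer lives:
 * page size, page table level, page directory address and total length,
 * with host DDR as the dump destination.
 */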
9303 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9304 {
9305 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9306 u16 page_attr;
9307 int rc;
9308
9309 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9310 return 0;
9311
9312 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9313 if (rc)
9314 return rc;
9315
9316 if (BNXT_PAGE_SIZE == 0x2000)
9317 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9318 else if (BNXT_PAGE_SIZE == 0x10000)
9319 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9320 else
9321 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9322 req->pg_size_lvl = cpu_to_le16(page_attr |
9323 bp->fw_crash_mem->ring_mem.depth);
9324 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9325 req->size = cpu_to_le32(bp->fw_crash_len);
9326 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9327 return hwrm_req_send(bp, req);
9328 }
9329
9330 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9331 {
9332 if (bp->fw_crash_mem) {
9333 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9334 kfree(bp->fw_crash_mem);
9335 bp->fw_crash_mem = NULL;
9336 }
9337 }
9338
9339 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9340 {
9341 u32 mem_size = 0;
9342 int rc;
9343
9344 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9345 return 0;
9346
9347 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9348 if (rc)
9349 return rc;
9350
9351 mem_size = round_up(mem_size, 4);
9352
9353 /* keep and use the existing pages */
9354 if (bp->fw_crash_mem &&
9355 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9356 goto alloc_done;
9357
9358 if (bp->fw_crash_mem)
9359 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9360 else
9361 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
9362 GFP_KERNEL);
9363 if (!bp->fw_crash_mem)
9364 return -ENOMEM;
9365
9366 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9367 if (rc) {
9368 bnxt_free_crash_dump_mem(bp);
9369 return rc;
9370 }
9371
9372 alloc_done:
9373 bp->fw_crash_len = mem_size;
9374 return 0;
9375 }
9376
9377 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9378 {
9379 struct hwrm_func_resource_qcaps_output *resp;
9380 struct hwrm_func_resource_qcaps_input *req;
9381 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9382 int rc;
9383
9384 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9385 if (rc)
9386 return rc;
9387
9388 req->fid = cpu_to_le16(0xffff);
9389 resp = hwrm_req_hold(bp, req);
9390 rc = hwrm_req_send_silent(bp, req);
9391 if (rc)
9392 goto hwrm_func_resc_qcaps_exit;
9393
9394 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9395 if (!all)
9396 goto hwrm_func_resc_qcaps_exit;
9397
9398 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9399 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9400 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9401 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9402 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9403 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9404 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9405 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9406 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9407 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9408 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9409 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9410 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9411 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9412 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9413 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9414
9415 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9416 u16 max_msix = le16_to_cpu(resp->max_msix);
9417
9418 hw_resc->max_nqs = max_msix;
9419 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9420 }
9421
9422 if (BNXT_PF(bp)) {
9423 struct bnxt_pf_info *pf = &bp->pf;
9424
9425 pf->vf_resv_strategy =
9426 le16_to_cpu(resp->vf_reservation_strategy);
9427 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9428 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9429 }
9430 hwrm_func_resc_qcaps_exit:
9431 hwrm_req_drop(bp, req);
9432 return rc;
9433 }
9434
9435 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9436 {
9437 struct hwrm_port_mac_ptp_qcfg_output *resp;
9438 struct hwrm_port_mac_ptp_qcfg_input *req;
9439 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9440 u8 flags;
9441 int rc;
9442
9443 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9444 rc = -ENODEV;
9445 goto no_ptp;
9446 }
9447
9448 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9449 if (rc)
9450 goto no_ptp;
9451
9452 req->port_id = cpu_to_le16(bp->pf.port_id);
9453 resp = hwrm_req_hold(bp, req);
9454 rc = hwrm_req_send(bp, req);
9455 if (rc)
9456 goto exit;
9457
9458 flags = resp->flags;
9459 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9460 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9461 rc = -ENODEV;
9462 goto exit;
9463 }
9464 if (!ptp) {
9465 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9466 if (!ptp) {
9467 rc = -ENOMEM;
9468 goto exit;
9469 }
9470 ptp->bp = bp;
9471 bp->ptp_cfg = ptp;
9472 }
9473
9474 if (flags &
9475 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9476 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9477 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9478 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9479 } else if (BNXT_CHIP_P5(bp)) {
9480 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9481 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9482 } else {
9483 rc = -ENODEV;
9484 goto exit;
9485 }
9486 ptp->rtc_configured =
9487 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9488 rc = bnxt_ptp_init(bp);
9489 if (rc)
9490 netdev_warn(bp->dev, "PTP initialization failed.\n");
9491 exit:
9492 hwrm_req_drop(bp, req);
9493 if (!rc)
9494 return 0;
9495
9496 no_ptp:
9497 bnxt_ptp_clear(bp);
9498 kfree(ptp);
9499 bp->ptp_cfg = NULL;
9500 return rc;
9501 }
9502
9503 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9504 {
9505 struct hwrm_func_qcaps_output *resp;
9506 struct hwrm_func_qcaps_input *req;
9507 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9508 u32 flags, flags_ext, flags_ext2;
9509 int rc;
9510
9511 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9512 if (rc)
9513 return rc;
9514
9515 req->fid = cpu_to_le16(0xffff);
9516 resp = hwrm_req_hold(bp, req);
9517 rc = hwrm_req_send(bp, req);
9518 if (rc)
9519 goto hwrm_func_qcaps_exit;
9520
9521 flags = le32_to_cpu(resp->flags);
9522 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9523 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9524 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9525 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9526 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9527 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9528 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9529 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9530 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9531 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9532 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9533 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9534 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9535 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9536 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9537 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9538 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9539 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9540
9541 flags_ext = le32_to_cpu(resp->flags_ext);
9542 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9543 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9544 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9545 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9546 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9547 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9548 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9549 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9550 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9551 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9552 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9553 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9554 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9555 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9556 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9557 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9558
9559 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9560 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9561 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9562 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9563 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9564 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9565 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9566 if (flags_ext2 &
9567 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9568 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9569 if (BNXT_PF(bp) &&
9570 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9571 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9572
9573 bp->tx_push_thresh = 0;
9574 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9575 BNXT_FW_MAJ(bp) > 217)
9576 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9577
9578 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9579 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9580 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9581 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9582 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9583 if (!hw_resc->max_hw_ring_grps)
9584 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9585 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9586 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9587 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9588
9589 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9590 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9591 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9592 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9593 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9594 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9595
9596 if (BNXT_PF(bp)) {
9597 struct bnxt_pf_info *pf = &bp->pf;
9598
9599 pf->fw_fid = le16_to_cpu(resp->fid);
9600 pf->port_id = le16_to_cpu(resp->port_id);
9601 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9602 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9603 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9604 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9605 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9606 bp->flags |= BNXT_FLAG_WOL_CAP;
9607 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9608 bp->fw_cap |= BNXT_FW_CAP_PTP;
9609 } else {
9610 bnxt_ptp_clear(bp);
9611 kfree(bp->ptp_cfg);
9612 bp->ptp_cfg = NULL;
9613 }
9614 } else {
9615 #ifdef CONFIG_BNXT_SRIOV
9616 struct bnxt_vf_info *vf = &bp->vf;
9617
9618 vf->fw_fid = le16_to_cpu(resp->fid);
9619 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9620 #endif
9621 }
9622 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9623
9624 hwrm_func_qcaps_exit:
9625 hwrm_req_drop(bp, req);
9626 return rc;
9627 }
9628
9629 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9630 {
9631 struct hwrm_dbg_qcaps_output *resp;
9632 struct hwrm_dbg_qcaps_input *req;
9633 int rc;
9634
9635 bp->fw_dbg_cap = 0;
9636 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9637 return;
9638
9639 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9640 if (rc)
9641 return;
9642
9643 req->fid = cpu_to_le16(0xffff);
9644 resp = hwrm_req_hold(bp, req);
9645 rc = hwrm_req_send(bp, req);
9646 if (rc)
9647 goto hwrm_dbg_qcaps_exit;
9648
9649 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9650
9651 hwrm_dbg_qcaps_exit:
9652 hwrm_req_drop(bp, req);
9653 }
9654
9655 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9656
9657 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9658 {
9659 int rc;
9660
9661 rc = __bnxt_hwrm_func_qcaps(bp);
9662 if (rc)
9663 return rc;
9664
9665 bnxt_hwrm_dbg_qcaps(bp);
9666
9667 rc = bnxt_hwrm_queue_qportcfg(bp);
9668 if (rc) {
9669 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9670 return rc;
9671 }
9672 if (bp->hwrm_spec_code >= 0x10803) {
9673 rc = bnxt_alloc_ctx_mem(bp);
9674 if (rc)
9675 return rc;
9676 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9677 if (!rc)
9678 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9679 }
9680 return 0;
9681 }
9682
9683 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9684 {
9685 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9686 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9687 u32 flags;
9688 int rc;
9689
9690 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9691 return 0;
9692
9693 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9694 if (rc)
9695 return rc;
9696
9697 resp = hwrm_req_hold(bp, req);
9698 rc = hwrm_req_send(bp, req);
9699 if (rc)
9700 goto hwrm_cfa_adv_qcaps_exit;
9701
9702 flags = le32_to_cpu(resp->flags);
9703 if (flags &
9704 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9705 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9706
9707 if (flags &
9708 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9709 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9710
9711 if (flags &
9712 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9713 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9714
9715 hwrm_cfa_adv_qcaps_exit:
9716 hwrm_req_drop(bp, req);
9717 return rc;
9718 }
9719
9720 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9721 {
9722 if (bp->fw_health)
9723 return 0;
9724
9725 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9726 if (!bp->fw_health)
9727 return -ENOMEM;
9728
9729 mutex_init(&bp->fw_health->lock);
9730 return 0;
9731 }
9732
9733 static int bnxt_alloc_fw_health(struct bnxt *bp)
9734 {
9735 int rc;
9736
9737 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9738 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9739 return 0;
9740
9741 rc = __bnxt_alloc_fw_health(bp);
9742 if (rc) {
9743 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9744 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9745 return rc;
9746 }
9747
9748 return 0;
9749 }
9750
9751 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9752 {
9753 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9754 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9755 BNXT_FW_HEALTH_WIN_MAP_OFF);
9756 }
9757
9758 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9759 {
9760 struct bnxt_fw_health *fw_health = bp->fw_health;
9761 u32 reg_type;
9762
9763 if (!fw_health)
9764 return;
9765
9766 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9767 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9768 fw_health->status_reliable = false;
9769
9770 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9771 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9772 fw_health->resets_reliable = false;
9773 }
9774
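/* Probe for the firmware health status register.  First map the
 * hcomm_status structure through a GRC window and validate its
 * signature; if it is not present, fall back to the fixed GRC status
 * location on P5+ chips.  The discovered register is then mapped and
 * marked reliable for status polling.
 */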
9775 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9776 {
9777 void __iomem *hs;
9778 u32 status_loc;
9779 u32 reg_type;
9780 u32 sig;
9781
9782 if (bp->fw_health)
9783 bp->fw_health->status_reliable = false;
9784
9785 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9786 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9787
9788 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9789 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9790 if (!bp->chip_num) {
9791 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9792 bp->chip_num = readl(bp->bar0 +
9793 BNXT_FW_HEALTH_WIN_BASE +
9794 BNXT_GRC_REG_CHIP_NUM);
9795 }
9796 if (!BNXT_CHIP_P5_PLUS(bp))
9797 return;
9798
9799 status_loc = BNXT_GRC_REG_STATUS_P5 |
9800 BNXT_FW_HEALTH_REG_TYPE_BAR0;
9801 } else {
9802 status_loc = readl(hs + offsetof(struct hcomm_status,
9803 fw_status_loc));
9804 }
9805
9806 if (__bnxt_alloc_fw_health(bp)) {
9807 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9808 return;
9809 }
9810
9811 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9812 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9813 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9814 __bnxt_map_fw_health_reg(bp, status_loc);
9815 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9816 BNXT_FW_HEALTH_WIN_OFF(status_loc);
9817 }
9818
9819 bp->fw_health->status_reliable = true;
9820 }
9821
9822 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9823 {
9824 struct bnxt_fw_health *fw_health = bp->fw_health;
9825 u32 reg_base = 0xffffffff;
9826 int i;
9827
9828 bp->fw_health->status_reliable = false;
9829 bp->fw_health->resets_reliable = false;
9830 /* Only pre-map the monitoring GRC registers using window 3 */
9831 for (i = 0; i < 4; i++) {
9832 u32 reg = fw_health->regs[i];
9833
9834 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9835 continue;
9836 if (reg_base == 0xffffffff)
9837 reg_base = reg & BNXT_GRC_BASE_MASK;
9838 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9839 return -ERANGE;
9840 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9841 }
9842 bp->fw_health->status_reliable = true;
9843 bp->fw_health->resets_reliable = true;
9844 if (reg_base == 0xffffffff)
9845 return 0;
9846
9847 __bnxt_map_fw_health_reg(bp, reg_base);
9848 return 0;
9849 }
9850
9851 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9852 {
9853 if (!bp->fw_health)
9854 return;
9855
9856 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9857 bp->fw_health->status_reliable = true;
9858 bp->fw_health->resets_reliable = true;
9859 } else {
9860 bnxt_try_map_fw_health_reg(bp);
9861 }
9862 }
9863
9864 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9865 {
9866 struct bnxt_fw_health *fw_health = bp->fw_health;
9867 struct hwrm_error_recovery_qcfg_output *resp;
9868 struct hwrm_error_recovery_qcfg_input *req;
9869 int rc, i;
9870
9871 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9872 return 0;
9873
9874 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9875 if (rc)
9876 return rc;
9877
9878 resp = hwrm_req_hold(bp, req);
9879 rc = hwrm_req_send(bp, req);
9880 if (rc)
9881 goto err_recovery_out;
9882 fw_health->flags = le32_to_cpu(resp->flags);
9883 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
9884 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9885 rc = -EINVAL;
9886 goto err_recovery_out;
9887 }
9888 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9889 fw_health->master_func_wait_dsecs =
9890 le32_to_cpu(resp->master_func_wait_period);
9891 fw_health->normal_func_wait_dsecs =
9892 le32_to_cpu(resp->normal_func_wait_period);
9893 fw_health->post_reset_wait_dsecs =
9894 le32_to_cpu(resp->master_func_wait_period_after_reset);
9895 fw_health->post_reset_max_wait_dsecs =
9896 le32_to_cpu(resp->max_bailout_time_after_reset);
9897 fw_health->regs[BNXT_FW_HEALTH_REG] =
9898 le32_to_cpu(resp->fw_health_status_reg);
9899 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
9900 le32_to_cpu(resp->fw_heartbeat_reg);
9901 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
9902 le32_to_cpu(resp->fw_reset_cnt_reg);
9903 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
9904 le32_to_cpu(resp->reset_inprogress_reg);
9905 fw_health->fw_reset_inprog_reg_mask =
9906 le32_to_cpu(resp->reset_inprogress_reg_mask);
9907 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
9908 if (fw_health->fw_reset_seq_cnt >= 16) {
9909 rc = -EINVAL;
9910 goto err_recovery_out;
9911 }
9912 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
9913 fw_health->fw_reset_seq_regs[i] =
9914 le32_to_cpu(resp->reset_reg[i]);
9915 fw_health->fw_reset_seq_vals[i] =
9916 le32_to_cpu(resp->reset_reg_val[i]);
9917 fw_health->fw_reset_seq_delay_msec[i] =
9918 resp->delay_after_reset[i];
9919 }
9920 err_recovery_out:
9921 hwrm_req_drop(bp, req);
9922 if (!rc)
9923 rc = bnxt_map_fw_health_regs(bp);
9924 if (rc)
9925 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9926 return rc;
9927 }
9928
9929 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9930 {
9931 struct hwrm_func_reset_input *req;
9932 int rc;
9933
9934 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9935 if (rc)
9936 return rc;
9937
9938 req->enables = 0;
9939 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9940 return hwrm_req_send(bp, req);
9941 }
9942
9943 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9944 {
9945 struct hwrm_nvm_get_dev_info_output nvm_info;
9946
9947 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9948 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9949 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
9950 nvm_info.nvm_cfg_ver_upd);
9951 }
9952
9953 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9954 {
9955 struct hwrm_queue_qportcfg_output *resp;
9956 struct hwrm_queue_qportcfg_input *req;
9957 u8 i, j, *qptr;
9958 bool no_rdma;
9959 int rc = 0;
9960
9961 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9962 if (rc)
9963 return rc;
9964
9965 resp = hwrm_req_hold(bp, req);
9966 rc = hwrm_req_send(bp, req);
9967 if (rc)
9968 goto qportcfg_exit;
9969
9970 if (!resp->max_configurable_queues) {
9971 rc = -EINVAL;
9972 goto qportcfg_exit;
9973 }
9974 bp->max_tc = resp->max_configurable_queues;
9975 bp->max_lltc = resp->max_configurable_lossless_queues;
9976 if (bp->max_tc > BNXT_MAX_QUEUE)
9977 bp->max_tc = BNXT_MAX_QUEUE;
9978
9979 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9980 qptr = &resp->queue_id0;
9981 for (i = 0, j = 0; i < bp->max_tc; i++) {
9982 bp->q_info[j].queue_id = *qptr;
9983 bp->q_ids[i] = *qptr++;
9984 bp->q_info[j].queue_profile = *qptr++;
9985 bp->tc_to_qidx[j] = j;
9986 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9987 (no_rdma && BNXT_PF(bp)))
9988 j++;
9989 }
9990 bp->max_q = bp->max_tc;
9991 bp->max_tc = max_t(u8, j, 1);
9992
9993 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
9994 bp->max_tc = 1;
9995
9996 if (bp->max_lltc > bp->max_tc)
9997 bp->max_lltc = bp->max_tc;
9998
9999 qportcfg_exit:
10000 hwrm_req_drop(bp, req);
10001 return rc;
10002 }
10003
10004 static int bnxt_hwrm_poll(struct bnxt *bp)
10005 {
10006 struct hwrm_ver_get_input *req;
10007 int rc;
10008
10009 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10010 if (rc)
10011 return rc;
10012
10013 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10014 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10015 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10016
10017 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10018 rc = hwrm_req_send(bp, req);
10019 return rc;
10020 }
10021
10022 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10023 {
10024 struct hwrm_ver_get_output *resp;
10025 struct hwrm_ver_get_input *req;
10026 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10027 u32 dev_caps_cfg, hwrm_ver;
10028 int rc, len;
10029
10030 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10031 if (rc)
10032 return rc;
10033
10034 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10035 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10036 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10037 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10038 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10039
10040 resp = hwrm_req_hold(bp, req);
10041 rc = hwrm_req_send(bp, req);
10042 if (rc)
10043 goto hwrm_ver_get_exit;
10044
10045 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10046
10047 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10048 resp->hwrm_intf_min_8b << 8 |
10049 resp->hwrm_intf_upd_8b;
10050 if (resp->hwrm_intf_maj_8b < 1) {
10051 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10052 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10053 resp->hwrm_intf_upd_8b);
10054 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10055 }
10056
10057 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10058 HWRM_VERSION_UPDATE;
10059
10060 if (bp->hwrm_spec_code > hwrm_ver)
10061 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10062 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10063 HWRM_VERSION_UPDATE);
10064 else
10065 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10066 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10067 resp->hwrm_intf_upd_8b);
10068
10069 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10070 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10071 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10072 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10073 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10074 len = FW_VER_STR_LEN;
10075 } else {
10076 fw_maj = resp->hwrm_fw_maj_8b;
10077 fw_min = resp->hwrm_fw_min_8b;
10078 fw_bld = resp->hwrm_fw_bld_8b;
10079 fw_rsv = resp->hwrm_fw_rsvd_8b;
10080 len = BC_HWRM_STR_LEN;
10081 }
10082 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10083 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10084 fw_rsv);
10085
10086 if (strlen(resp->active_pkg_name)) {
10087 int fw_ver_len = strlen(bp->fw_ver_str);
10088
10089 snprintf(bp->fw_ver_str + fw_ver_len,
10090 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10091 resp->active_pkg_name);
10092 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10093 }
10094
10095 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10096 if (!bp->hwrm_cmd_timeout)
10097 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10098 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10099 if (!bp->hwrm_cmd_max_timeout)
10100 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10101 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
10102 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
10103 bp->hwrm_cmd_max_timeout / 1000);
10104
10105 if (resp->hwrm_intf_maj_8b >= 1) {
10106 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10107 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10108 }
10109 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10110 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10111
10112 bp->chip_num = le16_to_cpu(resp->chip_num);
10113 bp->chip_rev = resp->chip_rev;
10114 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10115 !resp->chip_metal)
10116 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10117
10118 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10119 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10120 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10121 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10122
10123 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10124 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10125
10126 if (dev_caps_cfg &
10127 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10128 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10129
10130 if (dev_caps_cfg &
10131 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10132 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10133
10134 if (dev_caps_cfg &
10135 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10136 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10137
10138 hwrm_ver_get_exit:
10139 hwrm_req_drop(bp, req);
10140 return rc;
10141 }
10142
10143 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10144 {
10145 struct hwrm_fw_set_time_input *req;
10146 struct tm tm;
10147 time64_t now = ktime_get_real_seconds();
10148 int rc;
10149
10150 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10151 bp->hwrm_spec_code < 0x10400)
10152 return -EOPNOTSUPP;
10153
10154 time64_to_tm(now, 0, &tm);
10155 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10156 if (rc)
10157 return rc;
10158
10159 req->year = cpu_to_le16(1900 + tm.tm_year);
10160 req->month = 1 + tm.tm_mon;
10161 req->day = tm.tm_mday;
10162 req->hour = tm.tm_hour;
10163 req->minute = tm.tm_min;
10164 req->second = tm.tm_sec;
10165 return hwrm_req_send(bp, req);
10166 }
10167
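/* Accumulate a hardware counter that is only @mask bits wide into the
 * full 64-bit software counter.  The low bits of *sw are replaced with
 * the hardware value; if the hardware value is smaller than the low
 * bits previously stored, the counter has wrapped, so one full period
 * (mask + 1) is added.  Example with a 48-bit mask:
 * *sw = 0x1ffffffffff0 and hw = 0x10 yields 0x200000000010.
 */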
10168 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10169 {
10170 u64 sw_tmp;
10171
10172 hw &= mask;
10173 sw_tmp = (*sw & ~mask) | hw;
10174 if (hw < (*sw & mask))
10175 sw_tmp += mask + 1;
10176 WRITE_ONCE(*sw, sw_tmp);
10177 }
10178
10179 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10180 int count, bool ignore_zero)
10181 {
10182 int i;
10183
10184 for (i = 0; i < count; i++) {
10185 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10186
10187 if (ignore_zero && !hw)
10188 continue;
10189
10190 if (masks[i] == -1ULL)
10191 sw_stats[i] = hw;
10192 else
10193 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10194 }
10195 }
10196
10197 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10198 {
10199 if (!stats->hw_stats)
10200 return;
10201
10202 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10203 stats->hw_masks, stats->len / 8, false);
10204 }
10205
10206 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10207 {
10208 struct bnxt_stats_mem *ring0_stats;
10209 bool ignore_zero = false;
10210 int i;
10211
10212 /* Chip bug. Counter intermittently becomes 0. */
10213 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10214 ignore_zero = true;
10215
10216 for (i = 0; i < bp->cp_nr_rings; i++) {
10217 struct bnxt_napi *bnapi = bp->bnapi[i];
10218 struct bnxt_cp_ring_info *cpr;
10219 struct bnxt_stats_mem *stats;
10220
10221 cpr = &bnapi->cp_ring;
10222 stats = &cpr->stats;
10223 if (!i)
10224 ring0_stats = stats;
10225 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10226 ring0_stats->hw_masks,
10227 ring0_stats->len / 8, ignore_zero);
10228 }
10229 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10230 struct bnxt_stats_mem *stats = &bp->port_stats;
10231 __le64 *hw_stats = stats->hw_stats;
10232 u64 *sw_stats = stats->sw_stats;
10233 u64 *masks = stats->hw_masks;
10234 int cnt;
10235
10236 cnt = sizeof(struct rx_port_stats) / 8;
10237 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10238
10239 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10240 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10241 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10242 cnt = sizeof(struct tx_port_stats) / 8;
10243 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10244 }
10245 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10246 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10247 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10248 }
10249 }
10250
10251 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10252 {
10253 struct hwrm_port_qstats_input *req;
10254 struct bnxt_pf_info *pf = &bp->pf;
10255 int rc;
10256
10257 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10258 return 0;
10259
10260 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10261 return -EOPNOTSUPP;
10262
10263 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10264 if (rc)
10265 return rc;
10266
10267 req->flags = flags;
10268 req->port_id = cpu_to_le16(pf->port_id);
10269 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10270 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10271 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10272 return hwrm_req_send(bp, req);
10273 }
10274
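/* Collect the extended port statistics and, on success, refresh the
 * priority-to-CoS queue mapping (pri2cos_idx).  The mapping query is
 * skipped when @flags is non-zero or when the reported TX extended
 * stats do not include the per-priority duration counters.
 */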
10275 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10276 {
10277 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10278 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10279 struct hwrm_port_qstats_ext_output *resp_qs;
10280 struct hwrm_port_qstats_ext_input *req_qs;
10281 struct bnxt_pf_info *pf = &bp->pf;
10282 u32 tx_stat_size;
10283 int rc;
10284
10285 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10286 return 0;
10287
10288 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10289 return -EOPNOTSUPP;
10290
10291 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10292 if (rc)
10293 return rc;
10294
10295 req_qs->flags = flags;
10296 req_qs->port_id = cpu_to_le16(pf->port_id);
10297 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10298 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10299 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10300 sizeof(struct tx_port_stats_ext) : 0;
10301 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10302 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10303 resp_qs = hwrm_req_hold(bp, req_qs);
10304 rc = hwrm_req_send(bp, req_qs);
10305 if (!rc) {
10306 bp->fw_rx_stats_ext_size =
10307 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10308 if (BNXT_FW_MAJ(bp) < 220 &&
10309 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10310 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10311
10312 bp->fw_tx_stats_ext_size = tx_stat_size ?
10313 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10314 } else {
10315 bp->fw_rx_stats_ext_size = 0;
10316 bp->fw_tx_stats_ext_size = 0;
10317 }
10318 hwrm_req_drop(bp, req_qs);
10319
10320 if (flags)
10321 return rc;
10322
10323 if (bp->fw_tx_stats_ext_size <=
10324 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10325 bp->pri2cos_valid = 0;
10326 return rc;
10327 }
10328
10329 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10330 if (rc)
10331 return rc;
10332
10333 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10334
10335 resp_qc = hwrm_req_hold(bp, req_qc);
10336 rc = hwrm_req_send(bp, req_qc);
10337 if (!rc) {
10338 u8 *pri2cos;
10339 int i, j;
10340
10341 pri2cos = &resp_qc->pri0_cos_queue_id;
10342 for (i = 0; i < 8; i++) {
10343 u8 queue_id = pri2cos[i];
10344 u8 queue_idx;
10345
10346 /* Per port queue IDs start from 0, 10, 20, etc */
10347 queue_idx = queue_id % 10;
10348 if (queue_idx > BNXT_MAX_QUEUE) {
10349 bp->pri2cos_valid = false;
10350 hwrm_req_drop(bp, req_qc);
10351 return rc;
10352 }
10353 for (j = 0; j < bp->max_q; j++) {
10354 if (bp->q_ids[j] == queue_id)
10355 bp->pri2cos_idx[i] = queue_idx;
10356 }
10357 }
10358 bp->pri2cos_valid = true;
10359 }
10360 hwrm_req_drop(bp, req_qc);
10361
10362 return rc;
10363 }
10364
10365 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10366 {
10367 bnxt_hwrm_tunnel_dst_port_free(bp,
10368 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10369 bnxt_hwrm_tunnel_dst_port_free(bp,
10370 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10371 }
10372
10373 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10374 {
10375 int rc, i;
10376 u32 tpa_flags = 0;
10377
10378 if (set_tpa)
10379 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10380 else if (BNXT_NO_FW_ACCESS(bp))
10381 return 0;
10382 for (i = 0; i < bp->nr_vnics; i++) {
10383 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10384 if (rc) {
10385 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10386 i, rc);
10387 return rc;
10388 }
10389 }
10390 return 0;
10391 }
10392
10393 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10394 {
10395 int i;
10396
10397 for (i = 0; i < bp->nr_vnics; i++)
10398 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10399 }
10400
10401 static void bnxt_clear_vnic(struct bnxt *bp)
10402 {
10403 if (!bp->vnic_info)
10404 return;
10405
10406 bnxt_hwrm_clear_vnic_filter(bp);
10407 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10408 		/* clear all RSS settings before freeing the vnic ctx */
10409 bnxt_hwrm_clear_vnic_rss(bp);
10410 bnxt_hwrm_vnic_ctx_free(bp);
10411 }
10412 	/* before freeing the vnic, undo the vnic TPA settings */
10413 if (bp->flags & BNXT_FLAG_TPA)
10414 bnxt_set_tpa(bp, false);
10415 bnxt_hwrm_vnic_free(bp);
10416 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10417 bnxt_hwrm_vnic_ctx_free(bp);
10418 }
10419
10420 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10421 bool irq_re_init)
10422 {
10423 bnxt_clear_vnic(bp);
10424 bnxt_hwrm_ring_free(bp, close_path);
10425 bnxt_hwrm_ring_grp_free(bp);
10426 if (irq_re_init) {
10427 bnxt_hwrm_stat_ctx_free(bp);
10428 bnxt_hwrm_free_tunnel_ports(bp);
10429 }
10430 }
10431
10432 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10433 {
10434 struct hwrm_func_cfg_input *req;
10435 u8 evb_mode;
10436 int rc;
10437
10438 if (br_mode == BRIDGE_MODE_VEB)
10439 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10440 else if (br_mode == BRIDGE_MODE_VEPA)
10441 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10442 else
10443 return -EINVAL;
10444
10445 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10446 if (rc)
10447 return rc;
10448
10449 req->fid = cpu_to_le16(0xffff);
10450 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10451 req->evb_mode = evb_mode;
10452 return hwrm_req_send(bp, req);
10453 }
10454
10455 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10456 {
10457 struct hwrm_func_cfg_input *req;
10458 int rc;
10459
10460 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10461 return 0;
10462
10463 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10464 if (rc)
10465 return rc;
10466
10467 req->fid = cpu_to_le16(0xffff);
10468 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10469 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10470 if (size == 128)
10471 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10472
10473 return hwrm_req_send(bp, req);
10474 }
10475
10476 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10477 {
10478 int rc;
10479
10480 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10481 goto skip_rss_ctx;
10482
10483 /* allocate context for vnic */
10484 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10485 if (rc) {
10486 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10487 vnic->vnic_id, rc);
10488 goto vnic_setup_err;
10489 }
10490 bp->rsscos_nr_ctxs++;
10491
10492 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10493 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10494 if (rc) {
10495 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10496 vnic->vnic_id, rc);
10497 goto vnic_setup_err;
10498 }
10499 bp->rsscos_nr_ctxs++;
10500 }
10501
10502 skip_rss_ctx:
10503 /* configure default vnic, ring grp */
10504 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10505 if (rc) {
10506 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10507 vnic->vnic_id, rc);
10508 goto vnic_setup_err;
10509 }
10510
10511 /* Enable RSS hashing on vnic */
10512 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10513 if (rc) {
10514 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10515 vnic->vnic_id, rc);
10516 goto vnic_setup_err;
10517 }
10518
10519 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10520 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10521 if (rc) {
10522 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10523 vnic->vnic_id, rc);
10524 }
10525 }
10526
10527 vnic_setup_err:
10528 return rc;
10529 }
10530
10531 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10532 u8 valid)
10533 {
10534 struct hwrm_vnic_update_input *req;
10535 int rc;
10536
10537 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10538 if (rc)
10539 return rc;
10540
10541 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10542
10543 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10544 req->mru = cpu_to_le16(vnic->mru);
10545
10546 req->enables = cpu_to_le32(valid);
10547
10548 return hwrm_req_send(bp, req);
10549 }
10550
10551 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10552 {
10553 int rc;
10554
10555 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10556 if (rc) {
10557 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10558 vnic->vnic_id, rc);
10559 return rc;
10560 }
10561 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10562 if (rc)
10563 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10564 vnic->vnic_id, rc);
10565 return rc;
10566 }
10567
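/* P5_PLUS variant of VNIC setup: allocate one RSS context per group of
 * RX rings, then program RSS and header-data split.
 */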
10568 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10569 {
10570 int rc, i, nr_ctxs;
10571
10572 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10573 for (i = 0; i < nr_ctxs; i++) {
10574 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10575 if (rc) {
10576 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10577 vnic->vnic_id, i, rc);
10578 break;
10579 }
10580 bp->rsscos_nr_ctxs++;
10581 }
10582 if (i < nr_ctxs)
10583 return -ENOMEM;
10584
10585 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10586 if (rc)
10587 return rc;
10588
10589 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10590 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10591 if (rc) {
10592 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10593 vnic->vnic_id, rc);
10594 }
10595 }
10596 return rc;
10597 }
10598
10599 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10600 {
10601 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10602 return __bnxt_setup_vnic_p5(bp, vnic);
10603 else
10604 return __bnxt_setup_vnic(bp, vnic);
10605 }
10606
10607 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10608 struct bnxt_vnic_info *vnic,
10609 u16 start_rx_ring_idx, int rx_rings)
10610 {
10611 int rc;
10612
10613 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10614 if (rc) {
10615 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10616 vnic->vnic_id, rc);
10617 return rc;
10618 }
10619 return bnxt_setup_vnic(bp, vnic);
10620 }
10621
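/* Allocate the VNICs needed for RFS: a single ntuple VNIC when the
 * device supports it, otherwise one VNIC per RX ring on older chips.
 */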
10622 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10623 {
10624 struct bnxt_vnic_info *vnic;
10625 int i, rc = 0;
10626
10627 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10628 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10629 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10630 }
10631
10632 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10633 return 0;
10634
10635 for (i = 0; i < bp->rx_nr_rings; i++) {
10636 u16 vnic_id = i + 1;
10637 u16 ring_id = i;
10638
10639 if (vnic_id >= bp->nr_vnics)
10640 break;
10641
10642 vnic = &bp->vnic_info[vnic_id];
10643 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10644 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10645 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10646 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10647 break;
10648 }
10649 return rc;
10650 }
10651
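/* Free the hardware VNIC and RSS contexts backing an ethtool RSS
 * context.  When @all is true, also remove the user ntuple filters
 * that reference it and free its RSS table DMA memory.
 */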
10652 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10653 bool all)
10654 {
10655 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10656 struct bnxt_filter_base *usr_fltr, *tmp;
10657 struct bnxt_ntuple_filter *ntp_fltr;
10658 int i;
10659
10660 if (netif_running(bp->dev)) {
10661 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10662 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10663 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10664 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10665 }
10666 }
10667 if (!all)
10668 return;
10669
10670 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10671 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10672 usr_fltr->fw_vnic_id == rss_ctx->index) {
10673 ntp_fltr = container_of(usr_fltr,
10674 struct bnxt_ntuple_filter,
10675 base);
10676 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10677 bnxt_del_ntp_filter(bp, ntp_fltr);
10678 bnxt_del_one_usr_fltr(bp, usr_fltr);
10679 }
10680 }
10681
10682 if (vnic->rss_table)
10683 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10684 vnic->rss_table,
10685 vnic->rss_table_dma_addr);
10686 bp->num_rss_ctx--;
10687 }
10688
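/* Re-create the VNIC for every user RSS context.  Contexts that cannot
 * be restored are deleted and reported as lost to ethtool.
 */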
10689 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10690 {
10691 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10692 struct ethtool_rxfh_context *ctx;
10693 unsigned long context;
10694
10695 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10696 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10697 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10698
10699 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10700 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10701 __bnxt_setup_vnic_p5(bp, vnic)) {
10702 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10703 rss_ctx->index);
10704 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10705 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10706 }
10707 }
10708 }
10709
10710 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10711 {
10712 struct ethtool_rxfh_context *ctx;
10713 unsigned long context;
10714
10715 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10716 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10717
10718 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10719 }
10720 }
10721
10722 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10723 static bool bnxt_promisc_ok(struct bnxt *bp)
10724 {
10725 #ifdef CONFIG_BNXT_SRIOV
10726 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10727 return false;
10728 #endif
10729 return true;
10730 }
10731
10732 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10733 {
10734 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10735 int rc = 0;
10736
10737 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10738 if (rc) {
10739 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10740 rc);
10741 return rc;
10742 }
10743
10744 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10745 if (rc) {
10746 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10747 rc);
10748 return rc;
10749 }
10750 return rc;
10751 }
10752
10753 static int bnxt_cfg_rx_mode(struct bnxt *);
10754 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10755
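/* Configure the firmware resources needed to pass traffic: stat
 * contexts, rings and ring groups, the default VNIC and RFS VNICs,
 * TPA, the default L2 filter, RX mode and interrupt coalescing.
 */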
10756 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10757 {
10758 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10759 int rc = 0;
10760 unsigned int rx_nr_rings = bp->rx_nr_rings;
10761
10762 if (irq_re_init) {
10763 rc = bnxt_hwrm_stat_ctx_alloc(bp);
10764 if (rc) {
10765 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10766 rc);
10767 goto err_out;
10768 }
10769 }
10770
10771 rc = bnxt_hwrm_ring_alloc(bp);
10772 if (rc) {
10773 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10774 goto err_out;
10775 }
10776
10777 rc = bnxt_hwrm_ring_grp_alloc(bp);
10778 if (rc) {
10779 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10780 goto err_out;
10781 }
10782
10783 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10784 rx_nr_rings--;
10785
10786 /* default vnic 0 */
10787 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10788 if (rc) {
10789 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10790 goto err_out;
10791 }
10792
10793 if (BNXT_VF(bp))
10794 bnxt_hwrm_func_qcfg(bp);
10795
10796 rc = bnxt_setup_vnic(bp, vnic);
10797 if (rc)
10798 goto err_out;
10799 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
10800 bnxt_hwrm_update_rss_hash_cfg(bp);
10801
10802 if (bp->flags & BNXT_FLAG_RFS) {
10803 rc = bnxt_alloc_rfs_vnics(bp);
10804 if (rc)
10805 goto err_out;
10806 }
10807
10808 if (bp->flags & BNXT_FLAG_TPA) {
10809 rc = bnxt_set_tpa(bp, true);
10810 if (rc)
10811 goto err_out;
10812 }
10813
10814 if (BNXT_VF(bp))
10815 bnxt_update_vf_mac(bp);
10816
10817 /* Filter for default vnic 0 */
10818 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10819 if (rc) {
10820 if (BNXT_VF(bp) && rc == -ENODEV)
10821 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10822 else
10823 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10824 goto err_out;
10825 }
10826 vnic->uc_filter_count = 1;
10827
10828 vnic->rx_mask = 0;
10829 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
10830 goto skip_rx_mask;
10831
10832 if (bp->dev->flags & IFF_BROADCAST)
10833 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10834
10835 if (bp->dev->flags & IFF_PROMISC)
10836 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10837
10838 if (bp->dev->flags & IFF_ALLMULTI) {
10839 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10840 vnic->mc_list_count = 0;
10841 } else if (bp->dev->flags & IFF_MULTICAST) {
10842 u32 mask = 0;
10843
10844 bnxt_mc_list_updated(bp, &mask);
10845 vnic->rx_mask |= mask;
10846 }
10847
10848 rc = bnxt_cfg_rx_mode(bp);
10849 if (rc)
10850 goto err_out;
10851
10852 skip_rx_mask:
10853 rc = bnxt_hwrm_set_coal(bp);
10854 if (rc)
10855 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10856 rc);
10857
10858 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10859 rc = bnxt_setup_nitroa0_vnic(bp);
10860 if (rc)
10861 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10862 rc);
10863 }
10864
10865 if (BNXT_VF(bp)) {
10866 bnxt_hwrm_func_qcfg(bp);
10867 netdev_update_features(bp->dev);
10868 }
10869
10870 return 0;
10871
10872 err_out:
10873 bnxt_hwrm_resource_free(bp, 0, true);
10874
10875 return rc;
10876 }
10877
10878 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
10879 {
10880 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10881 return 0;
10882 }
10883
10884 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
10885 {
10886 bnxt_init_cp_rings(bp);
10887 bnxt_init_rx_rings(bp);
10888 bnxt_init_tx_rings(bp);
10889 bnxt_init_ring_grps(bp, irq_re_init);
10890 bnxt_init_vnics(bp);
10891
10892 return bnxt_init_chip(bp, irq_re_init);
10893 }
10894
10895 static int bnxt_set_real_num_queues(struct bnxt *bp)
10896 {
10897 int rc;
10898 struct net_device *dev = bp->dev;
10899
10900 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10901 bp->tx_nr_rings_xdp);
10902 if (rc)
10903 return rc;
10904
10905 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10906 if (rc)
10907 return rc;
10908
10909 #ifdef CONFIG_RFS_ACCEL
10910 if (bp->flags & BNXT_FLAG_RFS)
10911 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10912 #endif
10913
10914 return rc;
10915 }
10916
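/* Fit the requested RX and TX ring counts within @max: cap each count
 * when rings are shared, otherwise shrink the larger side until the
 * sum fits.
 */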
10917 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10918 bool shared)
10919 {
10920 int _rx = *rx, _tx = *tx;
10921
10922 if (shared) {
10923 *rx = min_t(int, _rx, max);
10924 *tx = min_t(int, _tx, max);
10925 } else {
10926 if (max < 2)
10927 return -ENOMEM;
10928
10929 while (_rx + _tx > max) {
10930 if (_rx > _tx && _rx > 1)
10931 _rx--;
10932 else if (_tx > 1)
10933 _tx--;
10934 }
10935 *rx = _rx;
10936 *tx = _tx;
10937 }
10938 return 0;
10939 }
10940
10941 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
10942 {
10943 return (tx - tx_xdp) / tx_sets + tx_xdp;
10944 }
10945
10946 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
10947 {
10948 int tcs = bp->num_tc;
10949
10950 if (!tcs)
10951 tcs = 1;
10952 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10953 }
10954
10955 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
10956 {
10957 int tcs = bp->num_tc;
10958
10959 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
10960 bp->tx_nr_rings_xdp;
10961 }
10962
10963 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10964 bool sh)
10965 {
10966 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10967
10968 if (tx_cp != *tx) {
10969 int tx_saved = tx_cp, rc;
10970
10971 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10972 if (rc)
10973 return rc;
10974 if (tx_cp != tx_saved)
10975 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
10976 return 0;
10977 }
10978 return __bnxt_trim_rings(bp, rx, tx, max, sh);
10979 }
10980
10981 static void bnxt_setup_msix(struct bnxt *bp)
10982 {
10983 const int len = sizeof(bp->irq_tbl[0].name);
10984 struct net_device *dev = bp->dev;
10985 int tcs, i;
10986
10987 tcs = bp->num_tc;
10988 if (tcs) {
10989 int i, off, count;
10990
10991 for (i = 0; i < tcs; i++) {
10992 count = bp->tx_nr_rings_per_tc;
10993 off = BNXT_TC_TO_RING_BASE(bp, i);
10994 netdev_set_tc_queue(dev, i, count, off);
10995 }
10996 }
10997
10998 for (i = 0; i < bp->cp_nr_rings; i++) {
10999 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11000 char *attr;
11001
11002 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11003 attr = "TxRx";
11004 else if (i < bp->rx_nr_rings)
11005 attr = "rx";
11006 else
11007 attr = "tx";
11008
11009 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11010 attr, i);
11011 bp->irq_tbl[map_idx].handler = bnxt_msix;
11012 }
11013 }
11014
11015 static int bnxt_init_int_mode(struct bnxt *bp);
11016
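/* Dynamically grow or shrink the allocated MSI-X vectors to @total and
 * return the resulting vector count.
 */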
11017 static int bnxt_change_msix(struct bnxt *bp, int total)
11018 {
11019 struct msi_map map;
11020 int i;
11021
11022 /* add MSIX to the end if needed */
11023 for (i = bp->total_irqs; i < total; i++) {
11024 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11025 if (map.index < 0)
11026 return bp->total_irqs;
11027 bp->irq_tbl[i].vector = map.virq;
11028 bp->total_irqs++;
11029 }
11030
11031 /* trim MSIX from the end if needed */
11032 for (i = bp->total_irqs; i > total; i--) {
11033 map.index = i - 1;
11034 map.virq = bp->irq_tbl[i - 1].vector;
11035 pci_msix_free_irq(bp->pdev, map);
11036 bp->total_irqs--;
11037 }
11038 return bp->total_irqs;
11039 }
11040
11041 static int bnxt_setup_int_mode(struct bnxt *bp)
11042 {
11043 int rc;
11044
11045 if (!bp->irq_tbl) {
11046 rc = bnxt_init_int_mode(bp);
11047 if (rc || !bp->irq_tbl)
11048 return rc ?: -ENODEV;
11049 }
11050
11051 bnxt_setup_msix(bp);
11052
11053 rc = bnxt_set_real_num_queues(bp);
11054 return rc;
11055 }
11056
11057 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11058 {
11059 return bp->hw_resc.max_rsscos_ctxs;
11060 }
11061
11062 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11063 {
11064 return bp->hw_resc.max_vnics;
11065 }
11066
11067 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11068 {
11069 return bp->hw_resc.max_stat_ctxs;
11070 }
11071
11072 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11073 {
11074 return bp->hw_resc.max_cp_rings;
11075 }
11076
11077 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11078 {
11079 unsigned int cp = bp->hw_resc.max_cp_rings;
11080
11081 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11082 cp -= bnxt_get_ulp_msix_num(bp);
11083
11084 return cp;
11085 }
11086
11087 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11088 {
11089 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11090
11091 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11092 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11093
11094 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11095 }
11096
11097 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11098 {
11099 bp->hw_resc.max_irqs = max_irqs;
11100 }
11101
11102 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11103 {
11104 unsigned int cp;
11105
11106 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11107 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11108 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11109 else
11110 return cp - bp->cp_nr_rings;
11111 }
11112
11113 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11114 {
11115 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11116 }
11117
11118 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11119 {
11120 int max_irq = bnxt_get_max_func_irqs(bp);
11121 int total_req = bp->cp_nr_rings + num;
11122
11123 if (max_irq < total_req) {
11124 num = max_irq - bp->cp_nr_rings;
11125 if (num <= 0)
11126 return 0;
11127 }
11128 return num;
11129 }
11130
11131 static int bnxt_get_num_msix(struct bnxt *bp)
11132 {
11133 if (!BNXT_NEW_RM(bp))
11134 return bnxt_get_max_func_irqs(bp);
11135
11136 return bnxt_nq_rings_in_use(bp);
11137 }
11138
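/* Allocate MSI-X vectors, build the IRQ table and trim the RX/TX ring
 * counts to fit the vectors actually obtained.
 */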
11139 static int bnxt_init_int_mode(struct bnxt *bp)
11140 {
11141 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11142
11143 total_vecs = bnxt_get_num_msix(bp);
11144 max = bnxt_get_max_func_irqs(bp);
11145 if (total_vecs > max)
11146 total_vecs = max;
11147
11148 if (!total_vecs)
11149 return 0;
11150
11151 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11152 min = 2;
11153
11154 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11155 PCI_IRQ_MSIX);
11156 ulp_msix = bnxt_get_ulp_msix_num(bp);
11157 if (total_vecs < 0 || total_vecs < ulp_msix) {
11158 rc = -ENODEV;
11159 goto msix_setup_exit;
11160 }
11161
11162 tbl_size = total_vecs;
11163 if (pci_msix_can_alloc_dyn(bp->pdev))
11164 tbl_size = max;
11165 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11166 if (bp->irq_tbl) {
11167 for (i = 0; i < total_vecs; i++)
11168 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11169
11170 bp->total_irqs = total_vecs;
11171 /* Trim rings based upon num of vectors allocated */
11172 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11173 total_vecs - ulp_msix, min == 1);
11174 if (rc)
11175 goto msix_setup_exit;
11176
11177 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11178 bp->cp_nr_rings = (min == 1) ?
11179 max_t(int, tx_cp, bp->rx_nr_rings) :
11180 tx_cp + bp->rx_nr_rings;
11181
11182 } else {
11183 rc = -ENOMEM;
11184 goto msix_setup_exit;
11185 }
11186 return 0;
11187
11188 msix_setup_exit:
11189 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11190 kfree(bp->irq_tbl);
11191 bp->irq_tbl = NULL;
11192 pci_free_irq_vectors(bp->pdev);
11193 return rc;
11194 }
11195
11196 static void bnxt_clear_int_mode(struct bnxt *bp)
11197 {
11198 pci_free_irq_vectors(bp->pdev);
11199
11200 kfree(bp->irq_tbl);
11201 bp->irq_tbl = NULL;
11202 }
11203
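/* Reserve rings and IRQs with the firmware resource manager,
 * re-initializing or resizing MSI-X when the required vector count
 * changes.
 */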
11204 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11205 {
11206 bool irq_cleared = false;
11207 bool irq_change = false;
11208 int tcs = bp->num_tc;
11209 int irqs_required;
11210 int rc;
11211
11212 if (!bnxt_need_reserve_rings(bp))
11213 return 0;
11214
11215 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11216 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11217
11218 if (ulp_msix > bp->ulp_num_msix_want)
11219 ulp_msix = bp->ulp_num_msix_want;
11220 irqs_required = ulp_msix + bp->cp_nr_rings;
11221 } else {
11222 irqs_required = bnxt_get_num_msix(bp);
11223 }
11224
11225 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11226 irq_change = true;
11227 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11228 bnxt_ulp_irq_stop(bp);
11229 bnxt_clear_int_mode(bp);
11230 irq_cleared = true;
11231 }
11232 }
11233 rc = __bnxt_reserve_rings(bp);
11234 if (irq_cleared) {
11235 if (!rc)
11236 rc = bnxt_init_int_mode(bp);
11237 bnxt_ulp_irq_restart(bp, rc);
11238 } else if (irq_change && !rc) {
11239 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11240 rc = -ENOSPC;
11241 }
11242 if (rc) {
11243 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11244 return rc;
11245 }
11246 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11247 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11248 netdev_err(bp->dev, "tx ring reservation failure\n");
11249 netdev_reset_tc(bp->dev);
11250 bp->num_tc = 0;
11251 if (bp->tx_nr_rings_xdp)
11252 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11253 else
11254 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11255 return -ENOMEM;
11256 }
11257 return 0;
11258 }
11259
11260 static void bnxt_free_irq(struct bnxt *bp)
11261 {
11262 struct bnxt_irq *irq;
11263 int i;
11264
11265 #ifdef CONFIG_RFS_ACCEL
11266 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11267 bp->dev->rx_cpu_rmap = NULL;
11268 #endif
11269 if (!bp->irq_tbl || !bp->bnapi)
11270 return;
11271
11272 for (i = 0; i < bp->cp_nr_rings; i++) {
11273 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11274
11275 irq = &bp->irq_tbl[map_idx];
11276 if (irq->requested) {
11277 if (irq->have_cpumask) {
11278 irq_update_affinity_hint(irq->vector, NULL);
11279 free_cpumask_var(irq->cpu_mask);
11280 irq->have_cpumask = 0;
11281 }
11282 free_irq(irq->vector, bp->bnapi[i]);
11283 }
11284
11285 irq->requested = 0;
11286 }
11287 }
11288
11289 static int bnxt_request_irq(struct bnxt *bp)
11290 {
11291 int i, j, rc = 0;
11292 unsigned long flags = 0;
11293 #ifdef CONFIG_RFS_ACCEL
11294 struct cpu_rmap *rmap;
11295 #endif
11296
11297 rc = bnxt_setup_int_mode(bp);
11298 if (rc) {
11299 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11300 rc);
11301 return rc;
11302 }
11303 #ifdef CONFIG_RFS_ACCEL
11304 rmap = bp->dev->rx_cpu_rmap;
11305 #endif
11306 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11307 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11308 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11309
11310 #ifdef CONFIG_RFS_ACCEL
11311 if (rmap && bp->bnapi[i]->rx_ring) {
11312 rc = irq_cpu_rmap_add(rmap, irq->vector);
11313 if (rc)
11314 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11315 j);
11316 j++;
11317 }
11318 #endif
11319 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11320 bp->bnapi[i]);
11321 if (rc)
11322 break;
11323
11324 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
11325 irq->requested = 1;
11326
11327 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11328 int numa_node = dev_to_node(&bp->pdev->dev);
11329
11330 irq->have_cpumask = 1;
11331 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11332 irq->cpu_mask);
11333 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11334 if (rc) {
11335 netdev_warn(bp->dev,
11336 "Update affinity hint failed, IRQ = %d\n",
11337 irq->vector);
11338 break;
11339 }
11340 }
11341 }
11342 return rc;
11343 }
11344
11345 static void bnxt_del_napi(struct bnxt *bp)
11346 {
11347 int i;
11348
11349 if (!bp->bnapi)
11350 return;
11351
11352 for (i = 0; i < bp->rx_nr_rings; i++)
11353 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11354 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11355 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11356
11357 for (i = 0; i < bp->cp_nr_rings; i++) {
11358 struct bnxt_napi *bnapi = bp->bnapi[i];
11359
11360 __netif_napi_del(&bnapi->napi);
11361 }
11362 /* Since we called __netif_napi_del(), we need to respect an RCU
11363 * grace period before freeing the napi structures.
11364 */
11365 synchronize_net();
11366 }
11367
11368 static void bnxt_init_napi(struct bnxt *bp)
11369 {
11370 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11371 unsigned int cp_nr_rings = bp->cp_nr_rings;
11372 struct bnxt_napi *bnapi;
11373 int i;
11374
11375 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11376 poll_fn = bnxt_poll_p5;
11377 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11378 cp_nr_rings--;
11379 for (i = 0; i < cp_nr_rings; i++) {
11380 bnapi = bp->bnapi[i];
11381 netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
11382 bnapi->index);
11383 }
11384 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11385 bnapi = bp->bnapi[cp_nr_rings];
11386 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11387 }
11388 }
11389
11390 static void bnxt_disable_napi(struct bnxt *bp)
11391 {
11392 int i;
11393
11394 if (!bp->bnapi ||
11395 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11396 return;
11397
11398 for (i = 0; i < bp->cp_nr_rings; i++) {
11399 struct bnxt_napi *bnapi = bp->bnapi[i];
11400 struct bnxt_cp_ring_info *cpr;
11401
11402 cpr = &bnapi->cp_ring;
11403 if (bnapi->tx_fault)
11404 cpr->sw_stats->tx.tx_resets++;
11405 if (bnapi->in_reset)
11406 cpr->sw_stats->rx.rx_resets++;
11407 napi_disable(&bnapi->napi);
11408 }
11409 }
11410
11411 static void bnxt_enable_napi(struct bnxt *bp)
11412 {
11413 int i;
11414
11415 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11416 for (i = 0; i < bp->cp_nr_rings; i++) {
11417 struct bnxt_napi *bnapi = bp->bnapi[i];
11418 struct bnxt_cp_ring_info *cpr;
11419
11420 bnapi->tx_fault = 0;
11421
11422 cpr = &bnapi->cp_ring;
11423 bnapi->in_reset = false;
11424
11425 if (bnapi->rx_ring) {
11426 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11427 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11428 }
11429 napi_enable(&bnapi->napi);
11430 }
11431 }
11432
11433 void bnxt_tx_disable(struct bnxt *bp)
11434 {
11435 int i;
11436 struct bnxt_tx_ring_info *txr;
11437
11438 if (bp->tx_ring) {
11439 for (i = 0; i < bp->tx_nr_rings; i++) {
11440 txr = &bp->tx_ring[i];
11441 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11442 }
11443 }
11444 /* Make sure napi polls see @dev_state change */
11445 synchronize_net();
11446 /* Drop carrier first to prevent TX timeout */
11447 netif_carrier_off(bp->dev);
11448 /* Stop all TX queues */
11449 netif_tx_disable(bp->dev);
11450 }
11451
11452 void bnxt_tx_enable(struct bnxt *bp)
11453 {
11454 int i;
11455 struct bnxt_tx_ring_info *txr;
11456
11457 for (i = 0; i < bp->tx_nr_rings; i++) {
11458 txr = &bp->tx_ring[i];
11459 WRITE_ONCE(txr->dev_state, 0);
11460 }
11461 /* Make sure napi polls see @dev_state change */
11462 synchronize_net();
11463 netif_tx_wake_all_queues(bp->dev);
11464 if (BNXT_LINK_IS_UP(bp))
11465 netif_carrier_on(bp->dev);
11466 }
11467
11468 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11469 {
11470 u8 active_fec = link_info->active_fec_sig_mode &
11471 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11472
11473 switch (active_fec) {
11474 default:
11475 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11476 return "None";
11477 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11478 return "Clause 74 BaseR";
11479 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11480 return "Clause 91 RS(528,514)";
11481 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11482 return "Clause 91 RS544_1XN";
11483 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11484 return "Clause 91 RS(544,514)";
11485 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11486 return "Clause 91 RS272_1XN";
11487 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11488 return "Clause 91 RS(272,257)";
11489 }
11490 }
11491
11492 void bnxt_report_link(struct bnxt *bp)
11493 {
11494 if (BNXT_LINK_IS_UP(bp)) {
11495 const char *signal = "";
11496 const char *flow_ctrl;
11497 const char *duplex;
11498 u32 speed;
11499 u16 fec;
11500
11501 netif_carrier_on(bp->dev);
11502 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11503 if (speed == SPEED_UNKNOWN) {
11504 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11505 return;
11506 }
11507 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11508 duplex = "full";
11509 else
11510 duplex = "half";
11511 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11512 flow_ctrl = "ON - receive & transmit";
11513 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11514 flow_ctrl = "ON - transmit";
11515 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11516 flow_ctrl = "ON - receive";
11517 else
11518 flow_ctrl = "none";
11519 if (bp->link_info.phy_qcfg_resp.option_flags &
11520 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11521 u8 sig_mode = bp->link_info.active_fec_sig_mode &
11522 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
11523 switch (sig_mode) {
11524 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
11525 signal = "(NRZ) ";
11526 break;
11527 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
11528 signal = "(PAM4 56Gbps) ";
11529 break;
11530 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
11531 signal = "(PAM4 112Gbps) ";
11532 break;
11533 default:
11534 break;
11535 }
11536 }
11537 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
11538 speed, signal, duplex, flow_ctrl);
11539 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
11540 netdev_info(bp->dev, "EEE is %s\n",
11541 bp->eee.eee_active ? "active" :
11542 "not active");
11543 fec = bp->link_info.fec_cfg;
11544 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
11545 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
11546 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
11547 bnxt_report_fec(&bp->link_info));
11548 } else {
11549 netif_carrier_off(bp->dev);
11550 netdev_err(bp->dev, "NIC Link is Down\n");
11551 }
11552 }
11553
11554 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
11555 {
11556 if (!resp->supported_speeds_auto_mode &&
11557 !resp->supported_speeds_force_mode &&
11558 !resp->supported_pam4_speeds_auto_mode &&
11559 !resp->supported_pam4_speeds_force_mode &&
11560 !resp->supported_speeds2_auto_mode &&
11561 !resp->supported_speeds2_force_mode)
11562 return true;
11563 return false;
11564 }
11565
11566 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
11567 {
11568 struct bnxt_link_info *link_info = &bp->link_info;
11569 struct hwrm_port_phy_qcaps_output *resp;
11570 struct hwrm_port_phy_qcaps_input *req;
11571 int rc = 0;
11572
11573 if (bp->hwrm_spec_code < 0x10201)
11574 return 0;
11575
11576 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
11577 if (rc)
11578 return rc;
11579
11580 resp = hwrm_req_hold(bp, req);
11581 rc = hwrm_req_send(bp, req);
11582 if (rc)
11583 goto hwrm_phy_qcaps_exit;
11584
11585 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11586 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11587 struct ethtool_keee *eee = &bp->eee;
11588 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11589
11590 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11591 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11592 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11593 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11594 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11595 }
11596
11597 if (bp->hwrm_spec_code >= 0x10a01) {
11598 if (bnxt_phy_qcaps_no_speed(resp)) {
11599 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
11600 netdev_warn(bp->dev, "Ethernet link disabled\n");
11601 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
11602 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
11603 netdev_info(bp->dev, "Ethernet link enabled\n");
11604 /* Phy re-enabled, reprobe the speeds */
11605 link_info->support_auto_speeds = 0;
11606 link_info->support_pam4_auto_speeds = 0;
11607 link_info->support_auto_speeds2 = 0;
11608 }
11609 }
11610 if (resp->supported_speeds_auto_mode)
11611 link_info->support_auto_speeds =
11612 le16_to_cpu(resp->supported_speeds_auto_mode);
11613 if (resp->supported_pam4_speeds_auto_mode)
11614 link_info->support_pam4_auto_speeds =
11615 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
11616 if (resp->supported_speeds2_auto_mode)
11617 link_info->support_auto_speeds2 =
11618 le16_to_cpu(resp->supported_speeds2_auto_mode);
11619
11620 bp->port_count = resp->port_cnt;
11621
11622 hwrm_phy_qcaps_exit:
11623 hwrm_req_drop(bp, req);
11624 return rc;
11625 }
11626
11627 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
11628 {
11629 struct hwrm_port_mac_qcaps_output *resp;
11630 struct hwrm_port_mac_qcaps_input *req;
11631 int rc;
11632
11633 if (bp->hwrm_spec_code < 0x10a03)
11634 return;
11635
11636 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
11637 if (rc)
11638 return;
11639
11640 resp = hwrm_req_hold(bp, req);
11641 rc = hwrm_req_send_silent(bp, req);
11642 if (!rc)
11643 bp->mac_flags = resp->flags;
11644 hwrm_req_drop(bp, req);
11645 }
11646
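/* Return true if @advertising contains any bits that are not present
 * in @supported.
 */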
11647 static bool bnxt_support_dropped(u16 advertising, u16 supported)
11648 {
11649 u16 diff = advertising ^ supported;
11650
11651 return ((supported | diff) != supported);
11652 }
11653
11654 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
11655 {
11656 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
11657
11658 /* Check if any advertised speeds are no longer supported. The caller
11659 * holds the link_lock mutex, so we can modify link_info settings.
11660 */
11661 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11662 if (bnxt_support_dropped(link_info->advertising,
11663 link_info->support_auto_speeds2)) {
11664 link_info->advertising = link_info->support_auto_speeds2;
11665 return true;
11666 }
11667 return false;
11668 }
11669 if (bnxt_support_dropped(link_info->advertising,
11670 link_info->support_auto_speeds)) {
11671 link_info->advertising = link_info->support_auto_speeds;
11672 return true;
11673 }
11674 if (bnxt_support_dropped(link_info->advertising_pam4,
11675 link_info->support_pam4_auto_speeds)) {
11676 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
11677 return true;
11678 }
11679 return false;
11680 }
11681
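/* Query PORT_PHY_QCFG and cache the current link, pause, FEC and EEE
 * state.  When @chng_link_state is true, update the link state and
 * report changes; re-apply autoneg settings if any advertised speeds
 * are no longer supported.
 */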
11682 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
11683 {
11684 struct bnxt_link_info *link_info = &bp->link_info;
11685 struct hwrm_port_phy_qcfg_output *resp;
11686 struct hwrm_port_phy_qcfg_input *req;
11687 u8 link_state = link_info->link_state;
11688 bool support_changed;
11689 int rc;
11690
11691 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
11692 if (rc)
11693 return rc;
11694
11695 resp = hwrm_req_hold(bp, req);
11696 rc = hwrm_req_send(bp, req);
11697 if (rc) {
11698 hwrm_req_drop(bp, req);
11699 if (BNXT_VF(bp) && rc == -ENODEV) {
11700 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
11701 rc = 0;
11702 }
11703 return rc;
11704 }
11705
11706 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
11707 link_info->phy_link_status = resp->link;
11708 link_info->duplex = resp->duplex_cfg;
11709 if (bp->hwrm_spec_code >= 0x10800)
11710 link_info->duplex = resp->duplex_state;
11711 link_info->pause = resp->pause;
11712 link_info->auto_mode = resp->auto_mode;
11713 link_info->auto_pause_setting = resp->auto_pause;
11714 link_info->lp_pause = resp->link_partner_adv_pause;
11715 link_info->force_pause_setting = resp->force_pause;
11716 link_info->duplex_setting = resp->duplex_cfg;
11717 if (link_info->phy_link_status == BNXT_LINK_LINK) {
11718 link_info->link_speed = le16_to_cpu(resp->link_speed);
11719 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
11720 link_info->active_lanes = resp->active_lanes;
11721 } else {
11722 link_info->link_speed = 0;
11723 link_info->active_lanes = 0;
11724 }
11725 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
11726 link_info->force_pam4_link_speed =
11727 le16_to_cpu(resp->force_pam4_link_speed);
11728 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
11729 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
11730 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
11731 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
11732 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
11733 link_info->auto_pam4_link_speeds =
11734 le16_to_cpu(resp->auto_pam4_link_speed_mask);
11735 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
11736 link_info->lp_auto_link_speeds =
11737 le16_to_cpu(resp->link_partner_adv_speeds);
11738 link_info->lp_auto_pam4_link_speeds =
11739 resp->link_partner_pam4_adv_speeds;
11740 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
11741 link_info->phy_ver[0] = resp->phy_maj;
11742 link_info->phy_ver[1] = resp->phy_min;
11743 link_info->phy_ver[2] = resp->phy_bld;
11744 link_info->media_type = resp->media_type;
11745 link_info->phy_type = resp->phy_type;
11746 link_info->transceiver = resp->xcvr_pkg_type;
11747 link_info->phy_addr = resp->eee_config_phy_addr &
11748 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
11749 link_info->module_status = resp->module_status;
11750
11751 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
11752 struct ethtool_keee *eee = &bp->eee;
11753 u16 fw_speeds;
11754
11755 eee->eee_active = 0;
11756 if (resp->eee_config_phy_addr &
11757 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
11758 eee->eee_active = 1;
11759 fw_speeds = le16_to_cpu(
11760 resp->link_partner_adv_eee_link_speed_mask);
11761 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
11762 }
11763
11764 /* Pull initial EEE config */
11765 if (!chng_link_state) {
11766 if (resp->eee_config_phy_addr &
11767 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
11768 eee->eee_enabled = 1;
11769
11770 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
11771 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
11772
11773 if (resp->eee_config_phy_addr &
11774 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
11775 __le32 tmr;
11776
11777 eee->tx_lpi_enabled = 1;
11778 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
11779 eee->tx_lpi_timer = le32_to_cpu(tmr) &
11780 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
11781 }
11782 }
11783 }
11784
11785 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
11786 if (bp->hwrm_spec_code >= 0x10504) {
11787 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
11788 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
11789 }
11790 /* TODO: need to add more logic to report VF link */
11791 if (chng_link_state) {
11792 if (link_info->phy_link_status == BNXT_LINK_LINK)
11793 link_info->link_state = BNXT_LINK_STATE_UP;
11794 else
11795 link_info->link_state = BNXT_LINK_STATE_DOWN;
11796 if (link_state != link_info->link_state)
11797 bnxt_report_link(bp);
11798 } else {
11799 /* always report link down if not required to update the link state */
11800 link_info->link_state = BNXT_LINK_STATE_DOWN;
11801 }
11802 hwrm_req_drop(bp, req);
11803
11804 if (!BNXT_PHY_CFG_ABLE(bp))
11805 return 0;
11806
11807 support_changed = bnxt_support_speed_dropped(link_info);
11808 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
11809 bnxt_hwrm_set_link_setting(bp, true, false);
11810 return 0;
11811 }
11812
11813 static void bnxt_get_port_module_status(struct bnxt *bp)
11814 {
11815 struct bnxt_link_info *link_info = &bp->link_info;
11816 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
11817 u8 module_status;
11818
11819 if (bnxt_update_link(bp, true))
11820 return;
11821
11822 module_status = link_info->module_status;
11823 switch (module_status) {
11824 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
11825 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
11826 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
11827 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
11828 bp->pf.port_id);
11829 if (bp->hwrm_spec_code >= 0x10201) {
11830 netdev_warn(bp->dev, "Module part number %s\n",
11831 resp->phy_vendor_partnumber);
11832 }
11833 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
11834 netdev_warn(bp->dev, "TX is disabled\n");
11835 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
11836 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
11837 }
11838 }
11839
11840 static void
11841 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11842 {
11843 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
11844 if (bp->hwrm_spec_code >= 0x10201)
11845 req->auto_pause =
11846 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
11847 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11848 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
11849 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11850 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
11851 req->enables |=
11852 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11853 } else {
11854 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11855 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
11856 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11857 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
11858 req->enables |=
11859 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
11860 if (bp->hwrm_spec_code >= 0x10201) {
11861 req->auto_pause = req->force_pause;
11862 req->enables |= cpu_to_le32(
11863 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11864 }
11865 }
11866 }
11867
11868 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11869 {
11870 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
11871 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
11872 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11873 req->enables |=
11874 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
11875 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
11876 } else if (bp->link_info.advertising) {
11877 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
11878 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
11879 }
11880 if (bp->link_info.advertising_pam4) {
11881 req->enables |=
11882 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
11883 req->auto_link_pam4_speed_mask =
11884 cpu_to_le16(bp->link_info.advertising_pam4);
11885 }
11886 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
11887 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
11888 } else {
11889 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
11890 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11891 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
11892 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
11893 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
11894 (u32)bp->link_info.req_link_speed);
11895 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
11896 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11897 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
11898 } else {
11899 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11900 }
11901 }
11902
11903 /* tell chimp that the setting takes effect immediately */
11904 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
11905 }
11906
11907 int bnxt_hwrm_set_pause(struct bnxt *bp)
11908 {
11909 struct hwrm_port_phy_cfg_input *req;
11910 int rc;
11911
11912 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11913 if (rc)
11914 return rc;
11915
11916 bnxt_hwrm_set_pause_common(bp, req);
11917
11918 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
11919 bp->link_info.force_link_chng)
11920 bnxt_hwrm_set_link_common(bp, req);
11921
11922 rc = hwrm_req_send(bp, req);
11923 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
11924 /* since changing the pause setting doesn't trigger any link
11925 * change event, the driver needs to update the current pause
11926 * result upon successful return of the phy_cfg command
11927 */
11928 bp->link_info.pause =
11929 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
11930 bp->link_info.auto_pause_setting = 0;
11931 if (!bp->link_info.force_link_chng)
11932 bnxt_report_link(bp);
11933 }
11934 bp->link_info.force_link_chng = false;
11935 return rc;
11936 }
11937
11938 static void bnxt_hwrm_set_eee(struct bnxt *bp,
11939 struct hwrm_port_phy_cfg_input *req)
11940 {
11941 struct ethtool_keee *eee = &bp->eee;
11942
11943 if (eee->eee_enabled) {
11944 u16 eee_speeds;
11945 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
11946
11947 if (eee->tx_lpi_enabled)
11948 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
11949 else
11950 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
11951
11952 req->flags |= cpu_to_le32(flags);
11953 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
11954 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
11955 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
11956 } else {
11957 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
11958 }
11959 }
11960
11961 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
11962 {
11963 struct hwrm_port_phy_cfg_input *req;
11964 int rc;
11965
11966 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11967 if (rc)
11968 return rc;
11969
11970 if (set_pause)
11971 bnxt_hwrm_set_pause_common(bp, req);
11972
11973 bnxt_hwrm_set_link_common(bp, req);
11974
11975 if (set_eee)
11976 bnxt_hwrm_set_eee(bp, req);
11977 return hwrm_req_send(bp, req);
11978 }
11979
11980 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
11981 {
11982 struct hwrm_port_phy_cfg_input *req;
11983 int rc;
11984
11985 if (!BNXT_SINGLE_PF(bp))
11986 return 0;
11987
11988 if (pci_num_vf(bp->pdev) &&
11989 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
11990 return 0;
11991
11992 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11993 if (rc)
11994 return rc;
11995
11996 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
11997 rc = hwrm_req_send(bp, req);
11998 if (!rc) {
11999 mutex_lock(&bp->link_lock);
12000 /* The device is not obliged to bring the link down in certain
12001 * scenarios, even when forced. Setting the state to unknown is
12002 * consistent with driver startup and will force the link state to be
12003 * reported during the subsequent open based on PORT_PHY_QCFG.
12004 */
12005 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12006 mutex_unlock(&bp->link_lock);
12007 }
12008 return rc;
12009 }
12010
12011 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12012 {
12013 #ifdef CONFIG_TEE_BNXT_FW
12014 int rc = tee_bnxt_fw_load();
12015
12016 if (rc)
12017 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12018
12019 return rc;
12020 #else
12021 netdev_err(bp->dev, "OP-TEE not supported\n");
12022 return -ENODEV;
12023 #endif
12024 }
12025
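/* Poll the firmware health status until it leaves the booting or
 * recovering state.  If the firmware crashed with no master function,
 * request a firmware reset via OP-TEE.
 */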
12026 static int bnxt_try_recover_fw(struct bnxt *bp)
12027 {
12028 if (bp->fw_health && bp->fw_health->status_reliable) {
12029 int retry = 0, rc;
12030 u32 sts;
12031
12032 do {
12033 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12034 rc = bnxt_hwrm_poll(bp);
12035 if (!BNXT_FW_IS_BOOTING(sts) &&
12036 !BNXT_FW_IS_RECOVERING(sts))
12037 break;
12038 retry++;
12039 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12040
12041 if (!BNXT_FW_IS_HEALTHY(sts)) {
12042 netdev_err(bp->dev,
12043 "Firmware not responding, status: 0x%x\n",
12044 sts);
12045 rc = -ENODEV;
12046 }
12047 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12048 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12049 return bnxt_fw_reset_via_optee(bp);
12050 }
12051 return rc;
12052 }
12053
12054 return -ENODEV;
12055 }
12056
12057 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12058 {
12059 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12060
12061 if (!BNXT_NEW_RM(bp))
12062 return; /* no resource reservations required */
12063
12064 hw_resc->resv_cp_rings = 0;
12065 hw_resc->resv_stat_ctxs = 0;
12066 hw_resc->resv_irqs = 0;
12067 hw_resc->resv_tx_rings = 0;
12068 hw_resc->resv_rx_rings = 0;
12069 hw_resc->resv_hw_ring_grps = 0;
12070 hw_resc->resv_vnics = 0;
12071 hw_resc->resv_rsscos_ctxs = 0;
12072 if (!fw_reset) {
12073 bp->tx_nr_rings = 0;
12074 bp->rx_nr_rings = 0;
12075 }
12076 }
12077
12078 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12079 {
12080 int rc;
12081
12082 if (!BNXT_NEW_RM(bp))
12083 return 0; /* no resource reservations required */
12084
12085 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12086 if (rc)
12087 netdev_err(bp->dev, "resc_qcaps failed\n");
12088
12089 bnxt_clear_reservations(bp, fw_reset);
12090
12091 return rc;
12092 }
12093
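/* Notify the firmware that the interface is going up or down.  On the
 * up path, handle resource-change and hot-reset-done indications by
 * re-initializing firmware state and canceling stale reservations.
 */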
12094 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12095 {
12096 struct hwrm_func_drv_if_change_output *resp;
12097 struct hwrm_func_drv_if_change_input *req;
12098 bool fw_reset = !bp->irq_tbl;
12099 bool resc_reinit = false;
12100 int rc, retry = 0;
12101 u32 flags = 0;
12102
12103 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12104 return 0;
12105
12106 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12107 if (rc)
12108 return rc;
12109
12110 if (up)
12111 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12112 resp = hwrm_req_hold(bp, req);
12113
12114 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12115 while (retry < BNXT_FW_IF_RETRY) {
12116 rc = hwrm_req_send(bp, req);
12117 if (rc != -EAGAIN)
12118 break;
12119
12120 msleep(50);
12121 retry++;
12122 }
12123
12124 if (rc == -EAGAIN) {
12125 hwrm_req_drop(bp, req);
12126 return rc;
12127 } else if (!rc) {
12128 flags = le32_to_cpu(resp->flags);
12129 } else if (up) {
12130 rc = bnxt_try_recover_fw(bp);
12131 fw_reset = true;
12132 }
12133 hwrm_req_drop(bp, req);
12134 if (rc)
12135 return rc;
12136
12137 if (!up) {
12138 bnxt_inv_fw_health_reg(bp);
12139 return 0;
12140 }
12141
12142 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12143 resc_reinit = true;
12144 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12145 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12146 fw_reset = true;
12147 else
12148 bnxt_remap_fw_health_regs(bp);
12149
12150 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12151 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12152 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12153 return -ENODEV;
12154 }
12155 if (resc_reinit || fw_reset) {
12156 if (fw_reset) {
12157 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12158 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12159 bnxt_ulp_irq_stop(bp);
12160 bnxt_free_ctx_mem(bp, false);
12161 bnxt_dcb_free(bp);
12162 rc = bnxt_fw_init_one(bp);
12163 if (rc) {
12164 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12165 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12166 return rc;
12167 }
12168 bnxt_clear_int_mode(bp);
12169 rc = bnxt_init_int_mode(bp);
12170 if (rc) {
12171 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12172 netdev_err(bp->dev, "init int mode failed\n");
12173 return rc;
12174 }
12175 }
12176 rc = bnxt_cancel_reservations(bp, fw_reset);
12177 }
12178 return rc;
12179 }
12180
12181 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12182 {
12183 struct hwrm_port_led_qcaps_output *resp;
12184 struct hwrm_port_led_qcaps_input *req;
12185 struct bnxt_pf_info *pf = &bp->pf;
12186 int rc;
12187
12188 bp->num_leds = 0;
12189 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12190 return 0;
12191
12192 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12193 if (rc)
12194 return rc;
12195
12196 req->port_id = cpu_to_le16(pf->port_id);
12197 resp = hwrm_req_hold(bp, req);
12198 rc = hwrm_req_send(bp, req);
12199 if (rc) {
12200 hwrm_req_drop(bp, req);
12201 return rc;
12202 }
12203 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12204 int i;
12205
12206 bp->num_leds = resp->num_leds;
12207 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12208 bp->num_leds);
12209 for (i = 0; i < bp->num_leds; i++) {
12210 struct bnxt_led_info *led = &bp->leds[i];
12211 __le16 caps = led->led_state_caps;
12212
12213 if (!led->led_group_id ||
12214 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12215 bp->num_leds = 0;
12216 break;
12217 }
12218 }
12219 }
12220 hwrm_req_drop(bp, req);
12221 return 0;
12222 }
12223
12224 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12225 {
12226 struct hwrm_wol_filter_alloc_output *resp;
12227 struct hwrm_wol_filter_alloc_input *req;
12228 int rc;
12229
12230 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12231 if (rc)
12232 return rc;
12233
12234 req->port_id = cpu_to_le16(bp->pf.port_id);
12235 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12236 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12237 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12238
12239 resp = hwrm_req_hold(bp, req);
12240 rc = hwrm_req_send(bp, req);
12241 if (!rc)
12242 bp->wol_filter_id = resp->wol_filter_id;
12243 hwrm_req_drop(bp, req);
12244 return rc;
12245 }
12246
12247 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12248 {
12249 struct hwrm_wol_filter_free_input *req;
12250 int rc;
12251
12252 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12253 if (rc)
12254 return rc;
12255
12256 req->port_id = cpu_to_le16(bp->pf.port_id);
12257 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12258 req->wol_filter_id = bp->wol_filter_id;
12259
12260 return hwrm_req_send(bp, req);
12261 }
12262
12263 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12264 {
12265 struct hwrm_wol_filter_qcfg_output *resp;
12266 struct hwrm_wol_filter_qcfg_input *req;
12267 u16 next_handle = 0;
12268 int rc;
12269
12270 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12271 if (rc)
12272 return rc;
12273
12274 req->port_id = cpu_to_le16(bp->pf.port_id);
12275 req->handle = cpu_to_le16(handle);
12276 resp = hwrm_req_hold(bp, req);
12277 rc = hwrm_req_send(bp, req);
12278 if (!rc) {
12279 next_handle = le16_to_cpu(resp->next_handle);
12280 if (next_handle != 0) {
12281 if (resp->wol_type ==
12282 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12283 bp->wol = 1;
12284 bp->wol_filter_id = resp->wol_filter_id;
12285 }
12286 }
12287 }
12288 hwrm_req_drop(bp, req);
12289 return next_handle;
12290 }
12291
12292 static void bnxt_get_wol_settings(struct bnxt *bp)
12293 {
12294 u16 handle = 0;
12295
12296 bp->wol = 0;
12297 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12298 return;
12299
12300 do {
12301 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12302 } while (handle && handle != 0xffff);
12303 }
12304
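/* Check that the EEE configuration is consistent with the current autoneg
 * advertisement.  Returns false (after fixing up the EEE state) if EEE
 * needs to be re-applied, e.g. when speed autoneg is off or the EEE
 * advertised modes are not a subset of the link advertisement.
 */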
12305 static bool bnxt_eee_config_ok(struct bnxt *bp)
12306 {
12307 struct ethtool_keee *eee = &bp->eee;
12308 struct bnxt_link_info *link_info = &bp->link_info;
12309
12310 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12311 return true;
12312
12313 if (eee->eee_enabled) {
12314 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12315 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12316
12317 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12318
12319 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12320 eee->eee_enabled = 0;
12321 return false;
12322 }
12323 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12324 linkmode_and(eee->advertised, advertising,
12325 eee->supported);
12326 return false;
12327 }
12328 }
12329 return true;
12330 }
12331
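/* Re-read the link state from firmware, determine whether the pause,
 * speed/duplex or EEE settings differ from what was requested, and push
 * the required PHY configuration.  Only the single-PF case reconfigures
 * the PHY.
 */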
12332 static int bnxt_update_phy_setting(struct bnxt *bp)
12333 {
12334 int rc;
12335 bool update_link = false;
12336 bool update_pause = false;
12337 bool update_eee = false;
12338 struct bnxt_link_info *link_info = &bp->link_info;
12339
12340 rc = bnxt_update_link(bp, true);
12341 if (rc) {
12342 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12343 rc);
12344 return rc;
12345 }
12346 if (!BNXT_SINGLE_PF(bp))
12347 return 0;
12348
12349 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12350 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12351 link_info->req_flow_ctrl)
12352 update_pause = true;
12353 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12354 link_info->force_pause_setting != link_info->req_flow_ctrl)
12355 update_pause = true;
12356 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12357 if (BNXT_AUTO_MODE(link_info->auto_mode))
12358 update_link = true;
12359 if (bnxt_force_speed_updated(link_info))
12360 update_link = true;
12361 if (link_info->req_duplex != link_info->duplex_setting)
12362 update_link = true;
12363 } else {
12364 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12365 update_link = true;
12366 if (bnxt_auto_speed_updated(link_info))
12367 update_link = true;
12368 }
12369
12370 /* The last close may have shut down the link, so we need to call
12371 * PHY_CFG to bring it back up.
12372 */
12373 if (!BNXT_LINK_IS_UP(bp))
12374 update_link = true;
12375
12376 if (!bnxt_eee_config_ok(bp))
12377 update_eee = true;
12378
12379 if (update_link)
12380 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12381 else if (update_pause)
12382 rc = bnxt_hwrm_set_pause(bp);
12383 if (rc) {
12384 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12385 rc);
12386 return rc;
12387 }
12388
12389 return rc;
12390 }
12391
12392 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12393
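/* After a previous open was aborted due to a firmware error, try a full
 * firmware re-initialization so that the next open can proceed.
 */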
12394 static int bnxt_reinit_after_abort(struct bnxt *bp)
12395 {
12396 int rc;
12397
12398 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12399 return -EBUSY;
12400
12401 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12402 return -ENODEV;
12403
12404 rc = bnxt_fw_init_one(bp);
12405 if (!rc) {
12406 bnxt_clear_int_mode(bp);
12407 rc = bnxt_init_int_mode(bp);
12408 if (!rc) {
12409 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12410 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12411 }
12412 }
12413 return rc;
12414 }
12415
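/* Re-install one user-created ntuple or L2 filter in hardware.  If the
 * firmware rejects the filter, it is deleted and an error is logged.
 */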
12416 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12417 {
12418 struct bnxt_ntuple_filter *ntp_fltr;
12419 struct bnxt_l2_filter *l2_fltr;
12420
12421 if (list_empty(&fltr->list))
12422 return;
12423
12424 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12425 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12426 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12427 atomic_inc(&l2_fltr->refcnt);
12428 ntp_fltr->l2_fltr = l2_fltr;
12429 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12430 bnxt_del_ntp_filter(bp, ntp_fltr);
12431 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12432 fltr->sw_id);
12433 }
12434 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12435 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12436 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12437 bnxt_del_l2_filter(bp, l2_fltr);
12438 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12439 fltr->sw_id);
12440 }
12441 }
12442 }
12443
12444 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12445 {
12446 struct bnxt_filter_base *usr_fltr, *tmp;
12447
12448 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12449 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12450 }
12451
12452 static int bnxt_set_xps_mapping(struct bnxt *bp)
12453 {
12454 int numa_node = dev_to_node(&bp->pdev->dev);
12455 unsigned int q_idx, map_idx, cpu, i;
12456 const struct cpumask *cpu_mask_ptr;
12457 int nr_cpus = num_online_cpus();
12458 cpumask_t *q_map;
12459 int rc = 0;
12460
12461 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12462 if (!q_map)
12463 return -ENOMEM;
12464
12465 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12466 * Each TC has the same number of TX queues. The nth TX queue for each
12467 * TC will have the same CPU mask.
12468 */
12469 for (i = 0; i < nr_cpus; i++) {
12470 map_idx = i % bp->tx_nr_rings_per_tc;
12471 cpu = cpumask_local_spread(i, numa_node);
12472 cpu_mask_ptr = get_cpu_mask(cpu);
12473 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12474 }
12475
12476 /* Register CPU mask for each TX queue except the ones marked for XDP */
12477 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12478 map_idx = q_idx % bp->tx_nr_rings_per_tc;
12479 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12480 if (rc) {
12481 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12482 q_idx);
12483 break;
12484 }
12485 }
12486
12487 kfree(q_map);
12488
12489 return rc;
12490 }
12491
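/* Core NIC open path: reserve rings, allocate memory and IRQs, initialize
 * the hardware rings and VNICs, then enable NAPI, interrupts and the TX
 * queues.  irq_re_init and link_re_init select how much state is rebuilt.
 */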
12492 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12493 {
12494 int rc = 0;
12495
12496 netif_carrier_off(bp->dev);
12497 if (irq_re_init) {
12498 /* Reserve rings now if none were reserved at driver probe. */
12499 rc = bnxt_init_dflt_ring_mode(bp);
12500 if (rc) {
12501 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12502 return rc;
12503 }
12504 }
12505 rc = bnxt_reserve_rings(bp, irq_re_init);
12506 if (rc)
12507 return rc;
12508
12509 rc = bnxt_alloc_mem(bp, irq_re_init);
12510 if (rc) {
12511 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12512 goto open_err_free_mem;
12513 }
12514
12515 if (irq_re_init) {
12516 bnxt_init_napi(bp);
12517 rc = bnxt_request_irq(bp);
12518 if (rc) {
12519 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
12520 goto open_err_irq;
12521 }
12522 }
12523
12524 rc = bnxt_init_nic(bp, irq_re_init);
12525 if (rc) {
12526 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12527 goto open_err_irq;
12528 }
12529
12530 bnxt_enable_napi(bp);
12531 bnxt_debug_dev_init(bp);
12532
12533 if (link_re_init) {
12534 mutex_lock(&bp->link_lock);
12535 rc = bnxt_update_phy_setting(bp);
12536 mutex_unlock(&bp->link_lock);
12537 if (rc) {
12538 netdev_warn(bp->dev, "failed to update phy settings\n");
12539 if (BNXT_SINGLE_PF(bp)) {
12540 bp->link_info.phy_retry = true;
12541 bp->link_info.phy_retry_expires =
12542 jiffies + 5 * HZ;
12543 }
12544 }
12545 }
12546
12547 if (irq_re_init) {
12548 udp_tunnel_nic_reset_ntf(bp->dev);
12549 rc = bnxt_set_xps_mapping(bp);
12550 if (rc)
12551 netdev_warn(bp->dev, "failed to set xps mapping\n");
12552 }
12553
12554 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
12555 if (!static_key_enabled(&bnxt_xdp_locking_key))
12556 static_branch_enable(&bnxt_xdp_locking_key);
12557 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
12558 static_branch_disable(&bnxt_xdp_locking_key);
12559 }
12560 set_bit(BNXT_STATE_OPEN, &bp->state);
12561 bnxt_enable_int(bp);
12562 /* Enable TX queues */
12563 bnxt_tx_enable(bp);
12564 mod_timer(&bp->timer, jiffies + bp->current_interval);
12565 /* Poll link status and check for SFP+ module status */
12566 mutex_lock(&bp->link_lock);
12567 bnxt_get_port_module_status(bp);
12568 mutex_unlock(&bp->link_lock);
12569
12570 /* VF-reps may need to be re-opened after the PF is re-opened */
12571 if (BNXT_PF(bp))
12572 bnxt_vf_reps_open(bp);
12573 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
12574 WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
12575 bnxt_ptp_init_rtc(bp, true);
12576 bnxt_ptp_cfg_tstamp_filters(bp);
12577 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12578 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
12579 bnxt_cfg_usr_fltrs(bp);
12580 return 0;
12581
12582 open_err_irq:
12583 bnxt_del_napi(bp);
12584
12585 open_err_free_mem:
12586 bnxt_free_skbs(bp);
12587 bnxt_free_irq(bp);
12588 bnxt_free_mem(bp, true);
12589 return rc;
12590 }
12591
12592 /* rtnl_lock held */
12593 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12594 {
12595 int rc = 0;
12596
12597 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
12598 rc = -EIO;
12599 if (!rc)
12600 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
12601 if (rc) {
12602 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
12603 dev_close(bp->dev);
12604 }
12605 return rc;
12606 }
12607
12608 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
12609 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
12610 * self tests.
12611 */
12612 int bnxt_half_open_nic(struct bnxt *bp)
12613 {
12614 int rc = 0;
12615
12616 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12617 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
12618 rc = -ENODEV;
12619 goto half_open_err;
12620 }
12621
12622 rc = bnxt_alloc_mem(bp, true);
12623 if (rc) {
12624 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12625 goto half_open_err;
12626 }
12627 bnxt_init_napi(bp);
12628 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12629 rc = bnxt_init_nic(bp, true);
12630 if (rc) {
12631 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12632 bnxt_del_napi(bp);
12633 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12634 goto half_open_err;
12635 }
12636 return 0;
12637
12638 half_open_err:
12639 bnxt_free_skbs(bp);
12640 bnxt_free_mem(bp, true);
12641 dev_close(bp->dev);
12642 return rc;
12643 }
12644
12645 /* rtnl_lock held, this call can only be made after a previous successful
12646 * call to bnxt_half_open_nic().
12647 */
12648 void bnxt_half_close_nic(struct bnxt *bp)
12649 {
12650 bnxt_hwrm_resource_free(bp, false, true);
12651 bnxt_del_napi(bp);
12652 bnxt_free_skbs(bp);
12653 bnxt_free_mem(bp, true);
12654 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12655 }
12656
12657 void bnxt_reenable_sriov(struct bnxt *bp)
12658 {
12659 if (BNXT_PF(bp)) {
12660 struct bnxt_pf_info *pf = &bp->pf;
12661 int n = pf->active_vfs;
12662
12663 if (n)
12664 bnxt_cfg_hw_sriov(bp, &n, true);
12665 }
12666 }
12667
12668 static int bnxt_open(struct net_device *dev)
12669 {
12670 struct bnxt *bp = netdev_priv(dev);
12671 int rc;
12672
12673 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12674 rc = bnxt_reinit_after_abort(bp);
12675 if (rc) {
12676 if (rc == -EBUSY)
12677 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
12678 else
12679 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
12680 return -ENODEV;
12681 }
12682 }
12683
12684 rc = bnxt_hwrm_if_change(bp, true);
12685 if (rc)
12686 return rc;
12687
12688 rc = __bnxt_open_nic(bp, true, true);
12689 if (rc) {
12690 bnxt_hwrm_if_change(bp, false);
12691 } else {
12692 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12693 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12694 bnxt_queue_sp_work(bp,
12695 BNXT_RESTART_ULP_SP_EVENT);
12696 }
12697 }
12698
12699 return rc;
12700 }
12701
12702 static bool bnxt_drv_busy(struct bnxt *bp)
12703 {
12704 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
12705 test_bit(BNXT_STATE_READ_STATS, &bp->state));
12706 }
12707
12708 static void bnxt_get_ring_stats(struct bnxt *bp,
12709 struct rtnl_link_stats64 *stats);
12710
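/* Core NIC close path: stop TX, wait for in-progress slow-path and stats
 * users, shut down the hardware rings, then free SKBs, IRQs and ring
 * memory as requested.
 */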
12711 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
12712 bool link_re_init)
12713 {
12714 /* Close the VF-reps before closing PF */
12715 if (BNXT_PF(bp))
12716 bnxt_vf_reps_close(bp);
12717
12718 /* Change device state to avoid TX queue wake-ups */
12719 bnxt_tx_disable(bp);
12720
12721 clear_bit(BNXT_STATE_OPEN, &bp->state);
12722 smp_mb__after_atomic();
12723 while (bnxt_drv_busy(bp))
12724 msleep(20);
12725
12726 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12727 bnxt_clear_rss_ctxs(bp);
12728 /* Flush rings and disable interrupts */
12729 bnxt_shutdown_nic(bp, irq_re_init);
12730
12731 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
12732
12733 bnxt_debug_dev_exit(bp);
12734 bnxt_disable_napi(bp);
12735 del_timer_sync(&bp->timer);
12736 bnxt_free_skbs(bp);
12737
12738 /* Save ring stats before shutdown */
12739 if (bp->bnapi && irq_re_init) {
12740 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
12741 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
12742 }
12743 if (irq_re_init) {
12744 bnxt_free_irq(bp);
12745 bnxt_del_napi(bp);
12746 }
12747 bnxt_free_mem(bp, irq_re_init);
12748 }
12749
12750 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12751 {
12752 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12753 /* If we get here, it means firmware reset is in progress
12754 * while we are trying to close. We can safely proceed with
12755 * the close because we are holding rtnl_lock(). Some firmware
12756 * messages may fail as we proceed to close. We set the
12757 * ABORT_ERR flag here so that the FW reset thread will later
12758 * abort when it gets the rtnl_lock() and sees the flag.
12759 */
12760 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
12761 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12762 }
12763
12764 #ifdef CONFIG_BNXT_SRIOV
12765 if (bp->sriov_cfg) {
12766 int rc;
12767
12768 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
12769 !bp->sriov_cfg,
12770 BNXT_SRIOV_CFG_WAIT_TMO);
12771 if (!rc)
12772 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
12773 else if (rc < 0)
12774 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
12775 }
12776 #endif
12777 __bnxt_close_nic(bp, irq_re_init, link_re_init);
12778 }
12779
12780 static int bnxt_close(struct net_device *dev)
12781 {
12782 struct bnxt *bp = netdev_priv(dev);
12783
12784 bnxt_close_nic(bp, true, true);
12785 bnxt_hwrm_shutdown_link(bp);
12786 bnxt_hwrm_if_change(bp, false);
12787 return 0;
12788 }
12789
12790 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
12791 u16 *val)
12792 {
12793 struct hwrm_port_phy_mdio_read_output *resp;
12794 struct hwrm_port_phy_mdio_read_input *req;
12795 int rc;
12796
12797 if (bp->hwrm_spec_code < 0x10a00)
12798 return -EOPNOTSUPP;
12799
12800 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
12801 if (rc)
12802 return rc;
12803
12804 req->port_id = cpu_to_le16(bp->pf.port_id);
12805 req->phy_addr = phy_addr;
12806 req->reg_addr = cpu_to_le16(reg & 0x1f);
12807 if (mdio_phy_id_is_c45(phy_addr)) {
12808 req->cl45_mdio = 1;
12809 req->phy_addr = mdio_phy_id_prtad(phy_addr);
12810 req->dev_addr = mdio_phy_id_devad(phy_addr);
12811 req->reg_addr = cpu_to_le16(reg);
12812 }
12813
12814 resp = hwrm_req_hold(bp, req);
12815 rc = hwrm_req_send(bp, req);
12816 if (!rc)
12817 *val = le16_to_cpu(resp->reg_data);
12818 hwrm_req_drop(bp, req);
12819 return rc;
12820 }
12821
12822 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
12823 u16 val)
12824 {
12825 struct hwrm_port_phy_mdio_write_input *req;
12826 int rc;
12827
12828 if (bp->hwrm_spec_code < 0x10a00)
12829 return -EOPNOTSUPP;
12830
12831 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12832 if (rc)
12833 return rc;
12834
12835 req->port_id = cpu_to_le16(bp->pf.port_id);
12836 req->phy_addr = phy_addr;
12837 req->reg_addr = cpu_to_le16(reg & 0x1f);
12838 if (mdio_phy_id_is_c45(phy_addr)) {
12839 req->cl45_mdio = 1;
12840 req->phy_addr = mdio_phy_id_prtad(phy_addr);
12841 req->dev_addr = mdio_phy_id_devad(phy_addr);
12842 req->reg_addr = cpu_to_le16(reg);
12843 }
12844 req->reg_data = cpu_to_le16(val);
12845
12846 return hwrm_req_send(bp, req);
12847 }
12848
12849 /* rtnl_lock held */
12850 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12851 {
12852 struct mii_ioctl_data *mdio = if_mii(ifr);
12853 struct bnxt *bp = netdev_priv(dev);
12854 int rc;
12855
12856 switch (cmd) {
12857 case SIOCGMIIPHY:
12858 mdio->phy_id = bp->link_info.phy_addr;
12859
12860 fallthrough;
12861 case SIOCGMIIREG: {
12862 u16 mii_regval = 0;
12863
12864 if (!netif_running(dev))
12865 return -EAGAIN;
12866
12867 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12868 &mii_regval);
12869 mdio->val_out = mii_regval;
12870 return rc;
12871 }
12872
12873 case SIOCSMIIREG:
12874 if (!netif_running(dev))
12875 return -EAGAIN;
12876
12877 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
12878 mdio->val_in);
12879
12880 case SIOCSHWTSTAMP:
12881 return bnxt_hwtstamp_set(dev, ifr);
12882
12883 case SIOCGHWTSTAMP:
12884 return bnxt_hwtstamp_get(dev, ifr);
12885
12886 default:
12887 /* do nothing */
12888 break;
12889 }
12890 return -EOPNOTSUPP;
12891 }
12892
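/* Aggregate the per-completion-ring hardware counters into the standard
 * rtnl_link_stats64 structure.
 */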
12893 static void bnxt_get_ring_stats(struct bnxt *bp,
12894 struct rtnl_link_stats64 *stats)
12895 {
12896 int i;
12897
12898 for (i = 0; i < bp->cp_nr_rings; i++) {
12899 struct bnxt_napi *bnapi = bp->bnapi[i];
12900 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12901 u64 *sw = cpr->stats.sw_stats;
12902
12903 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
12904 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12905 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
12906
12907 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
12908 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
12909 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
12910
12911 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
12912 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
12913 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
12914
12915 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
12916 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
12917 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
12918
12919 stats->rx_missed_errors +=
12920 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
12921
12922 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12923
12924 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
12925
12926 stats->rx_dropped +=
12927 cpr->sw_stats->rx.rx_netpoll_discards +
12928 cpr->sw_stats->rx.rx_oom_discards;
12929 }
12930 }
12931
12932 static void bnxt_add_prev_stats(struct bnxt *bp,
12933 struct rtnl_link_stats64 *stats)
12934 {
12935 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
12936
12937 stats->rx_packets += prev_stats->rx_packets;
12938 stats->tx_packets += prev_stats->tx_packets;
12939 stats->rx_bytes += prev_stats->rx_bytes;
12940 stats->tx_bytes += prev_stats->tx_bytes;
12941 stats->rx_missed_errors += prev_stats->rx_missed_errors;
12942 stats->multicast += prev_stats->multicast;
12943 stats->rx_dropped += prev_stats->rx_dropped;
12944 stats->tx_dropped += prev_stats->tx_dropped;
12945 }
12946
12947 static void
12948 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
12949 {
12950 struct bnxt *bp = netdev_priv(dev);
12951
12952 set_bit(BNXT_STATE_READ_STATS, &bp->state);
12953 /* Make sure bnxt_close_nic() sees that we are reading stats before
12954 * we check the BNXT_STATE_OPEN flag.
12955 */
12956 smp_mb__after_atomic();
12957 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12958 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12959 *stats = bp->net_stats_prev;
12960 return;
12961 }
12962
12963 bnxt_get_ring_stats(bp, stats);
12964 bnxt_add_prev_stats(bp, stats);
12965
12966 if (bp->flags & BNXT_FLAG_PORT_STATS) {
12967 u64 *rx = bp->port_stats.sw_stats;
12968 u64 *tx = bp->port_stats.sw_stats +
12969 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
12970
12971 stats->rx_crc_errors =
12972 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
12973 stats->rx_frame_errors =
12974 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
12975 stats->rx_length_errors =
12976 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
12977 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
12978 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
12979 stats->rx_errors =
12980 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
12981 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
12982 stats->collisions =
12983 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
12984 stats->tx_fifo_errors =
12985 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
12986 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
12987 }
12988 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12989 }
12990
12991 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
12992 struct bnxt_total_ring_err_stats *stats,
12993 struct bnxt_cp_ring_info *cpr)
12994 {
12995 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
12996 u64 *hw_stats = cpr->stats.sw_stats;
12997
12998 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
12999 stats->rx_total_resets += sw_stats->rx.rx_resets;
13000 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13001 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13002 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13003 stats->rx_total_ring_discards +=
13004 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13005 stats->tx_total_resets += sw_stats->tx.tx_resets;
13006 stats->tx_total_ring_discards +=
13007 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13008 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13009 }
13010
13011 void bnxt_get_ring_err_stats(struct bnxt *bp,
13012 struct bnxt_total_ring_err_stats *stats)
13013 {
13014 int i;
13015
13016 for (i = 0; i < bp->cp_nr_rings; i++)
13017 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13018 }
13019
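/* Copy the netdev multicast list into the default VNIC and report whether
 * it changed.  If the list exceeds BNXT_MAX_MC_ADDRS, fall back to
 * receiving all multicast instead.
 */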
13020 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13021 {
13022 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13023 struct net_device *dev = bp->dev;
13024 struct netdev_hw_addr *ha;
13025 u8 *haddr;
13026 int mc_count = 0;
13027 bool update = false;
13028 int off = 0;
13029
13030 netdev_for_each_mc_addr(ha, dev) {
13031 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13032 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13033 vnic->mc_list_count = 0;
13034 return false;
13035 }
13036 haddr = ha->addr;
13037 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13038 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13039 update = true;
13040 }
13041 off += ETH_ALEN;
13042 mc_count++;
13043 }
13044 if (mc_count)
13045 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13046
13047 if (mc_count != vnic->mc_list_count) {
13048 vnic->mc_list_count = mc_count;
13049 update = true;
13050 }
13051 return update;
13052 }
13053
13054 static bool bnxt_uc_list_updated(struct bnxt *bp)
13055 {
13056 struct net_device *dev = bp->dev;
13057 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13058 struct netdev_hw_addr *ha;
13059 int off = 0;
13060
13061 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13062 return true;
13063
13064 netdev_for_each_uc_addr(ha, dev) {
13065 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13066 return true;
13067
13068 off += ETH_ALEN;
13069 }
13070 return false;
13071 }
13072
13073 static void bnxt_set_rx_mode(struct net_device *dev)
13074 {
13075 struct bnxt *bp = netdev_priv(dev);
13076 struct bnxt_vnic_info *vnic;
13077 bool mc_update = false;
13078 bool uc_update;
13079 u32 mask;
13080
13081 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13082 return;
13083
13084 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13085 mask = vnic->rx_mask;
13086 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13087 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13088 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13089 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13090
13091 if (dev->flags & IFF_PROMISC)
13092 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13093
13094 uc_update = bnxt_uc_list_updated(bp);
13095
13096 if (dev->flags & IFF_BROADCAST)
13097 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13098 if (dev->flags & IFF_ALLMULTI) {
13099 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13100 vnic->mc_list_count = 0;
13101 } else if (dev->flags & IFF_MULTICAST) {
13102 mc_update = bnxt_mc_list_updated(bp, &mask);
13103 }
13104
13105 if (mask != vnic->rx_mask || uc_update || mc_update) {
13106 vnic->rx_mask = mask;
13107
13108 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13109 }
13110 }
13111
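/* Program the unicast MAC filters and the RX mask of the default VNIC
 * into firmware.  Falls back to promiscuous or all-multicast mode when
 * the filter tables are exhausted or the update fails.
 */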
13112 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13113 {
13114 struct net_device *dev = bp->dev;
13115 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13116 struct netdev_hw_addr *ha;
13117 int i, off = 0, rc;
13118 bool uc_update;
13119
13120 netif_addr_lock_bh(dev);
13121 uc_update = bnxt_uc_list_updated(bp);
13122 netif_addr_unlock_bh(dev);
13123
13124 if (!uc_update)
13125 goto skip_uc;
13126
13127 for (i = 1; i < vnic->uc_filter_count; i++) {
13128 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13129
13130 bnxt_hwrm_l2_filter_free(bp, fltr);
13131 bnxt_del_l2_filter(bp, fltr);
13132 }
13133
13134 vnic->uc_filter_count = 1;
13135
13136 netif_addr_lock_bh(dev);
13137 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13138 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13139 } else {
13140 netdev_for_each_uc_addr(ha, dev) {
13141 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13142 off += ETH_ALEN;
13143 vnic->uc_filter_count++;
13144 }
13145 }
13146 netif_addr_unlock_bh(dev);
13147
13148 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13149 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13150 if (rc) {
13151 if (BNXT_VF(bp) && rc == -ENODEV) {
13152 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13153 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13154 else
13155 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13156 rc = 0;
13157 } else {
13158 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13159 }
13160 vnic->uc_filter_count = i;
13161 return rc;
13162 }
13163 }
13164 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13165 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13166
13167 skip_uc:
13168 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13169 !bnxt_promisc_ok(bp))
13170 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13171 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13172 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13173 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13174 rc);
13175 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13176 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13177 vnic->mc_list_count = 0;
13178 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13179 }
13180 if (rc)
13181 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13182 rc);
13183
13184 return rc;
13185 }
13186
13187 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13188 {
13189 #ifdef CONFIG_BNXT_SRIOV
13190 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13191 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13192
13193 /* No minimum rings were provisioned by the PF. Don't
13194 * reserve rings by default when device is down.
13195 */
13196 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13197 return true;
13198
13199 if (!netif_running(bp->dev))
13200 return false;
13201 }
13202 #endif
13203 return true;
13204 }
13205
13206 /* If the chip and firmware support RFS */
13207 static bool bnxt_rfs_supported(struct bnxt *bp)
13208 {
13209 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13210 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13211 return true;
13212 return false;
13213 }
13214 /* 212 firmware is broken for aRFS */
13215 if (BNXT_FW_MAJ(bp) == 212)
13216 return false;
13217 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13218 return true;
13219 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13220 return true;
13221 return false;
13222 }
13223
13224 /* If runtime conditions support RFS */
13225 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13226 {
13227 struct bnxt_hw_rings hwr = {0};
13228 int max_vnics, max_rss_ctxs;
13229
13230 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13231 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13232 return bnxt_rfs_supported(bp);
13233
13234 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13235 return false;
13236
13237 hwr.grp = bp->rx_nr_rings;
13238 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13239 if (new_rss_ctx)
13240 hwr.vnic++;
13241 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13242 max_vnics = bnxt_get_max_func_vnics(bp);
13243 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13244
13245 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13246 if (bp->rx_nr_rings > 1)
13247 netdev_warn(bp->dev,
13248 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13249 min(max_rss_ctxs - 1, max_vnics - 1));
13250 return false;
13251 }
13252
13253 if (!BNXT_NEW_RM(bp))
13254 return true;
13255
13256 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13257 * issue that will mess up the default VNIC if we reduce the
13258 * reservations.
13259 */
13260 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13261 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13262 return true;
13263
13264 bnxt_hwrm_reserve_rings(bp, &hwr);
13265 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13266 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13267 return true;
13268
13269 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13270 hwr.vnic = 1;
13271 hwr.rss_ctx = 0;
13272 bnxt_hwrm_reserve_rings(bp, &hwr);
13273 return false;
13274 }
13275
13276 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13277 netdev_features_t features)
13278 {
13279 struct bnxt *bp = netdev_priv(dev);
13280 netdev_features_t vlan_features;
13281
13282 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13283 features &= ~NETIF_F_NTUPLE;
13284
13285 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13286 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13287
13288 if (!(features & NETIF_F_GRO))
13289 features &= ~NETIF_F_GRO_HW;
13290
13291 if (features & NETIF_F_GRO_HW)
13292 features &= ~NETIF_F_LRO;
13293
13294 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13295 * turned on or off together.
13296 */
13297 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13298 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13299 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13300 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13301 else if (vlan_features)
13302 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13303 }
13304 #ifdef CONFIG_BNXT_SRIOV
13305 if (BNXT_VF(bp) && bp->vf.vlan)
13306 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13307 #endif
13308 return features;
13309 }
13310
13311 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13312 bool link_re_init, u32 flags, bool update_tpa)
13313 {
13314 bnxt_close_nic(bp, irq_re_init, link_re_init);
13315 bp->flags = flags;
13316 if (update_tpa)
13317 bnxt_set_ring_params(bp);
13318 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13319 }
13320
13321 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13322 {
13323 bool update_tpa = false, update_ntuple = false;
13324 struct bnxt *bp = netdev_priv(dev);
13325 u32 flags = bp->flags;
13326 u32 changes;
13327 int rc = 0;
13328 bool re_init = false;
13329
13330 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13331 if (features & NETIF_F_GRO_HW)
13332 flags |= BNXT_FLAG_GRO;
13333 else if (features & NETIF_F_LRO)
13334 flags |= BNXT_FLAG_LRO;
13335
13336 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13337 flags &= ~BNXT_FLAG_TPA;
13338
13339 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13340 flags |= BNXT_FLAG_STRIP_VLAN;
13341
13342 if (features & NETIF_F_NTUPLE)
13343 flags |= BNXT_FLAG_RFS;
13344 else
13345 bnxt_clear_usr_fltrs(bp, true);
13346
13347 changes = flags ^ bp->flags;
13348 if (changes & BNXT_FLAG_TPA) {
13349 update_tpa = true;
13350 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13351 (flags & BNXT_FLAG_TPA) == 0 ||
13352 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13353 re_init = true;
13354 }
13355
13356 if (changes & ~BNXT_FLAG_TPA)
13357 re_init = true;
13358
13359 if (changes & BNXT_FLAG_RFS)
13360 update_ntuple = true;
13361
13362 if (flags != bp->flags) {
13363 u32 old_flags = bp->flags;
13364
13365 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13366 bp->flags = flags;
13367 if (update_tpa)
13368 bnxt_set_ring_params(bp);
13369 return rc;
13370 }
13371
13372 if (update_ntuple)
13373 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13374
13375 if (re_init)
13376 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13377
13378 if (update_tpa) {
13379 bp->flags = flags;
13380 rc = bnxt_set_tpa(bp,
13381 (flags & BNXT_FLAG_TPA) ?
13382 true : false);
13383 if (rc)
13384 bp->flags = old_flags;
13385 }
13386 }
13387 return rc;
13388 }
13389
13390 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13391 u8 **nextp)
13392 {
13393 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13394 struct hop_jumbo_hdr *jhdr;
13395 int hdr_count = 0;
13396 u8 *nexthdr;
13397 int start;
13398
13399 /* Check that there are at most 2 IPv6 extension headers, no
13400 * fragment header, and each is <= 64 bytes.
13401 */
13402 start = nw_off + sizeof(*ip6h);
13403 nexthdr = &ip6h->nexthdr;
13404 while (ipv6_ext_hdr(*nexthdr)) {
13405 struct ipv6_opt_hdr *hp;
13406 int hdrlen;
13407
13408 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13409 *nexthdr == NEXTHDR_FRAGMENT)
13410 return false;
13411 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13412 skb_headlen(skb), NULL);
13413 if (!hp)
13414 return false;
13415 if (*nexthdr == NEXTHDR_AUTH)
13416 hdrlen = ipv6_authlen(hp);
13417 else
13418 hdrlen = ipv6_optlen(hp);
13419
13420 if (hdrlen > 64)
13421 return false;
13422
13423 /* The ext header may be a hop-by-hop header inserted for
13424 * big TCP purposes. This will be removed before sending
13425 * from NIC, so do not count it.
13426 */
13427 if (*nexthdr == NEXTHDR_HOP) {
13428 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13429 goto increment_hdr;
13430
13431 jhdr = (struct hop_jumbo_hdr *)hp;
13432 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13433 jhdr->nexthdr != IPPROTO_TCP)
13434 goto increment_hdr;
13435
13436 goto next_hdr;
13437 }
13438 increment_hdr:
13439 hdr_count++;
13440 next_hdr:
13441 nexthdr = &hp->nexthdr;
13442 start += hdrlen;
13443 }
13444 if (nextp) {
13445 /* Caller will check inner protocol */
13446 if (skb->encapsulation) {
13447 *nextp = nexthdr;
13448 return true;
13449 }
13450 *nextp = NULL;
13451 }
13452 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13453 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13454 }
13455
13456 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
13457 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13458 {
13459 struct udphdr *uh = udp_hdr(skb);
13460 __be16 udp_port = uh->dest;
13461
13462 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13463 udp_port != bp->vxlan_gpe_port)
13464 return false;
13465 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13466 struct ethhdr *eh = inner_eth_hdr(skb);
13467
13468 switch (eh->h_proto) {
13469 case htons(ETH_P_IP):
13470 return true;
13471 case htons(ETH_P_IPV6):
13472 return bnxt_exthdr_check(bp, skb,
13473 skb_inner_network_offset(skb),
13474 NULL);
13475 }
13476 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13477 return true;
13478 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13479 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13480 NULL);
13481 }
13482 return false;
13483 }
13484
13485 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13486 {
13487 switch (l4_proto) {
13488 case IPPROTO_UDP:
13489 return bnxt_udp_tunl_check(bp, skb);
13490 case IPPROTO_IPIP:
13491 return true;
13492 case IPPROTO_GRE: {
13493 switch (skb->inner_protocol) {
13494 default:
13495 return false;
13496 case htons(ETH_P_IP):
13497 return true;
13498 case htons(ETH_P_IPV6):
13499 fallthrough;
13500 }
13501 }
13502 case IPPROTO_IPV6:
13503 /* Check ext headers of inner ipv6 */
13504 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13505 NULL);
13506 }
13507 return false;
13508 }
13509
13510 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13511 struct net_device *dev,
13512 netdev_features_t features)
13513 {
13514 struct bnxt *bp = netdev_priv(dev);
13515 u8 *l4_proto;
13516
13517 features = vlan_features_check(skb, features);
13518 switch (vlan_get_protocol(skb)) {
13519 case htons(ETH_P_IP):
13520 if (!skb->encapsulation)
13521 return features;
13522 l4_proto = &ip_hdr(skb)->protocol;
13523 if (bnxt_tunl_check(bp, skb, *l4_proto))
13524 return features;
13525 break;
13526 case htons(ETH_P_IPV6):
13527 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
13528 &l4_proto))
13529 break;
13530 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
13531 return features;
13532 break;
13533 }
13534 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13535 }
13536
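/* Read @num_words 32-bit words starting at register offset @reg_off via
 * the HWRM_DBG_READ_DIRECT command and return them in @reg_buf.
 */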
13537 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
13538 u32 *reg_buf)
13539 {
13540 struct hwrm_dbg_read_direct_output *resp;
13541 struct hwrm_dbg_read_direct_input *req;
13542 __le32 *dbg_reg_buf;
13543 dma_addr_t mapping;
13544 int rc, i;
13545
13546 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
13547 if (rc)
13548 return rc;
13549
13550 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
13551 &mapping);
13552 if (!dbg_reg_buf) {
13553 rc = -ENOMEM;
13554 goto dbg_rd_reg_exit;
13555 }
13556
13557 req->host_dest_addr = cpu_to_le64(mapping);
13558
13559 resp = hwrm_req_hold(bp, req);
13560 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
13561 req->read_len32 = cpu_to_le32(num_words);
13562
13563 rc = hwrm_req_send(bp, req);
13564 if (rc || resp->error_code) {
13565 rc = -EIO;
13566 goto dbg_rd_reg_exit;
13567 }
13568 for (i = 0; i < num_words; i++)
13569 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
13570
13571 dbg_rd_reg_exit:
13572 hwrm_req_drop(bp, req);
13573 return rc;
13574 }
13575
13576 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
13577 u32 ring_id, u32 *prod, u32 *cons)
13578 {
13579 struct hwrm_dbg_ring_info_get_output *resp;
13580 struct hwrm_dbg_ring_info_get_input *req;
13581 int rc;
13582
13583 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13584 if (rc)
13585 return rc;
13586
13587 req->ring_type = ring_type;
13588 req->fw_ring_id = cpu_to_le32(ring_id);
13589 resp = hwrm_req_hold(bp, req);
13590 rc = hwrm_req_send(bp, req);
13591 if (!rc) {
13592 *prod = le32_to_cpu(resp->producer_index);
13593 *cons = le32_to_cpu(resp->consumer_index);
13594 }
13595 hwrm_req_drop(bp, req);
13596 return rc;
13597 }
13598
13599 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13600 {
13601 struct bnxt_tx_ring_info *txr;
13602 int i = bnapi->index, j;
13603
13604 bnxt_for_each_napi_tx(j, bnapi, txr)
13605 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
13606 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
13607 txr->tx_cons);
13608 }
13609
13610 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
13611 {
13612 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
13613 int i = bnapi->index;
13614
13615 if (!rxr)
13616 return;
13617
13618 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
13619 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
13620 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
13621 rxr->rx_sw_agg_prod);
13622 }
13623
13624 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
13625 {
13626 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13627 int i = bnapi->index;
13628
13629 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
13630 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
13631 }
13632
13633 static void bnxt_dbg_dump_states(struct bnxt *bp)
13634 {
13635 int i;
13636 struct bnxt_napi *bnapi;
13637
13638 for (i = 0; i < bp->cp_nr_rings; i++) {
13639 bnapi = bp->bnapi[i];
13640 if (netif_msg_drv(bp)) {
13641 bnxt_dump_tx_sw_state(bnapi);
13642 bnxt_dump_rx_sw_state(bnapi);
13643 bnxt_dump_cp_sw_state(bnapi);
13644 }
13645 }
13646 }
13647
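/* Ask firmware to reset the RX ring group associated with @ring_nr. */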
13648 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
13649 {
13650 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
13651 struct hwrm_ring_reset_input *req;
13652 struct bnxt_napi *bnapi = rxr->bnapi;
13653 struct bnxt_cp_ring_info *cpr;
13654 u16 cp_ring_id;
13655 int rc;
13656
13657 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
13658 if (rc)
13659 return rc;
13660
13661 cpr = &bnapi->cp_ring;
13662 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
13663 req->cmpl_ring = cpu_to_le16(cp_ring_id);
13664 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
13665 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
13666 return hwrm_req_send_silent(bp, req);
13667 }
13668
13669 static void bnxt_reset_task(struct bnxt *bp, bool silent)
13670 {
13671 if (!silent)
13672 bnxt_dbg_dump_states(bp);
13673 if (netif_running(bp->dev)) {
13674 bnxt_close_nic(bp, !silent, false);
13675 bnxt_open_nic(bp, !silent, false);
13676 }
13677 }
13678
13679 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
13680 {
13681 struct bnxt *bp = netdev_priv(dev);
13682
13683 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
13684 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
13685 }
13686
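/* Periodic firmware health check driven by bnxt_timer().  If the heartbeat
 * register stops advancing or the reset counter changes unexpectedly,
 * schedule firmware exception handling.
 */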
13687 static void bnxt_fw_health_check(struct bnxt *bp)
13688 {
13689 struct bnxt_fw_health *fw_health = bp->fw_health;
13690 struct pci_dev *pdev = bp->pdev;
13691 u32 val;
13692
13693 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13694 return;
13695
13696 /* Make sure it is enabled before checking the tmr_counter. */
13697 smp_rmb();
13698 if (fw_health->tmr_counter) {
13699 fw_health->tmr_counter--;
13700 return;
13701 }
13702
13703 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13704 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
13705 fw_health->arrests++;
13706 goto fw_reset;
13707 }
13708
13709 fw_health->last_fw_heartbeat = val;
13710
13711 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13712 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
13713 fw_health->discoveries++;
13714 goto fw_reset;
13715 }
13716
13717 fw_health->tmr_counter = fw_health->tmr_multiplier;
13718 return;
13719
13720 fw_reset:
13721 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
13722 }
13723
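/* Per-device periodic timer.  Queues slow-path work for stats, health
 * checks, PHY retries and other housekeeping, then re-arms itself.
 */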
13724 static void bnxt_timer(struct timer_list *t)
13725 {
13726 struct bnxt *bp = from_timer(bp, t, timer);
13727 struct net_device *dev = bp->dev;
13728
13729 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
13730 return;
13731
13732 if (atomic_read(&bp->intr_sem) != 0)
13733 goto bnxt_restart_timer;
13734
13735 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
13736 bnxt_fw_health_check(bp);
13737
13738 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
13739 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
13740
13741 if (bnxt_tc_flower_enabled(bp))
13742 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
13743
13744 #ifdef CONFIG_RFS_ACCEL
13745 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
13746 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
13747 #endif /*CONFIG_RFS_ACCEL*/
13748
13749 if (bp->link_info.phy_retry) {
13750 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
13751 bp->link_info.phy_retry = false;
13752 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
13753 } else {
13754 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
13755 }
13756 }
13757
13758 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13759 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13760
13761 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
13762 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
13763
13764 bnxt_restart_timer:
13765 mod_timer(&bp->timer, jiffies + bp->current_interval);
13766 }
13767
13768 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
13769 {
13770 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
13771 * set. If the device is being closed, bnxt_close() may be holding
13772 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
13773 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
13774 */
13775 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13776 rtnl_lock();
13777 }
13778
13779 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
13780 {
13781 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13782 rtnl_unlock();
13783 }
13784
13785 /* Only called from bnxt_sp_task() */
13786 static void bnxt_reset(struct bnxt *bp, bool silent)
13787 {
13788 bnxt_rtnl_lock_sp(bp);
13789 if (test_bit(BNXT_STATE_OPEN, &bp->state))
13790 bnxt_reset_task(bp, silent);
13791 bnxt_rtnl_unlock_sp(bp);
13792 }
13793
13794 /* Only called from bnxt_sp_task() */
13795 static void bnxt_rx_ring_reset(struct bnxt *bp)
13796 {
13797 int i;
13798
13799 bnxt_rtnl_lock_sp(bp);
13800 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13801 bnxt_rtnl_unlock_sp(bp);
13802 return;
13803 }
13804 /* Disable and flush TPA before resetting the RX ring */
13805 if (bp->flags & BNXT_FLAG_TPA)
13806 bnxt_set_tpa(bp, false);
13807 for (i = 0; i < bp->rx_nr_rings; i++) {
13808 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
13809 struct bnxt_cp_ring_info *cpr;
13810 int rc;
13811
13812 if (!rxr->bnapi->in_reset)
13813 continue;
13814
13815 rc = bnxt_hwrm_rx_ring_reset(bp, i);
13816 if (rc) {
13817 if (rc == -EINVAL || rc == -EOPNOTSUPP)
13818 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
13819 else
13820 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13821 rc);
13822 bnxt_reset_task(bp, true);
13823 break;
13824 }
13825 bnxt_free_one_rx_ring_skbs(bp, rxr);
13826 rxr->rx_prod = 0;
13827 rxr->rx_agg_prod = 0;
13828 rxr->rx_sw_agg_prod = 0;
13829 rxr->rx_next_cons = 0;
13830 rxr->bnapi->in_reset = false;
13831 bnxt_alloc_one_rx_ring(bp, i);
13832 cpr = &rxr->bnapi->cp_ring;
13833 cpr->sw_stats->rx.rx_resets++;
13834 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13835 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
13836 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13837 }
13838 if (bp->flags & BNXT_FLAG_TPA)
13839 bnxt_set_tpa(bp, true);
13840 bnxt_rtnl_unlock_sp(bp);
13841 }
13842
13843 static void bnxt_fw_fatal_close(struct bnxt *bp)
13844 {
13845 bnxt_tx_disable(bp);
13846 bnxt_disable_napi(bp);
13847 bnxt_disable_int_sync(bp);
13848 bnxt_free_irq(bp);
13849 bnxt_clear_int_mode(bp);
13850 pci_disable_device(bp->pdev);
13851 }
13852
13853 static void bnxt_fw_reset_close(struct bnxt *bp)
13854 {
13855 /* When firmware is in fatal state, quiesce device and disable
13856 * bus master to prevent any potential bad DMAs before freeing
13857 * kernel memory.
13858 */
13859 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
13860 u16 val = 0;
13861
13862 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13863 if (val == 0xffff)
13864 bp->fw_reset_min_dsecs = 0;
13865 bnxt_fw_fatal_close(bp);
13866 }
13867 __bnxt_close_nic(bp, true, false);
13868 bnxt_vf_reps_free(bp);
13869 bnxt_clear_int_mode(bp);
13870 bnxt_hwrm_func_drv_unrgtr(bp);
13871 if (pci_is_enabled(bp->pdev))
13872 pci_disable_device(bp->pdev);
13873 bnxt_free_ctx_mem(bp, false);
13874 }
13875
13876 static bool is_bnxt_fw_ok(struct bnxt *bp)
13877 {
13878 struct bnxt_fw_health *fw_health = bp->fw_health;
13879 bool no_heartbeat = false, has_reset = false;
13880 u32 val;
13881
13882 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13883 if (val == fw_health->last_fw_heartbeat)
13884 no_heartbeat = true;
13885
13886 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13887 if (val != fw_health->last_fw_reset_cnt)
13888 has_reset = true;
13889
13890 if (!no_heartbeat && has_reset)
13891 return true;
13892
13893 return false;
13894 }
13895
13896 /* rtnl_lock is acquired before calling this function */
13897 static void bnxt_force_fw_reset(struct bnxt *bp)
13898 {
13899 struct bnxt_fw_health *fw_health = bp->fw_health;
13900 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13901 u32 wait_dsecs;
13902
13903 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
13904 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13905 return;
13906
13907 /* We have to serialize with bnxt_refclk_read(). */
13908 if (ptp) {
13909 unsigned long flags;
13910
13911 write_seqlock_irqsave(&ptp->ptp_lock, flags);
13912 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13913 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
13914 } else {
13915 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13916 }
13917 bnxt_fw_reset_close(bp);
13918 wait_dsecs = fw_health->master_func_wait_dsecs;
13919 if (fw_health->primary) {
13920 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
13921 wait_dsecs = 0;
13922 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13923 } else {
13924 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
13925 wait_dsecs = fw_health->normal_func_wait_dsecs;
13926 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13927 }
13928
13929 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
13930 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
13931 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13932 }
13933
13934 void bnxt_fw_exception(struct bnxt *bp)
13935 {
13936 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
13937 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13938 bnxt_ulp_stop(bp);
13939 bnxt_rtnl_lock_sp(bp);
13940 bnxt_force_fw_reset(bp);
13941 bnxt_rtnl_unlock_sp(bp);
13942 }
13943
13944 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
13945 * < 0 on error.
13946 */
13947 static int bnxt_get_registered_vfs(struct bnxt *bp)
13948 {
13949 #ifdef CONFIG_BNXT_SRIOV
13950 int rc;
13951
13952 if (!BNXT_PF(bp))
13953 return 0;
13954
13955 rc = bnxt_hwrm_func_qcfg(bp);
13956 if (rc) {
13957 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13958 return rc;
13959 }
13960 if (bp->pf.registered_vfs)
13961 return bp->pf.registered_vfs;
13962 if (bp->sriov_cfg)
13963 return 1;
13964 #endif
13965 return 0;
13966 }
13967
13968 void bnxt_fw_reset(struct bnxt *bp)
13969 {
13970 bnxt_ulp_stop(bp);
13971 bnxt_rtnl_lock_sp(bp);
13972 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
13973 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13974 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13975 int n = 0, tmo;
13976
13977 /* We have to serialize with bnxt_refclk_read(). */
13978 if (ptp) {
13979 unsigned long flags;
13980
13981 write_seqlock_irqsave(&ptp->ptp_lock, flags);
13982 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13983 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
13984 } else {
13985 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13986 }
13987 if (bp->pf.active_vfs &&
13988 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
13989 n = bnxt_get_registered_vfs(bp);
13990 if (n < 0) {
13991 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13992 n);
13993 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13994 dev_close(bp->dev);
13995 goto fw_reset_exit;
13996 } else if (n > 0) {
13997 u16 vf_tmo_dsecs = n * 10;
13998
13999 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14000 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14001 bp->fw_reset_state =
14002 BNXT_FW_RESET_STATE_POLL_VF;
14003 bnxt_queue_fw_reset_work(bp, HZ / 10);
14004 goto fw_reset_exit;
14005 }
14006 bnxt_fw_reset_close(bp);
14007 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14008 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14009 tmo = HZ / 10;
14010 } else {
14011 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14012 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14013 }
14014 bnxt_queue_fw_reset_work(bp, tmo);
14015 }
14016 fw_reset_exit:
14017 bnxt_rtnl_unlock_sp(bp);
14018 }
14019
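/* On P5+ chips, look for completion rings that have pending work but whose
 * consumer index has not advanced since the last check, and count them as
 * missed interrupts.
 */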
14020 static void bnxt_chk_missed_irq(struct bnxt *bp)
14021 {
14022 int i;
14023
14024 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14025 return;
14026
14027 for (i = 0; i < bp->cp_nr_rings; i++) {
14028 struct bnxt_napi *bnapi = bp->bnapi[i];
14029 struct bnxt_cp_ring_info *cpr;
14030 u32 fw_ring_id;
14031 int j;
14032
14033 if (!bnapi)
14034 continue;
14035
14036 cpr = &bnapi->cp_ring;
14037 for (j = 0; j < cpr->cp_ring_count; j++) {
14038 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14039 u32 val[2];
14040
14041 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14042 continue;
14043
14044 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14045 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14046 continue;
14047 }
14048 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14049 bnxt_dbg_hwrm_ring_info_get(bp,
14050 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14051 fw_ring_id, &val[0], &val[1]);
14052 cpr->sw_stats->cmn.missed_irqs++;
14053 }
14054 }
14055 }
14056
14057 static void bnxt_cfg_ntp_filters(struct bnxt *);
14058
14059 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14060 {
14061 struct bnxt_link_info *link_info = &bp->link_info;
14062
14063 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14064 link_info->autoneg = BNXT_AUTONEG_SPEED;
14065 if (bp->hwrm_spec_code >= 0x10201) {
14066 if (link_info->auto_pause_setting &
14067 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14068 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14069 } else {
14070 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14071 }
14072 bnxt_set_auto_speed(link_info);
14073 } else {
14074 bnxt_set_force_speed(link_info);
14075 link_info->req_duplex = link_info->duplex_setting;
14076 }
14077 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14078 link_info->req_flow_ctrl =
14079 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14080 else
14081 link_info->req_flow_ctrl = link_info->force_pause_setting;
14082 }
14083
14084 static void bnxt_fw_echo_reply(struct bnxt *bp)
14085 {
14086 struct bnxt_fw_health *fw_health = bp->fw_health;
14087 struct hwrm_func_echo_response_input *req;
14088 int rc;
14089
14090 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14091 if (rc)
14092 return;
14093 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14094 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14095 hwrm_req_send(bp, req);
14096 }
14097
14098 static void bnxt_ulp_restart(struct bnxt *bp)
14099 {
14100 bnxt_ulp_stop(bp);
14101 bnxt_ulp_start(bp, 0);
14102 }
14103
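/* Slow-path work handler.  Runs from the bp->sp_task work item and services
 * the deferred events flagged in bp->sp_event (rx-mode and filter updates,
 * link changes, periodic stats, FW echo/reset notifications, etc.) while
 * BNXT_STATE_IN_SP_TASK is set.
 */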
14104 static void bnxt_sp_task(struct work_struct *work)
14105 {
14106 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14107
14108 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14109 smp_mb__after_atomic();
14110 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14111 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14112 return;
14113 }
14114
14115 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14116 bnxt_ulp_restart(bp);
14117 bnxt_reenable_sriov(bp);
14118 }
14119
14120 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14121 bnxt_cfg_rx_mode(bp);
14122
14123 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14124 bnxt_cfg_ntp_filters(bp);
14125 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14126 bnxt_hwrm_exec_fwd_req(bp);
14127 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14128 netdev_info(bp->dev, "Receive PF driver unload event!\n");
14129 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14130 bnxt_hwrm_port_qstats(bp, 0);
14131 bnxt_hwrm_port_qstats_ext(bp, 0);
14132 bnxt_accumulate_all_stats(bp);
14133 }
14134
14135 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14136 int rc;
14137
14138 mutex_lock(&bp->link_lock);
14139 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14140 &bp->sp_event))
14141 bnxt_hwrm_phy_qcaps(bp);
14142
14143 rc = bnxt_update_link(bp, true);
14144 if (rc)
14145 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14146 rc);
14147
14148 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14149 &bp->sp_event))
14150 bnxt_init_ethtool_link_settings(bp);
14151 mutex_unlock(&bp->link_lock);
14152 }
14153 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14154 int rc;
14155
14156 mutex_lock(&bp->link_lock);
14157 rc = bnxt_update_phy_setting(bp);
14158 mutex_unlock(&bp->link_lock);
14159 if (rc) {
14160 netdev_warn(bp->dev, "update phy settings retry failed\n");
14161 } else {
14162 bp->link_info.phy_retry = false;
14163 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14164 }
14165 }
14166 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14167 mutex_lock(&bp->link_lock);
14168 bnxt_get_port_module_status(bp);
14169 mutex_unlock(&bp->link_lock);
14170 }
14171
14172 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14173 bnxt_tc_flow_stats_work(bp);
14174
14175 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14176 bnxt_chk_missed_irq(bp);
14177
14178 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14179 bnxt_fw_echo_reply(bp);
14180
14181 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14182 bnxt_hwmon_notify_event(bp);
14183
14184 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
14185 * must be the last functions to be called before exiting.
14186 */
14187 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14188 bnxt_reset(bp, false);
14189
14190 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14191 bnxt_reset(bp, true);
14192
14193 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14194 bnxt_rx_ring_reset(bp);
14195
14196 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14197 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14198 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14199 bnxt_devlink_health_fw_report(bp);
14200 else
14201 bnxt_fw_reset(bp);
14202 }
14203
14204 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14205 if (!is_bnxt_fw_ok(bp))
14206 bnxt_devlink_health_fw_report(bp);
14207 }
14208
14209 smp_mb__before_atomic();
14210 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14211 }
14212
14213 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14214 int *max_cp);
14215
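/* Checks whether the requested TX/RX/TC/XDP ring configuration fits within
 * the rings, completion rings, VNICs, ring groups, RSS/stat contexts and
 * MSI-X vectors that the firmware and PCI core can provide.  Returns 0 if
 * the configuration is feasible, -ENOMEM or -ENOSPC otherwise.
 */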
14216 /* Under rtnl_lock */
14217 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14218 int tx_xdp)
14219 {
14220 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14221 struct bnxt_hw_rings hwr = {0};
14222 int rx_rings = rx;
14223 int rc;
14224
14225 if (tcs)
14226 tx_sets = tcs;
14227
14228 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14229
14230 if (max_rx < rx_rings)
14231 return -ENOMEM;
14232
14233 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14234 rx_rings <<= 1;
14235
14236 hwr.rx = rx_rings;
14237 hwr.tx = tx * tx_sets + tx_xdp;
14238 if (max_tx < hwr.tx)
14239 return -ENOMEM;
14240
14241 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14242
14243 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14244 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14245 if (max_cp < hwr.cp)
14246 return -ENOMEM;
14247 hwr.stat = hwr.cp;
14248 if (BNXT_NEW_RM(bp)) {
14249 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14250 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14251 hwr.grp = rx;
14252 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14253 }
14254 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14255 hwr.cp_p5 = hwr.tx + rx;
14256 rc = bnxt_hwrm_check_rings(bp, &hwr);
14257 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14258 if (!bnxt_ulp_registered(bp->edev)) {
14259 hwr.cp += bnxt_get_ulp_msix_num(bp);
14260 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14261 }
14262 if (hwr.cp > bp->total_irqs) {
14263 int total_msix = bnxt_change_msix(bp, hwr.cp);
14264
14265 if (total_msix < hwr.cp) {
14266 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14267 hwr.cp, total_msix);
14268 rc = -ENOSPC;
14269 }
14270 }
14271 }
14272 return rc;
14273 }
14274
14275 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14276 {
14277 if (bp->bar2) {
14278 pci_iounmap(pdev, bp->bar2);
14279 bp->bar2 = NULL;
14280 }
14281
14282 if (bp->bar1) {
14283 pci_iounmap(pdev, bp->bar1);
14284 bp->bar1 = NULL;
14285 }
14286
14287 if (bp->bar0) {
14288 pci_iounmap(pdev, bp->bar0);
14289 bp->bar0 = NULL;
14290 }
14291 }
14292
14293 static void bnxt_cleanup_pci(struct bnxt *bp)
14294 {
14295 bnxt_unmap_bars(bp, bp->pdev);
14296 pci_release_regions(bp->pdev);
14297 if (pci_is_enabled(bp->pdev))
14298 pci_disable_device(bp->pdev);
14299 }
14300
14301 static void bnxt_init_dflt_coal(struct bnxt *bp)
14302 {
14303 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14304 struct bnxt_coal *coal;
14305 u16 flags = 0;
14306
14307 if (coal_cap->cmpl_params &
14308 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14309 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14310
14311 /* Tick values in microseconds.
14312 * 1 coal_buf x bufs_per_record = 1 completion record.
14313 */
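/* Example: with rx bufs_per_record = 2 below, coal_bufs = 30 corresponds to
 * 15 completion records per coalescing event.
 */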
14314 coal = &bp->rx_coal;
14315 coal->coal_ticks = 10;
14316 coal->coal_bufs = 30;
14317 coal->coal_ticks_irq = 1;
14318 coal->coal_bufs_irq = 2;
14319 coal->idle_thresh = 50;
14320 coal->bufs_per_record = 2;
14321 coal->budget = 64; /* NAPI budget */
14322 coal->flags = flags;
14323
14324 coal = &bp->tx_coal;
14325 coal->coal_ticks = 28;
14326 coal->coal_bufs = 30;
14327 coal->coal_ticks_irq = 2;
14328 coal->coal_bufs_irq = 2;
14329 coal->bufs_per_record = 1;
14330 coal->flags = flags;
14331
14332 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14333 }
14334
14335 /* FW that pre-reserves 1 VNIC per function */
14336 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14337 {
14338 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14339
14340 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14341 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14342 return true;
14343 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14344 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14345 return true;
14346 return false;
14347 }
14348
14349 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14350 {
14351 int rc;
14352
14353 bp->fw_cap = 0;
14354 rc = bnxt_hwrm_ver_get(bp);
14355 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14356 * so wait before continuing with recovery.
14357 */
14358 if (rc)
14359 msleep(100);
14360 bnxt_try_map_fw_health_reg(bp);
14361 if (rc) {
14362 rc = bnxt_try_recover_fw(bp);
14363 if (rc)
14364 return rc;
14365 rc = bnxt_hwrm_ver_get(bp);
14366 if (rc)
14367 return rc;
14368 }
14369
14370 bnxt_nvm_cfg_ver_get(bp);
14371
14372 rc = bnxt_hwrm_func_reset(bp);
14373 if (rc)
14374 return -ENODEV;
14375
14376 bnxt_hwrm_fw_set_time(bp);
14377 return 0;
14378 }
14379
14380 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14381 {
14382 int rc;
14383
14384 /* Get the MAX capabilities for this function */
14385 rc = bnxt_hwrm_func_qcaps(bp);
14386 if (rc) {
14387 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14388 rc);
14389 return -ENODEV;
14390 }
14391
14392 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14393 if (rc)
14394 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14395 rc);
14396
14397 if (bnxt_alloc_fw_health(bp)) {
14398 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14399 } else {
14400 rc = bnxt_hwrm_error_recovery_qcfg(bp);
14401 if (rc)
14402 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14403 rc);
14404 }
14405
14406 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14407 if (rc)
14408 return -ENODEV;
14409
14410 rc = bnxt_alloc_crash_dump_mem(bp);
14411 if (rc)
14412 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14413 rc);
14414 if (!rc) {
14415 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14416 if (rc) {
14417 bnxt_free_crash_dump_mem(bp);
14418 netdev_warn(bp->dev,
14419 "hwrm crash dump mem failure rc: %d\n", rc);
14420 }
14421 }
14422
14423 if (bnxt_fw_pre_resv_vnics(bp))
14424 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14425
14426 bnxt_hwrm_func_qcfg(bp);
14427 bnxt_hwrm_vnic_qcaps(bp);
14428 bnxt_hwrm_port_led_qcaps(bp);
14429 bnxt_ethtool_init(bp);
14430 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14431 __bnxt_hwrm_ptp_qcfg(bp);
14432 bnxt_dcb_init(bp);
14433 bnxt_hwmon_init(bp);
14434 return 0;
14435 }
14436
14437 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14438 {
14439 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14440 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14441 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14442 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14443 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14444 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14445 bp->rss_hash_delta = bp->rss_hash_cfg;
14446 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14447 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14448 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14449 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14450 }
14451 }
14452
14453 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14454 {
14455 struct net_device *dev = bp->dev;
14456
14457 dev->hw_features &= ~NETIF_F_NTUPLE;
14458 dev->features &= ~NETIF_F_NTUPLE;
14459 bp->flags &= ~BNXT_FLAG_RFS;
14460 if (bnxt_rfs_supported(bp)) {
14461 dev->hw_features |= NETIF_F_NTUPLE;
14462 if (bnxt_rfs_capable(bp, false)) {
14463 bp->flags |= BNXT_FLAG_RFS;
14464 dev->features |= NETIF_F_NTUPLE;
14465 }
14466 }
14467 }
14468
14469 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14470 {
14471 struct pci_dev *pdev = bp->pdev;
14472
14473 bnxt_set_dflt_rss_hash_type(bp);
14474 bnxt_set_dflt_rfs(bp);
14475
14476 bnxt_get_wol_settings(bp);
14477 if (bp->flags & BNXT_FLAG_WOL_CAP)
14478 device_set_wakeup_enable(&pdev->dev, bp->wol);
14479 else
14480 device_set_wakeup_capable(&pdev->dev, false);
14481
14482 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14483 bnxt_hwrm_coal_params_qcaps(bp);
14484 }
14485
14486 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14487
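/* Full firmware (re)initialization: phase 1 establishes HWRM communication
 * and resets the function, phase 2 queries capabilities and registers the
 * driver with the firmware, and phase 3 applies default RSS, RFS, WoL and
 * coalescing settings.
 */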
14488 int bnxt_fw_init_one(struct bnxt *bp)
14489 {
14490 int rc;
14491
14492 rc = bnxt_fw_init_one_p1(bp);
14493 if (rc) {
14494 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14495 return rc;
14496 }
14497 rc = bnxt_fw_init_one_p2(bp);
14498 if (rc) {
14499 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
14500 return rc;
14501 }
14502 rc = bnxt_probe_phy(bp, false);
14503 if (rc)
14504 return rc;
14505 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
14506 if (rc)
14507 return rc;
14508
14509 bnxt_fw_init_one_p3(bp);
14510 return 0;
14511 }
14512
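/* Execute one step of the firmware-provided reset sequence.  Each entry
 * encodes a register (PCI config space, a GRC window, BAR0 or BAR1), the
 * value to write and an optional post-write delay; the config-space read
 * before the delay is presumably there to flush the posted write.
 */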
14513 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
14514 {
14515 struct bnxt_fw_health *fw_health = bp->fw_health;
14516 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
14517 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
14518 u32 reg_type, reg_off, delay_msecs;
14519
14520 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
14521 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
14522 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
14523 switch (reg_type) {
14524 case BNXT_FW_HEALTH_REG_TYPE_CFG:
14525 pci_write_config_dword(bp->pdev, reg_off, val);
14526 break;
14527 case BNXT_FW_HEALTH_REG_TYPE_GRC:
14528 writel(reg_off & BNXT_GRC_BASE_MASK,
14529 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
14530 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
14531 fallthrough;
14532 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
14533 writel(val, bp->bar0 + reg_off);
14534 break;
14535 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
14536 writel(val, bp->bar1 + reg_off);
14537 break;
14538 }
14539 if (delay_msecs) {
14540 pci_read_config_dword(bp->pdev, 0, &val);
14541 msleep(delay_msecs);
14542 }
14543 }
14544
14545 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
14546 {
14547 struct hwrm_func_qcfg_output *resp;
14548 struct hwrm_func_qcfg_input *req;
14549 bool result = true; /* firmware will enforce if unknown */
14550
14551 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
14552 return result;
14553
14554 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
14555 return result;
14556
14557 req->fid = cpu_to_le16(0xffff);
14558 resp = hwrm_req_hold(bp, req);
14559 if (!hwrm_req_send(bp, req))
14560 result = !!(le16_to_cpu(resp->flags) &
14561 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
14562 hwrm_req_drop(bp, req);
14563 return result;
14564 }
14565
14566 static void bnxt_reset_all(struct bnxt *bp)
14567 {
14568 struct bnxt_fw_health *fw_health = bp->fw_health;
14569 int i, rc;
14570
14571 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14572 bnxt_fw_reset_via_optee(bp);
14573 bp->fw_reset_timestamp = jiffies;
14574 return;
14575 }
14576
14577 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
14578 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
14579 bnxt_fw_reset_writel(bp, i);
14580 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
14581 struct hwrm_fw_reset_input *req;
14582
14583 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
14584 if (!rc) {
14585 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
14586 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
14587 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
14588 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
14589 rc = hwrm_req_send(bp, req);
14590 }
14591 if (rc != -ENODEV)
14592 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
14593 }
14594 bp->fw_reset_timestamp = jiffies;
14595 }
14596
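/* The various *_dsecs fields are in deciseconds (100 ms units), hence the
 * "* HZ / 10" conversions to jiffies used throughout the reset code.
 */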
14597 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
14598 {
14599 return time_after(jiffies, bp->fw_reset_timestamp +
14600 (bp->fw_reset_max_dsecs * HZ / 10));
14601 }
14602
14603 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
14604 {
14605 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14606 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
14607 bnxt_dl_health_fw_status_update(bp, false);
14608 bp->fw_reset_state = 0;
14609 dev_close(bp->dev);
14610 }
14611
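/* Firmware reset state machine, driven by the fw_reset_task delayed work:
 *   POLL_VF      -> wait for VFs to unregister, then close the netdev
 *   POLL_FW_DOWN -> wait for firmware to report shutdown
 *   RESET_FW     -> primary function triggers the actual reset
 *   ENABLE_DEV   -> re-enable the PCI device once config space is back
 *   POLL_FW      -> poll HWRM until firmware responds again
 *   OPENING      -> reopen the netdev and restart ULPs/VF reps
 * A timeout or error along the way aborts the reset and closes the device.
 */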
14612 static void bnxt_fw_reset_task(struct work_struct *work)
14613 {
14614 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
14615 int rc = 0;
14616
14617 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14618 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
14619 return;
14620 }
14621
14622 switch (bp->fw_reset_state) {
14623 case BNXT_FW_RESET_STATE_POLL_VF: {
14624 int n = bnxt_get_registered_vfs(bp);
14625 int tmo;
14626
14627 if (n < 0) {
14628 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
14629 n, jiffies_to_msecs(jiffies -
14630 bp->fw_reset_timestamp));
14631 goto fw_reset_abort;
14632 } else if (n > 0) {
14633 if (bnxt_fw_reset_timeout(bp)) {
14634 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14635 bp->fw_reset_state = 0;
14636 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
14637 n);
14638 goto ulp_start;
14639 }
14640 bnxt_queue_fw_reset_work(bp, HZ / 10);
14641 return;
14642 }
14643 bp->fw_reset_timestamp = jiffies;
14644 rtnl_lock();
14645 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
14646 bnxt_fw_reset_abort(bp, rc);
14647 rtnl_unlock();
14648 goto ulp_start;
14649 }
14650 bnxt_fw_reset_close(bp);
14651 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14652 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14653 tmo = HZ / 10;
14654 } else {
14655 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14656 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14657 }
14658 rtnl_unlock();
14659 bnxt_queue_fw_reset_work(bp, tmo);
14660 return;
14661 }
14662 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
14663 u32 val;
14664
14665 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14666 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
14667 !bnxt_fw_reset_timeout(bp)) {
14668 bnxt_queue_fw_reset_work(bp, HZ / 5);
14669 return;
14670 }
14671
14672 if (!bp->fw_health->primary) {
14673 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
14674
14675 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14676 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14677 return;
14678 }
14679 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14680 }
14681 fallthrough;
14682 case BNXT_FW_RESET_STATE_RESET_FW:
14683 bnxt_reset_all(bp);
14684 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14685 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
14686 return;
14687 case BNXT_FW_RESET_STATE_ENABLE_DEV:
14688 bnxt_inv_fw_health_reg(bp);
14689 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
14690 !bp->fw_reset_min_dsecs) {
14691 u16 val;
14692
14693 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14694 if (val == 0xffff) {
14695 if (bnxt_fw_reset_timeout(bp)) {
14696 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
14697 rc = -ETIMEDOUT;
14698 goto fw_reset_abort;
14699 }
14700 bnxt_queue_fw_reset_work(bp, HZ / 1000);
14701 return;
14702 }
14703 }
14704 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14705 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
14706 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
14707 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
14708 bnxt_dl_remote_reload(bp);
14709 if (pci_enable_device(bp->pdev)) {
14710 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
14711 rc = -ENODEV;
14712 goto fw_reset_abort;
14713 }
14714 pci_set_master(bp->pdev);
14715 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
14716 fallthrough;
14717 case BNXT_FW_RESET_STATE_POLL_FW:
14718 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
14719 rc = bnxt_hwrm_poll(bp);
14720 if (rc) {
14721 if (bnxt_fw_reset_timeout(bp)) {
14722 netdev_err(bp->dev, "Firmware reset aborted\n");
14723 goto fw_reset_abort_status;
14724 }
14725 bnxt_queue_fw_reset_work(bp, HZ / 5);
14726 return;
14727 }
14728 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
14729 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
14730 fallthrough;
14731 case BNXT_FW_RESET_STATE_OPENING:
14732 while (!rtnl_trylock()) {
14733 bnxt_queue_fw_reset_work(bp, HZ / 10);
14734 return;
14735 }
14736 rc = bnxt_open(bp->dev);
14737 if (rc) {
14738 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
14739 bnxt_fw_reset_abort(bp, rc);
14740 rtnl_unlock();
14741 goto ulp_start;
14742 }
14743
14744 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
14745 bp->fw_health->enabled) {
14746 bp->fw_health->last_fw_reset_cnt =
14747 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14748 }
14749 bp->fw_reset_state = 0;
14750 /* Make sure fw_reset_state is 0 before clearing the flag */
14751 smp_mb__before_atomic();
14752 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14753 bnxt_ptp_reapply_pps(bp);
14754 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
14755 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
14756 bnxt_dl_health_fw_recovery_done(bp);
14757 bnxt_dl_health_fw_status_update(bp, true);
14758 }
14759 rtnl_unlock();
14760 bnxt_ulp_start(bp, 0);
14761 bnxt_reenable_sriov(bp);
14762 rtnl_lock();
14763 bnxt_vf_reps_alloc(bp);
14764 bnxt_vf_reps_open(bp);
14765 rtnl_unlock();
14766 break;
14767 }
14768 return;
14769
14770 fw_reset_abort_status:
14771 if (bp->fw_health->status_reliable ||
14772 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
14773 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14774
14775 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
14776 }
14777 fw_reset_abort:
14778 rtnl_lock();
14779 bnxt_fw_reset_abort(bp, rc);
14780 rtnl_unlock();
14781 ulp_start:
14782 bnxt_ulp_start(bp, rc);
14783 }
14784
14785 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
14786 {
14787 int rc;
14788 struct bnxt *bp = netdev_priv(dev);
14789
14790 SET_NETDEV_DEV(dev, &pdev->dev);
14791
14792 /* enable device (incl. PCI PM wakeup), and bus-mastering */
14793 rc = pci_enable_device(pdev);
14794 if (rc) {
14795 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14796 goto init_err;
14797 }
14798
14799 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
14800 dev_err(&pdev->dev,
14801 "Cannot find PCI device base address, aborting\n");
14802 rc = -ENODEV;
14803 goto init_err_disable;
14804 }
14805
14806 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
14807 if (rc) {
14808 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14809 goto init_err_disable;
14810 }
14811
14812 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
14813 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
14814 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
14815 rc = -EIO;
14816 goto init_err_release;
14817 }
14818
14819 pci_set_master(pdev);
14820
14821 bp->dev = dev;
14822 bp->pdev = pdev;
14823
14824 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
14825 * determines the BAR size.
14826 */
14827 bp->bar0 = pci_ioremap_bar(pdev, 0);
14828 if (!bp->bar0) {
14829 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14830 rc = -ENOMEM;
14831 goto init_err_release;
14832 }
14833
14834 bp->bar2 = pci_ioremap_bar(pdev, 4);
14835 if (!bp->bar2) {
14836 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
14837 rc = -ENOMEM;
14838 goto init_err_release;
14839 }
14840
14841 INIT_WORK(&bp->sp_task, bnxt_sp_task);
14842 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
14843
14844 spin_lock_init(&bp->ntp_fltr_lock);
14845 #if BITS_PER_LONG == 32
14846 spin_lock_init(&bp->db_lock);
14847 #endif
14848
14849 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
14850 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
14851
14852 timer_setup(&bp->timer, bnxt_timer, 0);
14853 bp->current_interval = BNXT_TIMER_INTERVAL;
14854
14855 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
14856 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
14857
14858 clear_bit(BNXT_STATE_OPEN, &bp->state);
14859 return 0;
14860
14861 init_err_release:
14862 bnxt_unmap_bars(bp, pdev);
14863 pci_release_regions(pdev);
14864
14865 init_err_disable:
14866 pci_disable_device(pdev);
14867
14868 init_err:
14869 return rc;
14870 }
14871
14872 /* rtnl_lock held */
14873 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
14874 {
14875 struct sockaddr *addr = p;
14876 struct bnxt *bp = netdev_priv(dev);
14877 int rc = 0;
14878
14879 if (!is_valid_ether_addr(addr->sa_data))
14880 return -EADDRNOTAVAIL;
14881
14882 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
14883 return 0;
14884
14885 rc = bnxt_approve_mac(bp, addr->sa_data, true);
14886 if (rc)
14887 return rc;
14888
14889 eth_hw_addr_set(dev, addr->sa_data);
14890 bnxt_clear_usr_fltrs(bp, true);
14891 if (netif_running(dev)) {
14892 bnxt_close_nic(bp, false, false);
14893 rc = bnxt_open_nic(bp, false, false);
14894 }
14895
14896 return rc;
14897 }
14898
14899 /* rtnl_lock held */
14900 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
14901 {
14902 struct bnxt *bp = netdev_priv(dev);
14903
14904 if (netif_running(dev))
14905 bnxt_close_nic(bp, true, false);
14906
14907 WRITE_ONCE(dev->mtu, new_mtu);
14908
14909 /* MTU change may change the AGG ring settings if an XDP multi-buffer
14910 * program is attached. We need to set the AGG ring settings and
14911 * rx_skb_func accordingly.
14912 */
14913 if (READ_ONCE(bp->xdp_prog))
14914 bnxt_set_rx_skb_mode(bp, true);
14915
14916 bnxt_set_ring_params(bp);
14917
14918 if (netif_running(dev))
14919 return bnxt_open_nic(bp, true, false);
14920
14921 return 0;
14922 }
14923
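/* mqprio TC configuration: validate the requested TC count against max_tc
 * and available rings, then close and reopen the NIC with
 * tx_nr_rings = tx_nr_rings_per_tc * tc (+ XDP rings).
 */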
14924 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
14925 {
14926 struct bnxt *bp = netdev_priv(dev);
14927 bool sh = false;
14928 int rc, tx_cp;
14929
14930 if (tc > bp->max_tc) {
14931 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
14932 tc, bp->max_tc);
14933 return -EINVAL;
14934 }
14935
14936 if (bp->num_tc == tc)
14937 return 0;
14938
14939 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
14940 sh = true;
14941
14942 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
14943 sh, tc, bp->tx_nr_rings_xdp);
14944 if (rc)
14945 return rc;
14946
14947 /* Needs to close the device and do hw resource re-allocations */
14948 if (netif_running(bp->dev))
14949 bnxt_close_nic(bp, true, false);
14950
14951 if (tc) {
14952 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
14953 netdev_set_num_tc(dev, tc);
14954 bp->num_tc = tc;
14955 } else {
14956 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14957 netdev_reset_tc(dev);
14958 bp->num_tc = 0;
14959 }
14960 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
14961 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
14962 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
14963 tx_cp + bp->rx_nr_rings;
14964
14965 if (netif_running(bp->dev))
14966 return bnxt_open_nic(bp, true, false);
14967
14968 return 0;
14969 }
14970
14971 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
14972 void *cb_priv)
14973 {
14974 struct bnxt *bp = cb_priv;
14975
14976 if (!bnxt_tc_flower_enabled(bp) ||
14977 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
14978 return -EOPNOTSUPP;
14979
14980 switch (type) {
14981 case TC_SETUP_CLSFLOWER:
14982 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
14983 default:
14984 return -EOPNOTSUPP;
14985 }
14986 }
14987
14988 LIST_HEAD(bnxt_block_cb_list);
14989
14990 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
14991 void *type_data)
14992 {
14993 struct bnxt *bp = netdev_priv(dev);
14994
14995 switch (type) {
14996 case TC_SETUP_BLOCK:
14997 return flow_block_cb_setup_simple(type_data,
14998 &bnxt_block_cb_list,
14999 bnxt_setup_tc_block_cb,
15000 bp, bp, true);
15001 case TC_SETUP_QDISC_MQPRIO: {
15002 struct tc_mqprio_qopt *mqprio = type_data;
15003
15004 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15005
15006 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15007 }
15008 default:
15009 return -EOPNOTSUPP;
15010 }
15011 }
15012
15013 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15014 const struct sk_buff *skb)
15015 {
15016 struct bnxt_vnic_info *vnic;
15017
15018 if (skb)
15019 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15020
15021 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15022 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15023 }
15024
15025 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15026 u32 idx)
15027 {
15028 struct hlist_head *head;
15029 int bit_id;
15030
15031 spin_lock_bh(&bp->ntp_fltr_lock);
15032 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15033 if (bit_id < 0) {
15034 spin_unlock_bh(&bp->ntp_fltr_lock);
15035 return -ENOMEM;
15036 }
15037
15038 fltr->base.sw_id = (u16)bit_id;
15039 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15040 fltr->base.flags |= BNXT_ACT_RING_DST;
15041 head = &bp->ntp_fltr_hash_tbl[idx];
15042 hlist_add_head_rcu(&fltr->base.hash, head);
15043 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15044 bnxt_insert_usr_fltr(bp, &fltr->base);
15045 bp->ntp_fltr_count++;
15046 spin_unlock_bh(&bp->ntp_fltr_lock);
15047 return 0;
15048 }
15049
15050 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15051 struct bnxt_ntuple_filter *f2)
15052 {
15053 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15054 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15055 struct flow_keys *keys1 = &f1->fkeys;
15056 struct flow_keys *keys2 = &f2->fkeys;
15057
15058 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15059 keys1->basic.ip_proto != keys2->basic.ip_proto)
15060 return false;
15061
15062 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15063 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15064 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15065 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15066 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15067 return false;
15068 } else {
15069 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15070 &keys2->addrs.v6addrs.src) ||
15071 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15072 &masks2->addrs.v6addrs.src) ||
15073 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15074 &keys2->addrs.v6addrs.dst) ||
15075 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15076 &masks2->addrs.v6addrs.dst))
15077 return false;
15078 }
15079
15080 return keys1->ports.src == keys2->ports.src &&
15081 masks1->ports.src == masks2->ports.src &&
15082 keys1->ports.dst == keys2->ports.dst &&
15083 masks1->ports.dst == masks2->ports.dst &&
15084 keys1->control.flags == keys2->control.flags &&
15085 f1->l2_fltr == f2->l2_fltr;
15086 }
15087
15088 struct bnxt_ntuple_filter *
15089 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15090 struct bnxt_ntuple_filter *fltr, u32 idx)
15091 {
15092 struct bnxt_ntuple_filter *f;
15093 struct hlist_head *head;
15094
15095 head = &bp->ntp_fltr_hash_tbl[idx];
15096 hlist_for_each_entry_rcu(f, head, base.hash) {
15097 if (bnxt_fltr_match(f, fltr))
15098 return f;
15099 }
15100 return NULL;
15101 }
15102
15103 #ifdef CONFIG_RFS_ACCEL
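/* aRFS flow steering entry point (.ndo_rx_flow_steer).  Builds an n-tuple
 * filter from the skb's dissected flow keys, reuses an existing matching
 * filter if one is already hashed, otherwise inserts the new filter and
 * schedules BNXT_RX_NTP_FLTR_SP_EVENT so bnxt_cfg_ntp_filters() can program
 * it (and expire stale ones) from the slow-path task.
 */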
15104 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15105 u16 rxq_index, u32 flow_id)
15106 {
15107 struct bnxt *bp = netdev_priv(dev);
15108 struct bnxt_ntuple_filter *fltr, *new_fltr;
15109 struct flow_keys *fkeys;
15110 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15111 struct bnxt_l2_filter *l2_fltr;
15112 int rc = 0, idx;
15113 u32 flags;
15114
15115 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15116 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15117 atomic_inc(&l2_fltr->refcnt);
15118 } else {
15119 struct bnxt_l2_key key;
15120
15121 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15122 key.vlan = 0;
15123 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15124 if (!l2_fltr)
15125 return -EINVAL;
15126 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15127 bnxt_del_l2_filter(bp, l2_fltr);
15128 return -EINVAL;
15129 }
15130 }
15131 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15132 if (!new_fltr) {
15133 bnxt_del_l2_filter(bp, l2_fltr);
15134 return -ENOMEM;
15135 }
15136
15137 fkeys = &new_fltr->fkeys;
15138 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15139 rc = -EPROTONOSUPPORT;
15140 goto err_free;
15141 }
15142
15143 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15144 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15145 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15146 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15147 rc = -EPROTONOSUPPORT;
15148 goto err_free;
15149 }
15150 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15151 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15152 if (bp->hwrm_spec_code < 0x10601) {
15153 rc = -EPROTONOSUPPORT;
15154 goto err_free;
15155 }
15156 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15157 }
15158 flags = fkeys->control.flags;
15159 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15160 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15161 rc = -EPROTONOSUPPORT;
15162 goto err_free;
15163 }
15164 new_fltr->l2_fltr = l2_fltr;
15165
15166 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15167 rcu_read_lock();
15168 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15169 if (fltr) {
15170 rc = fltr->base.sw_id;
15171 rcu_read_unlock();
15172 goto err_free;
15173 }
15174 rcu_read_unlock();
15175
15176 new_fltr->flow_id = flow_id;
15177 new_fltr->base.rxq = rxq_index;
15178 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15179 if (!rc) {
15180 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15181 return new_fltr->base.sw_id;
15182 }
15183
15184 err_free:
15185 bnxt_del_l2_filter(bp, l2_fltr);
15186 kfree(new_fltr);
15187 return rc;
15188 }
15189 #endif
15190
15191 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15192 {
15193 spin_lock_bh(&bp->ntp_fltr_lock);
15194 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15195 spin_unlock_bh(&bp->ntp_fltr_lock);
15196 return;
15197 }
15198 hlist_del_rcu(&fltr->base.hash);
15199 bnxt_del_one_usr_fltr(bp, &fltr->base);
15200 bp->ntp_fltr_count--;
15201 spin_unlock_bh(&bp->ntp_fltr_lock);
15202 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15203 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15204 kfree_rcu(fltr, base.rcu);
15205 }
15206
15207 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15208 {
15209 #ifdef CONFIG_RFS_ACCEL
15210 int i;
15211
15212 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15213 struct hlist_head *head;
15214 struct hlist_node *tmp;
15215 struct bnxt_ntuple_filter *fltr;
15216 int rc;
15217
15218 head = &bp->ntp_fltr_hash_tbl[i];
15219 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15220 bool del = false;
15221
15222 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15223 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15224 continue;
15225 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15226 fltr->flow_id,
15227 fltr->base.sw_id)) {
15228 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15229 fltr);
15230 del = true;
15231 }
15232 } else {
15233 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15234 fltr);
15235 if (rc)
15236 del = true;
15237 else
15238 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15239 }
15240
15241 if (del)
15242 bnxt_del_ntp_filter(bp, fltr);
15243 }
15244 }
15245 #endif
15246 }
15247
15248 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15249 unsigned int entry, struct udp_tunnel_info *ti)
15250 {
15251 struct bnxt *bp = netdev_priv(netdev);
15252 unsigned int cmd;
15253
15254 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15255 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15256 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15257 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15258 else
15259 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15260
15261 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15262 }
15263
15264 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15265 unsigned int entry, struct udp_tunnel_info *ti)
15266 {
15267 struct bnxt *bp = netdev_priv(netdev);
15268 unsigned int cmd;
15269
15270 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15271 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15272 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15273 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15274 else
15275 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15276
15277 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15278 }
15279
15280 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15281 .set_port = bnxt_udp_tunnel_set_port,
15282 .unset_port = bnxt_udp_tunnel_unset_port,
15283 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
15284 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15285 .tables = {
15286 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15287 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15288 },
15289 }, bnxt_udp_tunnels_p7 = {
15290 .set_port = bnxt_udp_tunnel_set_port,
15291 .unset_port = bnxt_udp_tunnel_unset_port,
15292 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
15293 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15294 .tables = {
15295 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15296 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15297 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15298 },
15299 };
15300
15301 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15302 struct net_device *dev, u32 filter_mask,
15303 int nlflags)
15304 {
15305 struct bnxt *bp = netdev_priv(dev);
15306
15307 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15308 nlflags, filter_mask, NULL);
15309 }
15310
15311 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15312 u16 flags, struct netlink_ext_ack *extack)
15313 {
15314 struct bnxt *bp = netdev_priv(dev);
15315 struct nlattr *attr, *br_spec;
15316 int rem, rc = 0;
15317
15318 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15319 return -EOPNOTSUPP;
15320
15321 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15322 if (!br_spec)
15323 return -EINVAL;
15324
15325 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15326 u16 mode;
15327
15328 mode = nla_get_u16(attr);
15329 if (mode == bp->br_mode)
15330 break;
15331
15332 rc = bnxt_hwrm_set_br_mode(bp, mode);
15333 if (!rc)
15334 bp->br_mode = mode;
15335 break;
15336 }
15337 return rc;
15338 }
15339
15340 int bnxt_get_port_parent_id(struct net_device *dev,
15341 struct netdev_phys_item_id *ppid)
15342 {
15343 struct bnxt *bp = netdev_priv(dev);
15344
15345 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15346 return -EOPNOTSUPP;
15347
15348 /* The PF and its VF-reps only support the switchdev framework */
15349 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15350 return -EOPNOTSUPP;
15351
15352 ppid->id_len = sizeof(bp->dsn);
15353 memcpy(ppid->id, bp->dsn, ppid->id_len);
15354
15355 return 0;
15356 }
15357
15358 static const struct net_device_ops bnxt_netdev_ops = {
15359 .ndo_open = bnxt_open,
15360 .ndo_start_xmit = bnxt_start_xmit,
15361 .ndo_stop = bnxt_close,
15362 .ndo_get_stats64 = bnxt_get_stats64,
15363 .ndo_set_rx_mode = bnxt_set_rx_mode,
15364 .ndo_eth_ioctl = bnxt_ioctl,
15365 .ndo_validate_addr = eth_validate_addr,
15366 .ndo_set_mac_address = bnxt_change_mac_addr,
15367 .ndo_change_mtu = bnxt_change_mtu,
15368 .ndo_fix_features = bnxt_fix_features,
15369 .ndo_set_features = bnxt_set_features,
15370 .ndo_features_check = bnxt_features_check,
15371 .ndo_tx_timeout = bnxt_tx_timeout,
15372 #ifdef CONFIG_BNXT_SRIOV
15373 .ndo_get_vf_config = bnxt_get_vf_config,
15374 .ndo_set_vf_mac = bnxt_set_vf_mac,
15375 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
15376 .ndo_set_vf_rate = bnxt_set_vf_bw,
15377 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
15378 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
15379 .ndo_set_vf_trust = bnxt_set_vf_trust,
15380 #endif
15381 .ndo_setup_tc = bnxt_setup_tc,
15382 #ifdef CONFIG_RFS_ACCEL
15383 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
15384 #endif
15385 .ndo_bpf = bnxt_xdp,
15386 .ndo_xdp_xmit = bnxt_xdp_xmit,
15387 .ndo_bridge_getlink = bnxt_bridge_getlink,
15388 .ndo_bridge_setlink = bnxt_bridge_setlink,
15389 };
15390
15391 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15392 struct netdev_queue_stats_rx *stats)
15393 {
15394 struct bnxt *bp = netdev_priv(dev);
15395 struct bnxt_cp_ring_info *cpr;
15396 u64 *sw;
15397
15398 if (!bp->bnapi)
15399 return;
15400
15401 cpr = &bp->bnapi[i]->cp_ring;
15402 sw = cpr->stats.sw_stats;
15403
15404 stats->packets = 0;
15405 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15406 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15407 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15408
15409 stats->bytes = 0;
15410 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15411 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15412 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15413
15414 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15415 }
15416
15417 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15418 struct netdev_queue_stats_tx *stats)
15419 {
15420 struct bnxt *bp = netdev_priv(dev);
15421 struct bnxt_napi *bnapi;
15422 u64 *sw;
15423
15424 if (!bp->tx_ring)
15425 return;
15426
15427 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15428 sw = bnapi->cp_ring.stats.sw_stats;
15429
15430 stats->packets = 0;
15431 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15432 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15433 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15434
15435 stats->bytes = 0;
15436 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15437 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15438 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15439 }
15440
15441 static void bnxt_get_base_stats(struct net_device *dev,
15442 struct netdev_queue_stats_rx *rx,
15443 struct netdev_queue_stats_tx *tx)
15444 {
15445 struct bnxt *bp = netdev_priv(dev);
15446
15447 rx->packets = bp->net_stats_prev.rx_packets;
15448 rx->bytes = bp->net_stats_prev.rx_bytes;
15449 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15450
15451 tx->packets = bp->net_stats_prev.tx_packets;
15452 tx->bytes = bp->net_stats_prev.tx_bytes;
15453 }
15454
15455 static const struct netdev_stat_ops bnxt_stat_ops = {
15456 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
15457 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
15458 .get_base_stats = bnxt_get_base_stats,
15459 };
15460
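/* Per-queue restart support (netdev_queue_mgmt_ops): ndo_queue_mem_alloc
 * builds a shadow copy of an RX ring with fresh page pools and ring memory,
 * ndo_queue_stop quiesces and frees the HW rings backing the live ring,
 * ndo_queue_start moves the shadow copy's memory into the live ring and
 * re-creates the HW rings, and ndo_queue_mem_free releases whichever copy
 * ends up holding the retired memory.
 */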
15461 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
15462 {
15463 struct bnxt_rx_ring_info *rxr, *clone;
15464 struct bnxt *bp = netdev_priv(dev);
15465 struct bnxt_ring_struct *ring;
15466 int rc;
15467
15468 if (!bp->rx_ring)
15469 return -ENETDOWN;
15470
15471 rxr = &bp->rx_ring[idx];
15472 clone = qmem;
15473 memcpy(clone, rxr, sizeof(*rxr));
15474 bnxt_init_rx_ring_struct(bp, clone);
15475 bnxt_reset_rx_ring_struct(bp, clone);
15476
15477 clone->rx_prod = 0;
15478 clone->rx_agg_prod = 0;
15479 clone->rx_sw_agg_prod = 0;
15480 clone->rx_next_cons = 0;
15481
15482 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
15483 if (rc)
15484 return rc;
15485
15486 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
15487 if (rc < 0)
15488 goto err_page_pool_destroy;
15489
15490 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
15491 MEM_TYPE_PAGE_POOL,
15492 clone->page_pool);
15493 if (rc)
15494 goto err_rxq_info_unreg;
15495
15496 ring = &clone->rx_ring_struct;
15497 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15498 if (rc)
15499 goto err_free_rx_ring;
15500
15501 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
15502 ring = &clone->rx_agg_ring_struct;
15503 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15504 if (rc)
15505 goto err_free_rx_agg_ring;
15506
15507 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
15508 if (rc)
15509 goto err_free_rx_agg_ring;
15510 }
15511
15512 if (bp->flags & BNXT_FLAG_TPA) {
15513 rc = bnxt_alloc_one_tpa_info(bp, clone);
15514 if (rc)
15515 goto err_free_tpa_info;
15516 }
15517
15518 bnxt_init_one_rx_ring_rxbd(bp, clone);
15519 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
15520
15521 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
15522 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15523 bnxt_alloc_one_rx_ring_page(bp, clone, idx);
15524 if (bp->flags & BNXT_FLAG_TPA)
15525 bnxt_alloc_one_tpa_info_data(bp, clone);
15526
15527 return 0;
15528
15529 err_free_tpa_info:
15530 bnxt_free_one_tpa_info(bp, clone);
15531 err_free_rx_agg_ring:
15532 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
15533 err_free_rx_ring:
15534 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
15535 err_rxq_info_unreg:
15536 xdp_rxq_info_unreg(&clone->xdp_rxq);
15537 err_page_pool_destroy:
15538 page_pool_destroy(clone->page_pool);
15539 if (bnxt_separate_head_pool())
15540 page_pool_destroy(clone->head_pool);
15541 clone->page_pool = NULL;
15542 clone->head_pool = NULL;
15543 return rc;
15544 }
15545
15546 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
15547 {
15548 struct bnxt_rx_ring_info *rxr = qmem;
15549 struct bnxt *bp = netdev_priv(dev);
15550 struct bnxt_ring_struct *ring;
15551
15552 bnxt_free_one_rx_ring_skbs(bp, rxr);
15553 bnxt_free_one_tpa_info(bp, rxr);
15554
15555 xdp_rxq_info_unreg(&rxr->xdp_rxq);
15556
15557 page_pool_destroy(rxr->page_pool);
15558 if (bnxt_separate_head_pool())
15559 page_pool_destroy(rxr->head_pool);
15560 rxr->page_pool = NULL;
15561 rxr->head_pool = NULL;
15562
15563 ring = &rxr->rx_ring_struct;
15564 bnxt_free_ring(bp, &ring->ring_mem);
15565
15566 ring = &rxr->rx_agg_ring_struct;
15567 bnxt_free_ring(bp, &ring->ring_mem);
15568
15569 kfree(rxr->rx_agg_bmap);
15570 rxr->rx_agg_bmap = NULL;
15571 }
15572
15573 static void bnxt_copy_rx_ring(struct bnxt *bp,
15574 struct bnxt_rx_ring_info *dst,
15575 struct bnxt_rx_ring_info *src)
15576 {
15577 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
15578 struct bnxt_ring_struct *dst_ring, *src_ring;
15579 int i;
15580
15581 dst_ring = &dst->rx_ring_struct;
15582 dst_rmem = &dst_ring->ring_mem;
15583 src_ring = &src->rx_ring_struct;
15584 src_rmem = &src_ring->ring_mem;
15585
15586 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15587 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15588 WARN_ON(dst_rmem->flags != src_rmem->flags);
15589 WARN_ON(dst_rmem->depth != src_rmem->depth);
15590 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
15591 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
15592
15593 dst_rmem->pg_tbl = src_rmem->pg_tbl;
15594 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
15595 *dst_rmem->vmem = *src_rmem->vmem;
15596 for (i = 0; i < dst_rmem->nr_pages; i++) {
15597 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
15598 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
15599 }
15600
15601 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
15602 return;
15603
15604 dst_ring = &dst->rx_agg_ring_struct;
15605 dst_rmem = &dst_ring->ring_mem;
15606 src_ring = &src->rx_agg_ring_struct;
15607 src_rmem = &src_ring->ring_mem;
15608
15609 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15610 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15611 WARN_ON(dst_rmem->flags != src_rmem->flags);
15612 WARN_ON(dst_rmem->depth != src_rmem->depth);
15613 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
15614 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
15615 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
15616
15617 dst_rmem->pg_tbl = src_rmem->pg_tbl;
15618 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
15619 *dst_rmem->vmem = *src_rmem->vmem;
15620 for (i = 0; i < dst_rmem->nr_pages; i++) {
15621 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
15622 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
15623 }
15624
15625 dst->rx_agg_bmap = src->rx_agg_bmap;
15626 }
15627
15628 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
15629 {
15630 struct bnxt *bp = netdev_priv(dev);
15631 struct bnxt_rx_ring_info *rxr, *clone;
15632 struct bnxt_cp_ring_info *cpr;
15633 struct bnxt_vnic_info *vnic;
15634 int i, rc;
15635
15636 rxr = &bp->rx_ring[idx];
15637 clone = qmem;
15638
15639 rxr->rx_prod = clone->rx_prod;
15640 rxr->rx_agg_prod = clone->rx_agg_prod;
15641 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
15642 rxr->rx_next_cons = clone->rx_next_cons;
15643 rxr->rx_tpa = clone->rx_tpa;
15644 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
15645 rxr->page_pool = clone->page_pool;
15646 rxr->head_pool = clone->head_pool;
15647 rxr->xdp_rxq = clone->xdp_rxq;
15648
15649 bnxt_copy_rx_ring(bp, rxr, clone);
15650
15651 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
15652 if (rc)
15653 return rc;
15654 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
15655 if (rc)
15656 goto err_free_hwrm_rx_ring;
15657
15658 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
15659 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15660 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
15661
15662 cpr = &rxr->bnapi->cp_ring;
15663 cpr->sw_stats->rx.rx_resets++;
15664
15665 for (i = 0; i < bp->nr_vnics; i++) {
15666 vnic = &bp->vnic_info[i];
15667
15668 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
15669 if (rc) {
15670 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
15671 vnic->vnic_id, rc);
15672 return rc;
15673 }
15674 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
15675 bnxt_hwrm_vnic_update(bp, vnic,
15676 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
15677 }
15678
15679 return 0;
15680
15681 err_free_hwrm_rx_ring:
15682 bnxt_hwrm_rx_ring_free(bp, rxr, false);
15683 return rc;
15684 }
15685
15686 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
15687 {
15688 struct bnxt *bp = netdev_priv(dev);
15689 struct bnxt_rx_ring_info *rxr;
15690 struct bnxt_vnic_info *vnic;
15691 int i;
15692
15693 for (i = 0; i < bp->nr_vnics; i++) {
15694 vnic = &bp->vnic_info[i];
15695 vnic->mru = 0;
15696 bnxt_hwrm_vnic_update(bp, vnic,
15697 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
15698 }
15699 /* Make sure NAPI sees that the VNIC is disabled */
15700 synchronize_net();
15701 rxr = &bp->rx_ring[idx];
15702 cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
15703 bnxt_hwrm_rx_ring_free(bp, rxr, false);
15704 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
15705 rxr->rx_next_cons = 0;
15706 page_pool_disable_direct_recycling(rxr->page_pool);
15707 if (bnxt_separate_head_pool())
15708 page_pool_disable_direct_recycling(rxr->head_pool);
15709
15710 memcpy(qmem, rxr, sizeof(*rxr));
15711 bnxt_init_rx_ring_struct(bp, qmem);
15712
15713 return 0;
15714 }
15715
15716 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
15717 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
15718 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
15719 .ndo_queue_mem_free = bnxt_queue_mem_free,
15720 .ndo_queue_start = bnxt_queue_start,
15721 .ndo_queue_stop = bnxt_queue_stop,
15722 };
15723
15724 static void bnxt_remove_one(struct pci_dev *pdev)
15725 {
15726 struct net_device *dev = pci_get_drvdata(pdev);
15727 struct bnxt *bp = netdev_priv(dev);
15728
15729 if (BNXT_PF(bp))
15730 bnxt_sriov_disable(bp);
15731
15732 bnxt_rdma_aux_device_del(bp);
15733
15734 bnxt_ptp_clear(bp);
15735 unregister_netdev(dev);
15736
15737 bnxt_rdma_aux_device_uninit(bp);
15738
15739 bnxt_free_l2_filters(bp, true);
15740 bnxt_free_ntp_fltrs(bp, true);
15741 WARN_ON(bp->num_rss_ctx);
15742 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15743 /* Flush any pending tasks */
15744 cancel_work_sync(&bp->sp_task);
15745 cancel_delayed_work_sync(&bp->fw_reset_task);
15746 bp->sp_event = 0;
15747
15748 bnxt_dl_fw_reporters_destroy(bp);
15749 bnxt_dl_unregister(bp);
15750 bnxt_shutdown_tc(bp);
15751
15752 bnxt_clear_int_mode(bp);
15753 bnxt_hwrm_func_drv_unrgtr(bp);
15754 bnxt_free_hwrm_resources(bp);
15755 bnxt_hwmon_uninit(bp);
15756 bnxt_ethtool_free(bp);
15757 bnxt_dcb_free(bp);
15758 kfree(bp->ptp_cfg);
15759 bp->ptp_cfg = NULL;
15760 kfree(bp->fw_health);
15761 bp->fw_health = NULL;
15762 bnxt_cleanup_pci(bp);
15763 bnxt_free_ctx_mem(bp, true);
15764 bnxt_free_crash_dump_mem(bp);
15765 kfree(bp->rss_indir_tbl);
15766 bp->rss_indir_tbl = NULL;
15767 bnxt_free_port_stats(bp);
15768 free_netdev(dev);
15769 }
15770
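/* Query PHY and MAC capabilities from firmware.  When @fw_dflt is set,
 * also read the current link state and initialize the ethtool link
 * settings from the firmware defaults.
 */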
15771 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
15772 {
15773 int rc = 0;
15774 struct bnxt_link_info *link_info = &bp->link_info;
15775
15776 bp->phy_flags = 0;
15777 rc = bnxt_hwrm_phy_qcaps(bp);
15778 if (rc) {
15779 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
15780 rc);
15781 return rc;
15782 }
15783 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
15784 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
15785 else
15786 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
15787
15788 bp->mac_flags = 0;
15789 bnxt_hwrm_mac_qcaps(bp);
15790
15791 if (!fw_dflt)
15792 return 0;
15793
15794 mutex_lock(&bp->link_lock);
15795 rc = bnxt_update_link(bp, false);
15796 if (rc) {
15797 mutex_unlock(&bp->link_lock);
15798 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
15799 rc);
15800 return rc;
15801 }
15802
15803 /* Older firmware does not have supported_auto_speeds, so assume
15804 * that all supported speeds can be autonegotiated.
15805 */
15806 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
15807 link_info->support_auto_speeds = link_info->support_speeds;
15808
15809 bnxt_init_ethtool_link_settings(bp);
15810 mutex_unlock(&bp->link_lock);
15811 return 0;
15812 }
15813
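/* Return the number of MSI-X vectors the device advertises, or 1 if the
 * MSI-X capability is not present.
 */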
15814 static int bnxt_get_max_irq(struct pci_dev *pdev)
15815 {
15816 u16 ctrl;
15817
15818 if (!pdev->msix_cap)
15819 return 1;
15820
15821 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
15822 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
15823 }
15824
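/* Compute the maximum RX, TX and completion rings that can be supported,
 * based on hardware resources minus what is already in use by the ULP
 * (RoCE), and adjusted for aggregation rings and chip-specific limits.
 */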
15825 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
15826 int *max_cp)
15827 {
15828 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
15829 int max_ring_grps = 0, max_irq;
15830
15831 *max_tx = hw_resc->max_tx_rings;
15832 *max_rx = hw_resc->max_rx_rings;
15833 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
15834 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
15835 bnxt_get_ulp_msix_num_in_use(bp),
15836 hw_resc->max_stat_ctxs -
15837 bnxt_get_ulp_stat_ctxs_in_use(bp));
15838 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
15839 *max_cp = min_t(int, *max_cp, max_irq);
15840 max_ring_grps = hw_resc->max_hw_ring_grps;
15841 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
15842 *max_cp -= 1;
15843 *max_rx -= 2;
15844 }
15845 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15846 *max_rx >>= 1;
15847 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
15848 int rc;
15849
15850 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
15851 if (rc) {
15852 *max_rx = 0;
15853 *max_tx = 0;
15854 }
15855 /* On P5 chips, max_cp output param should be available NQs */
15856 *max_cp = max_irq;
15857 }
15858 *max_rx = min_t(int, *max_rx, max_ring_grps);
15859 }
15860
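/* Return the maximum usable RX and TX ring counts after trimming them to
 * the available completion rings.  Returns -ENOMEM if any of the ring
 * types is exhausted.
 */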
15861 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
15862 {
15863 int rx, tx, cp;
15864
15865 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
15866 *max_rx = rx;
15867 *max_tx = tx;
15868 if (!rx || !tx || !cp)
15869 return -ENOMEM;
15870
15871 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
15872 }
15873
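/* Like bnxt_get_max_rings(), but if resources are short, retry with
 * aggregation rings (and hence LRO/GRO_HW) disabled, and set aside the
 * minimum completion rings, stat contexts and IRQs needed for RoCE.
 */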
15874 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
15875 bool shared)
15876 {
15877 int rc;
15878
15879 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
15880 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
15881 /* Not enough rings, try disabling agg rings. */
15882 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
15883 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
15884 if (rc) {
15885 /* set BNXT_FLAG_AGG_RINGS back for consistency */
15886 bp->flags |= BNXT_FLAG_AGG_RINGS;
15887 return rc;
15888 }
15889 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
15890 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
15891 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
15892 bnxt_set_ring_params(bp);
15893 }
15894
15895 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
15896 int max_cp, max_stat, max_irq;
15897
15898 /* Reserve minimum resources for RoCE */
15899 max_cp = bnxt_get_max_func_cp_rings(bp);
15900 max_stat = bnxt_get_max_func_stat_ctxs(bp);
15901 max_irq = bnxt_get_max_func_irqs(bp);
15902 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
15903 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
15904 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
15905 return 0;
15906
15907 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
15908 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
15909 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
15910 max_cp = min_t(int, max_cp, max_irq);
15911 max_cp = min_t(int, max_cp, max_stat);
15912 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
15913 if (rc)
15914 rc = 0;
15915 }
15916 return rc;
15917 }
15918
15919 /* In the initial default shared ring setting, each shared ring must have an
15920 * RX/TX ring pair.
15921 */
15922 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
15923 {
15924 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
15925 bp->rx_nr_rings = bp->cp_nr_rings;
15926 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
15927 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15928 }
15929
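/* Pick and reserve the default number of rings, bounded by the CPU count
 * (or 1 in a kdump kernel), the port count and the resources reported by
 * firmware.  Leftover MSI-X vectors are handed to the ULP (RoCE).
 */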
15930 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
15931 {
15932 int dflt_rings, max_rx_rings, max_tx_rings, rc;
15933 int avail_msix;
15934
15935 if (!bnxt_can_reserve_rings(bp))
15936 return 0;
15937
15938 if (sh)
15939 bp->flags |= BNXT_FLAG_SHARED_RINGS;
15940 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
15941 /* Reduce default rings on multi-port cards so that total default
15942 * rings do not exceed CPU count.
15943 */
15944 if (bp->port_count > 1) {
15945 int max_rings =
15946 max_t(int, num_online_cpus() / bp->port_count, 1);
15947
15948 dflt_rings = min_t(int, dflt_rings, max_rings);
15949 }
15950 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
15951 if (rc)
15952 return rc;
15953 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
15954 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
15955 if (sh)
15956 bnxt_trim_dflt_sh_rings(bp);
15957 else
15958 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
15959 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15960
15961 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
15962 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
15963 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
15964
15965 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
15966 bnxt_set_dflt_ulp_stat_ctxs(bp);
15967 }
15968
15969 rc = __bnxt_reserve_rings(bp);
15970 if (rc && rc != -ENODEV)
15971 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
15972 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15973 if (sh)
15974 bnxt_trim_dflt_sh_rings(bp);
15975
15976 /* Rings may have been trimmed, re-reserve the trimmed rings. */
15977 if (bnxt_need_reserve_rings(bp)) {
15978 rc = __bnxt_reserve_rings(bp);
15979 if (rc && rc != -ENODEV)
15980 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
15981 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15982 }
15983 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
15984 bp->rx_nr_rings++;
15985 bp->cp_nr_rings++;
15986 }
15987 if (rc) {
15988 bp->tx_nr_rings = 0;
15989 bp->rx_nr_rings = 0;
15990 }
15991 return rc;
15992 }
15993
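/* Set up default rings and interrupt mode if no rings have been
 * configured yet; a no-op when TX rings already exist.
 */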
15994 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
15995 {
15996 int rc;
15997
15998 if (bp->tx_nr_rings)
15999 return 0;
16000
16001 bnxt_ulp_irq_stop(bp);
16002 bnxt_clear_int_mode(bp);
16003 rc = bnxt_set_dflt_rings(bp, true);
16004 if (rc) {
16005 if (BNXT_VF(bp) && rc == -ENODEV)
16006 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16007 else
16008 netdev_err(bp->dev, "Not enough rings available.\n");
16009 goto init_dflt_ring_err;
16010 }
16011 rc = bnxt_init_int_mode(bp);
16012 if (rc)
16013 goto init_dflt_ring_err;
16014
16015 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16016
16017 bnxt_set_dflt_rfs(bp);
16018
16019 init_dflt_ring_err:
16020 bnxt_ulp_irq_restart(bp, rc);
16021 return rc;
16022 }
16023
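/* Re-query function capabilities and re-initialize the interrupt mode
 * after the firmware-provisioned resources may have changed, restarting
 * the NIC if it was running.
 */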
16024 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16025 {
16026 int rc;
16027
16028 ASSERT_RTNL();
16029 bnxt_hwrm_func_qcaps(bp);
16030
16031 if (netif_running(bp->dev))
16032 __bnxt_close_nic(bp, true, false);
16033
16034 bnxt_ulp_irq_stop(bp);
16035 bnxt_clear_int_mode(bp);
16036 rc = bnxt_init_int_mode(bp);
16037 bnxt_ulp_irq_restart(bp, rc);
16038
16039 if (netif_running(bp->dev)) {
16040 if (rc)
16041 dev_close(bp->dev);
16042 else
16043 rc = bnxt_open_nic(bp, true, false);
16044 }
16045
16046 return rc;
16047 }
16048
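/* Set the netdev MAC address: the PF uses its permanent address, while a
 * VF uses the administratively assigned address if valid (otherwise a
 * random one) and then asks the PF to approve it.
 */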
16049 static int bnxt_init_mac_addr(struct bnxt *bp)
16050 {
16051 int rc = 0;
16052
16053 if (BNXT_PF(bp)) {
16054 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16055 } else {
16056 #ifdef CONFIG_BNXT_SRIOV
16057 struct bnxt_vf_info *vf = &bp->vf;
16058 bool strict_approval = true;
16059
16060 if (is_valid_ether_addr(vf->mac_addr)) {
16061 /* overwrite netdev dev_addr with admin VF MAC */
16062 eth_hw_addr_set(bp->dev, vf->mac_addr);
16063 /* Older PF driver or firmware may not approve this
16064 * correctly.
16065 */
16066 strict_approval = false;
16067 } else {
16068 eth_hw_addr_random(bp->dev);
16069 }
16070 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16071 #endif
16072 }
16073 return rc;
16074 }
16075
16076 static void bnxt_vpd_read_info(struct bnxt *bp)
16077 {
16078 struct pci_dev *pdev = bp->pdev;
16079 unsigned int vpd_size, kw_len;
16080 int pos, size;
16081 u8 *vpd_data;
16082
16083 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16084 if (IS_ERR(vpd_data)) {
16085 pci_warn(pdev, "Unable to read VPD\n");
16086 return;
16087 }
16088
16089 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16090 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16091 if (pos < 0)
16092 goto read_sn;
16093
16094 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16095 memcpy(bp->board_partno, &vpd_data[pos], size);
16096
16097 read_sn:
16098 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16099 PCI_VPD_RO_KEYWORD_SERIALNO,
16100 &kw_len);
16101 if (pos < 0)
16102 goto exit;
16103
16104 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16105 memcpy(bp->board_serialno, &vpd_data[pos], size);
16106 exit:
16107 kfree(vpd_data);
16108 }
16109
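/* Read the PCIe Device Serial Number into @dsn and mark it valid.
 * Returns -EOPNOTSUPP if the adapter does not report a DSN.
 */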
16110 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16111 {
16112 struct pci_dev *pdev = bp->pdev;
16113 u64 qword;
16114
16115 qword = pci_get_dsn(pdev);
16116 if (!qword) {
16117 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16118 return -EOPNOTSUPP;
16119 }
16120
16121 put_unaligned_le64(qword, dsn);
16122
16123 bp->flags |= BNXT_FLAG_DSN_VALID;
16124 return 0;
16125 }
16126
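/* Map the doorbell BAR (BAR 2) using the doorbell region size in
 * bp->db_size.
 */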
16127 static int bnxt_map_db_bar(struct bnxt *bp)
16128 {
16129 if (!bp->db_size)
16130 return -ENODEV;
16131 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16132 if (!bp->bar1)
16133 return -ENOMEM;
16134 return 0;
16135 }
16136
16137 void bnxt_print_device_info(struct bnxt *bp)
16138 {
16139 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16140 board_info[bp->board_idx].name,
16141 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16142
16143 pcie_print_link_status(bp->pdev);
16144 }
16145
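/* PCI .probe handler: allocate the netdev, query firmware capabilities,
 * set up features, default rings and interrupts, and register the device
 * with the networking core, devlink and the RDMA auxiliary bus.
 */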
16146 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16147 {
16148 struct bnxt_hw_resc *hw_resc;
16149 struct net_device *dev;
16150 struct bnxt *bp;
16151 int rc, max_irqs;
16152
16153 if (pci_is_bridge(pdev))
16154 return -ENODEV;
16155
16156 if (!pdev->msix_cap) {
16157 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16158 return -ENODEV;
16159 }
16160
16161 /* Clear any pending DMA transactions left over from the crashed
16162 * kernel while loading the driver in the kdump capture kernel.
16163 */
16164 if (is_kdump_kernel()) {
16165 pci_clear_master(pdev);
16166 pcie_flr(pdev);
16167 }
16168
16169 max_irqs = bnxt_get_max_irq(pdev);
16170 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16171 max_irqs);
16172 if (!dev)
16173 return -ENOMEM;
16174
16175 bp = netdev_priv(dev);
16176 bp->board_idx = ent->driver_data;
16177 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16178 bnxt_set_max_func_irqs(bp, max_irqs);
16179
16180 if (bnxt_vf_pciid(bp->board_idx))
16181 bp->flags |= BNXT_FLAG_VF;
16182
16183 /* No devlink port registration in case of a VF */
16184 if (BNXT_PF(bp))
16185 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16186
16187 rc = bnxt_init_board(pdev, dev);
16188 if (rc < 0)
16189 goto init_err_free;
16190
16191 dev->netdev_ops = &bnxt_netdev_ops;
16192 dev->stat_ops = &bnxt_stat_ops;
16193 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16194 dev->ethtool_ops = &bnxt_ethtool_ops;
16195 pci_set_drvdata(pdev, dev);
16196
16197 rc = bnxt_alloc_hwrm_resources(bp);
16198 if (rc)
16199 goto init_err_pci_clean;
16200
16201 mutex_init(&bp->hwrm_cmd_lock);
16202 mutex_init(&bp->link_lock);
16203
16204 rc = bnxt_fw_init_one_p1(bp);
16205 if (rc)
16206 goto init_err_pci_clean;
16207
16208 if (BNXT_PF(bp))
16209 bnxt_vpd_read_info(bp);
16210
16211 if (BNXT_CHIP_P5_PLUS(bp)) {
16212 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16213 if (BNXT_CHIP_P7(bp))
16214 bp->flags |= BNXT_FLAG_CHIP_P7;
16215 }
16216
16217 rc = bnxt_alloc_rss_indir_tbl(bp);
16218 if (rc)
16219 goto init_err_pci_clean;
16220
16221 rc = bnxt_fw_init_one_p2(bp);
16222 if (rc)
16223 goto init_err_pci_clean;
16224
16225 rc = bnxt_map_db_bar(bp);
16226 if (rc) {
16227 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16228 rc);
16229 goto init_err_pci_clean;
16230 }
16231
16232 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16233 NETIF_F_TSO | NETIF_F_TSO6 |
16234 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16235 NETIF_F_GSO_IPXIP4 |
16236 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16237 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16238 NETIF_F_RXCSUM | NETIF_F_GRO;
16239 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16240 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16241
16242 if (BNXT_SUPPORTS_TPA(bp))
16243 dev->hw_features |= NETIF_F_LRO;
16244
16245 dev->hw_enc_features =
16246 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16247 NETIF_F_TSO | NETIF_F_TSO6 |
16248 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16249 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16250 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16251 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16252 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16253 if (bp->flags & BNXT_FLAG_CHIP_P7)
16254 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16255 else
16256 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16257
16258 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16259 NETIF_F_GSO_GRE_CSUM;
16260 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16261 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16262 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16263 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16264 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16265 if (BNXT_SUPPORTS_TPA(bp))
16266 dev->hw_features |= NETIF_F_GRO_HW;
16267 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16268 if (dev->features & NETIF_F_GRO_HW)
16269 dev->features &= ~NETIF_F_LRO;
16270 dev->priv_flags |= IFF_UNICAST_FLT;
16271
16272 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16273 if (bp->tso_max_segs)
16274 netif_set_tso_max_segs(dev, bp->tso_max_segs);
16275
16276 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16277 NETDEV_XDP_ACT_RX_SG;
16278
16279 #ifdef CONFIG_BNXT_SRIOV
16280 init_waitqueue_head(&bp->sriov_cfg_wait);
16281 #endif
16282 if (BNXT_SUPPORTS_TPA(bp)) {
16283 bp->gro_func = bnxt_gro_func_5730x;
16284 if (BNXT_CHIP_P4(bp))
16285 bp->gro_func = bnxt_gro_func_5731x;
16286 else if (BNXT_CHIP_P5_PLUS(bp))
16287 bp->gro_func = bnxt_gro_func_5750x;
16288 }
16289 if (!BNXT_CHIP_P4_PLUS(bp))
16290 bp->flags |= BNXT_FLAG_DOUBLE_DB;
16291
16292 rc = bnxt_init_mac_addr(bp);
16293 if (rc) {
16294 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16295 rc = -EADDRNOTAVAIL;
16296 goto init_err_pci_clean;
16297 }
16298
16299 if (BNXT_PF(bp)) {
16300 /* Read the adapter's DSN to use as the eswitch switch_id */
16301 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16302 }
16303
16304 /* MTU range: 60 - FW defined max */
16305 dev->min_mtu = ETH_ZLEN;
16306 dev->max_mtu = bp->max_mtu;
16307
16308 rc = bnxt_probe_phy(bp, true);
16309 if (rc)
16310 goto init_err_pci_clean;
16311
16312 hw_resc = &bp->hw_resc;
16313 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16314 BNXT_L2_FLTR_MAX_FLTR;
16315 /* Older firmware may not report these filters properly */
16316 if (bp->max_fltr < BNXT_MAX_FLTR)
16317 bp->max_fltr = BNXT_MAX_FLTR;
16318 bnxt_init_l2_fltr_tbl(bp);
16319 __bnxt_set_rx_skb_mode(bp, false);
16320 bnxt_set_tpa_flags(bp);
16321 bnxt_init_ring_params(bp);
16322 bnxt_set_ring_params(bp);
16323 bnxt_rdma_aux_device_init(bp);
16324 rc = bnxt_set_dflt_rings(bp, true);
16325 if (rc) {
16326 if (BNXT_VF(bp) && rc == -ENODEV) {
16327 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16328 } else {
16329 netdev_err(bp->dev, "Not enough rings available.\n");
16330 rc = -ENOMEM;
16331 }
16332 goto init_err_pci_clean;
16333 }
16334
16335 bnxt_fw_init_one_p3(bp);
16336
16337 bnxt_init_dflt_coal(bp);
16338
16339 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16340 bp->flags |= BNXT_FLAG_STRIP_VLAN;
16341
16342 rc = bnxt_init_int_mode(bp);
16343 if (rc)
16344 goto init_err_pci_clean;
16345
16346 /* No TC has been set yet and rings may have been trimmed due to
16347 * limited MSIX, so we re-initialize the TX rings per TC.
16348 */
16349 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16350
16351 if (BNXT_PF(bp)) {
16352 if (!bnxt_pf_wq) {
16353 bnxt_pf_wq =
16354 create_singlethread_workqueue("bnxt_pf_wq");
16355 if (!bnxt_pf_wq) {
16356 dev_err(&pdev->dev, "Unable to create workqueue.\n");
16357 rc = -ENOMEM;
16358 goto init_err_pci_clean;
16359 }
16360 }
16361 rc = bnxt_init_tc(bp);
16362 if (rc)
16363 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16364 rc);
16365 }
16366
16367 bnxt_inv_fw_health_reg(bp);
16368 rc = bnxt_dl_register(bp);
16369 if (rc)
16370 goto init_err_dl;
16371
16372 INIT_LIST_HEAD(&bp->usr_fltr_list);
16373
16374 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16375 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16376 if (BNXT_SUPPORTS_QUEUE_API(bp))
16377 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16378
16379 rc = register_netdev(dev);
16380 if (rc)
16381 goto init_err_cleanup;
16382
16383 bnxt_dl_fw_reporters_create(bp);
16384
16385 bnxt_rdma_aux_device_add(bp);
16386
16387 bnxt_print_device_info(bp);
16388
16389 pci_save_state(pdev);
16390
16391 return 0;
16392 init_err_cleanup:
16393 bnxt_rdma_aux_device_uninit(bp);
16394 bnxt_dl_unregister(bp);
16395 init_err_dl:
16396 bnxt_shutdown_tc(bp);
16397 bnxt_clear_int_mode(bp);
16398
16399 init_err_pci_clean:
16400 bnxt_hwrm_func_drv_unrgtr(bp);
16401 bnxt_free_hwrm_resources(bp);
16402 bnxt_hwmon_uninit(bp);
16403 bnxt_ethtool_free(bp);
16404 bnxt_ptp_clear(bp);
16405 kfree(bp->ptp_cfg);
16406 bp->ptp_cfg = NULL;
16407 kfree(bp->fw_health);
16408 bp->fw_health = NULL;
16409 bnxt_cleanup_pci(bp);
16410 bnxt_free_ctx_mem(bp, true);
16411 bnxt_free_crash_dump_mem(bp);
16412 kfree(bp->rss_indir_tbl);
16413 bp->rss_indir_tbl = NULL;
16414
16415 init_err_free:
16416 free_netdev(dev);
16417 return rc;
16418 }
16419
16420 static void bnxt_shutdown(struct pci_dev *pdev)
16421 {
16422 struct net_device *dev = pci_get_drvdata(pdev);
16423 struct bnxt *bp;
16424
16425 if (!dev)
16426 return;
16427
16428 rtnl_lock();
16429 bp = netdev_priv(dev);
16430 if (!bp)
16431 goto shutdown_exit;
16432
16433 if (netif_running(dev))
16434 dev_close(dev);
16435
16436 bnxt_ptp_clear(bp);
16437 bnxt_clear_int_mode(bp);
16438 pci_disable_device(pdev);
16439
16440 if (system_state == SYSTEM_POWER_OFF) {
16441 pci_wake_from_d3(pdev, bp->wol);
16442 pci_set_power_state(pdev, PCI_D3hot);
16443 }
16444
16445 shutdown_exit:
16446 rtnl_unlock();
16447 }
16448
16449 #ifdef CONFIG_PM_SLEEP
16450 static int bnxt_suspend(struct device *device)
16451 {
16452 struct net_device *dev = dev_get_drvdata(device);
16453 struct bnxt *bp = netdev_priv(dev);
16454 int rc = 0;
16455
16456 bnxt_ulp_stop(bp);
16457
16458 rtnl_lock();
16459 if (netif_running(dev)) {
16460 netif_device_detach(dev);
16461 rc = bnxt_close(dev);
16462 }
16463 bnxt_hwrm_func_drv_unrgtr(bp);
16464 bnxt_ptp_clear(bp);
16465 pci_disable_device(bp->pdev);
16466 bnxt_free_ctx_mem(bp, false);
16467 rtnl_unlock();
16468 return rc;
16469 }
16470
16471 static int bnxt_resume(struct device *device)
16472 {
16473 struct net_device *dev = dev_get_drvdata(device);
16474 struct bnxt *bp = netdev_priv(dev);
16475 int rc = 0;
16476
16477 rtnl_lock();
16478 rc = pci_enable_device(bp->pdev);
16479 if (rc) {
16480 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
16481 rc);
16482 goto resume_exit;
16483 }
16484 pci_set_master(bp->pdev);
16485 if (bnxt_hwrm_ver_get(bp)) {
16486 rc = -ENODEV;
16487 goto resume_exit;
16488 }
16489 rc = bnxt_hwrm_func_reset(bp);
16490 if (rc) {
16491 rc = -EBUSY;
16492 goto resume_exit;
16493 }
16494
16495 rc = bnxt_hwrm_func_qcaps(bp);
16496 if (rc)
16497 goto resume_exit;
16498
16499 bnxt_clear_reservations(bp, true);
16500
16501 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
16502 rc = -ENODEV;
16503 goto resume_exit;
16504 }
16505 if (bp->fw_crash_mem)
16506 bnxt_hwrm_crash_dump_mem_cfg(bp);
16507
16508 if (bnxt_ptp_init(bp)) {
16509 kfree(bp->ptp_cfg);
16510 bp->ptp_cfg = NULL;
16511 }
16512 bnxt_get_wol_settings(bp);
16513 if (netif_running(dev)) {
16514 rc = bnxt_open(dev);
16515 if (!rc)
16516 netif_device_attach(dev);
16517 }
16518
16519 resume_exit:
16520 rtnl_unlock();
16521 bnxt_ulp_start(bp, rc);
16522 if (!rc)
16523 bnxt_reenable_sriov(bp);
16524 return rc;
16525 }
16526
16527 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
16528 #define BNXT_PM_OPS (&bnxt_pm_ops)
16529
16530 #else
16531
16532 #define BNXT_PM_OPS NULL
16533
16534 #endif /* CONFIG_PM_SLEEP */
16535
16536 /**
16537 * bnxt_io_error_detected - called when PCI error is detected
16538 * @pdev: Pointer to PCI device
16539 * @state: The current PCI connection state
16540 *
16541 * This function is called after a PCI bus error affecting
16542 * this device has been detected.
16543 */
16544 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
16545 pci_channel_state_t state)
16546 {
16547 struct net_device *netdev = pci_get_drvdata(pdev);
16548 struct bnxt *bp = netdev_priv(netdev);
16549 bool abort = false;
16550
16551 netdev_info(netdev, "PCI I/O error detected\n");
16552
16553 bnxt_ulp_stop(bp);
16554
16555 rtnl_lock();
16556 netif_device_detach(netdev);
16557
16558 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
16559 netdev_err(bp->dev, "Firmware reset already in progress\n");
16560 abort = true;
16561 }
16562
16563 if (abort || state == pci_channel_io_perm_failure) {
16564 rtnl_unlock();
16565 return PCI_ERS_RESULT_DISCONNECT;
16566 }
16567
16568 /* The link is no longer reliable when the state is
16569 * pci_channel_io_frozen, so disable bus mastering to prevent any
16570 * potential bad DMAs before freeing kernel memory.
16571 */
16572 if (state == pci_channel_io_frozen) {
16573 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
16574 bnxt_fw_fatal_close(bp);
16575 }
16576
16577 if (netif_running(netdev))
16578 __bnxt_close_nic(bp, true, true);
16579
16580 if (pci_is_enabled(pdev))
16581 pci_disable_device(pdev);
16582 bnxt_free_ctx_mem(bp, false);
16583 rtnl_unlock();
16584
16585 /* Request a slot reset. */
16586 return PCI_ERS_RESULT_NEED_RESET;
16587 }
16588
16589 /**
16590 * bnxt_io_slot_reset - called after the pci bus has been reset.
16591 * @pdev: Pointer to PCI device
16592 *
16593 * Restart the card from scratch, as if from a cold-boot.
16594 * At this point, the card has experienced a hard reset,
16595 * followed by fixups by BIOS, and has its config space
16596 * set up identically to what it was at cold boot.
16597 */
16598 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
16599 {
16600 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
16601 struct net_device *netdev = pci_get_drvdata(pdev);
16602 struct bnxt *bp = netdev_priv(netdev);
16603 int retry = 0;
16604 int err = 0;
16605 int off;
16606
16607 netdev_info(bp->dev, "PCI Slot Reset\n");
16608
16609 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
16610 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
16611 msleep(900);
16612
16613 rtnl_lock();
16614
16615 if (pci_enable_device(pdev)) {
16616 dev_err(&pdev->dev,
16617 "Cannot re-enable PCI device after reset.\n");
16618 } else {
16619 pci_set_master(pdev);
16620 /* Upon a fatal error, the device's internal logic that latches
16621 * the BAR values is reset and is only restored by rewriting the
16622 * BARs.
16623 *
16624 * Since pci_restore_state() does not re-write a BAR whose current
16625 * value matches the saved value, write the BARs to 0 first to
16626 * force a full restore after a fatal error.
16627 */
16628 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
16629 &bp->state)) {
16630 for (off = PCI_BASE_ADDRESS_0;
16631 off <= PCI_BASE_ADDRESS_5; off += 4)
16632 pci_write_config_dword(bp->pdev, off, 0);
16633 }
16634 pci_restore_state(pdev);
16635 pci_save_state(pdev);
16636
16637 bnxt_inv_fw_health_reg(bp);
16638 bnxt_try_map_fw_health_reg(bp);
16639
16640 /* In some PCIe AER scenarios, firmware may take up to
16641 * 10 seconds to become ready in the worst case.
16642 */
16643 do {
16644 err = bnxt_try_recover_fw(bp);
16645 if (!err)
16646 break;
16647 retry++;
16648 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
16649
16650 if (err) {
16651 dev_err(&pdev->dev, "Firmware not ready\n");
16652 goto reset_exit;
16653 }
16654
16655 err = bnxt_hwrm_func_reset(bp);
16656 if (!err)
16657 result = PCI_ERS_RESULT_RECOVERED;
16658
16659 bnxt_ulp_irq_stop(bp);
16660 bnxt_clear_int_mode(bp);
16661 err = bnxt_init_int_mode(bp);
16662 bnxt_ulp_irq_restart(bp, err);
16663 }
16664
16665 reset_exit:
16666 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16667 bnxt_clear_reservations(bp, true);
16668 rtnl_unlock();
16669
16670 return result;
16671 }
16672
16673 /**
16674 * bnxt_io_resume - called when traffic can start flowing again.
16675 * @pdev: Pointer to PCI device
16676 *
16677 * This callback is called when the error recovery driver tells
16678 * us that it's OK to resume normal operation.
16679 */
16680 static void bnxt_io_resume(struct pci_dev *pdev)
16681 {
16682 struct net_device *netdev = pci_get_drvdata(pdev);
16683 struct bnxt *bp = netdev_priv(netdev);
16684 int err;
16685
16686 netdev_info(bp->dev, "PCI Slot Resume\n");
16687 rtnl_lock();
16688
16689 err = bnxt_hwrm_func_qcaps(bp);
16690 if (!err) {
16691 if (netif_running(netdev))
16692 err = bnxt_open(netdev);
16693 else
16694 err = bnxt_reserve_rings(bp, true);
16695 }
16696
16697 if (!err)
16698 netif_device_attach(netdev);
16699
16700 rtnl_unlock();
16701 bnxt_ulp_start(bp, err);
16702 if (!err)
16703 bnxt_reenable_sriov(bp);
16704 }
16705
16706 static const struct pci_error_handlers bnxt_err_handler = {
16707 .error_detected = bnxt_io_error_detected,
16708 .slot_reset = bnxt_io_slot_reset,
16709 .resume = bnxt_io_resume
16710 };
16711
16712 static struct pci_driver bnxt_pci_driver = {
16713 .name = DRV_MODULE_NAME,
16714 .id_table = bnxt_pci_tbl,
16715 .probe = bnxt_init_one,
16716 .remove = bnxt_remove_one,
16717 .shutdown = bnxt_shutdown,
16718 .driver.pm = BNXT_PM_OPS,
16719 .err_handler = &bnxt_err_handler,
16720 #if defined(CONFIG_BNXT_SRIOV)
16721 .sriov_configure = bnxt_sriov_configure,
16722 #endif
16723 };
16724
16725 static int __init bnxt_init(void)
16726 {
16727 int err;
16728
16729 bnxt_debug_init();
16730 err = pci_register_driver(&bnxt_pci_driver);
16731 if (err) {
16732 bnxt_debug_exit();
16733 return err;
16734 }
16735
16736 return 0;
16737 }
16738
16739 static void __exit bnxt_exit(void)
16740 {
16741 pci_unregister_driver(&bnxt_pci_driver);
16742 if (bnxt_pf_wq)
16743 destroy_workqueue(bnxt_pf_wq);
16744 bnxt_debug_exit();
16745 }
16746
16747 module_init(bnxt_init);
16748 module_exit(bnxt_exit);
16749