Lines matching "0x1806" (full-text tokens "0" and "x1806") in drivers/net/ethernet/broadcom/bnxt/bnxt.c
144 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
145 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
146 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
147 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
148 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
149 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
150 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
151 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
152 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
153 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
154 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
155 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
156 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
157 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
158 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
159 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
160 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
161 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
162 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
163 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
164 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
165 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
166 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
167 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
168 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
171 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
178 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
179 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
180 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
181 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
182 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
183 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
184 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
185 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
186 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
187 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
191 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
193 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
195 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
196 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
197 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
198 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
199 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
200 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
201 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
207 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
208 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
209 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
210 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
211 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
212 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
213 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
214 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
215 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
216 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
218 { 0 }
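The two blocks above form the driver's pci_device_id match table: PF entries map each device ID to a board enum, VF entries to NETXTREME_*_VF variants, and the { 0 } entry terminates the table. A minimal sketch of the same pattern (my_pci_tbl, my_probe, and the two-entry enum are illustrative names, not the driver's):

    #include <linux/module.h>
    #include <linux/pci.h>

    enum board_idx { BCM57508, BCM57504 };   /* indexes into a board table */

    static const struct pci_device_id my_pci_tbl[] = {
            { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
            { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
            { 0 }                            /* terminator */
    };
    MODULE_DEVICE_TABLE(pci, my_pci_tbl);

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            /* id->driver_data carries the enum stored in the table above */
            enum board_idx idx = (enum board_idx)id->driver_data;

            dev_info(&pdev->dev, "probing board index %d\n", idx);
            return 0;
    }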
266 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
267 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
268 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
272 .src = 0,
273 .dst = 0,
285 .src = cpu_to_be16(0xffff),
286 .dst = cpu_to_be16(0xffff),
298 .src = cpu_to_be16(0xffff),
299 .dst = cpu_to_be16(0xffff),
303 .src = cpu_to_be32(0xffffffff),
304 .dst = cpu_to_be32(0xffffffff),
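The fragments above are ntuple filter templates: a mask of all ones (cpu_to_be16(0xffff) for a port, cpu_to_be32(0xffffffff) for an IPv4 address, BNXT_IPV6_MASK_ALL for IPv6) forces an exact match on that field, while a zero mask (.src = 0, BNXT_IPV6_MASK_NONE) wildcards it. A hedged illustration of masked matching with a hypothetical 4-tuple, not the driver's actual struct flow_keys templates:

    #include <linux/types.h>

    /* hypothetical 4-tuple; an all-ones mask field means "must match" */
    struct tuple4 {
            __be32 saddr, daddr;
            __be16 sport, dport;
    };

    static bool tuple4_match(const struct tuple4 *pkt,
                             const struct tuple4 *rule,
                             const struct tuple4 *mask)
    {
            /* a field participates only where its mask bits are set */
            return !((pkt->saddr ^ rule->saddr) & mask->saddr) &&
                   !((pkt->daddr ^ rule->daddr) & mask->daddr) &&
                   !((pkt->sport ^ rule->sport) & mask->sport) &&
                   !((pkt->dport ^ rule->dport) & mask->dport);
    }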
400 rxr->rx_next_cons = 0xffff; in bnxt_sched_reset_rxr()
446 return 0; in bnxt_xmit_get_cfa_action()
457 txr->kick_pending = 0; in bnxt_txr_db_kick()
468 unsigned int length, pad = 0; in bnxt_start_xmit()
475 __le32 lflags = 0; in bnxt_start_xmit()
523 vlan_tag_flags = 0; in bnxt_start_xmit()
584 tx_push1->tx_bd_hsize_lflags = 0; in bnxt_start_xmit()
592 *end = 0; in bnxt_start_xmit()
596 for (j = 0; j < last_frag; j++) { in bnxt_start_xmit()
689 txbd1->tx_bd_mss = 0; in bnxt_start_xmit()
696 i = 0; in bnxt_start_xmit()
706 for (i = 0; i < last_frag; i++) { in bnxt_start_xmit()
713 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, in bnxt_start_xmit()
774 for (i = 0; i < last_frag; i++) { in bnxt_start_xmit()
786 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0; in bnxt_start_xmit()
806 unsigned int tx_bytes = 0; in __bnxt_tx_int()
808 int tx_pkts = 0; in __bnxt_tx_int()
835 tx_buf->is_ts_pkt = 0; in __bnxt_tx_int()
838 tx_buf->is_push = 0; in __bnxt_tx_int()
846 for (j = 0; j < last; j++) { in __bnxt_tx_int()
909 *offset = 0; in __bnxt_alloc_rx_page()
964 return 0; in bnxt_alloc_rx_data()
1008 unsigned int offset = 0; in bnxt_alloc_rx_page()
1027 return 0; in bnxt_alloc_rx_page()
1065 for (i = 0; i < agg_bufs; i++) { in bnxt_reuse_rx_agg_bufs()
1114 unsigned int len = offset_and_len & 0xffff; in bnxt_rx_multi_page_skb()
1147 unsigned int len = offset_and_len & 0xffff; in bnxt_rx_page_skb()
1174 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); in bnxt_rx_page_skb()
1178 frag = &skb_shinfo(skb)->frags[0]; in bnxt_rx_page_skb()
1213 skb_put(skb, offset_and_len & 0xffff); in bnxt_rx_skb()
1227 u32 i, total_frag_len = 0; in __bnxt_rx_agg_pages()
1233 for (i = 0; i < agg_bufs; i++) { in __bnxt_rx_agg_pages()
1266 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { in __bnxt_rx_agg_pages()
1275 return 0; in __bnxt_rx_agg_pages()
1294 u32 total_frag_len = 0; in bnxt_rx_agg_pages_skb()
1316 u32 total_frag_len = 0; in bnxt_rx_agg_pages_xdp()
1319 shinfo->nr_frags = 0; in bnxt_rx_agg_pages_xdp()
1382 unsigned int metasize = 0; in bnxt_copy_xdp()
1407 u8 cmp_type, agg_bufs = 0; in bnxt_discard_rx()
1419 return 0; in bnxt_discard_rx()
1429 return 0; in bnxt_discard_rx()
1465 tpa_info->vlan_valid = 0; in bnxt_tpa_metadata()
1477 tpa_info->vlan_valid = 0; in bnxt_tpa_metadata_v2()
1543 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ in bnxt_tpa_start()
1551 tpa_info->gso_type = 0; in bnxt_tpa_start()
1560 tpa_info->agg_count = 0; in bnxt_tpa_start()
1575 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); in bnxt_abort_tpa()
1649 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); in bnxt_gro_func_5731x()
1656 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); in bnxt_gro_func_5731x()
1707 int len, nw_off, tcp_opt_len = 0; in bnxt_gro_func_5730x()
1722 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); in bnxt_gro_func_5730x()
1733 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); in bnxt_gro_func_5730x()
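In both GRO handlers the driver primes th->check with the complemented TCP pseudo-header checksum, passing 0 as the payload sum so the stack can complete the checksum of the coalesced packet later. The same step in isolation, as a sketch assuming the IPv4 and TCP headers are already located:

    #include <linux/ip.h>
    #include <net/tcp.h>

    /* len covers the TCP header plus payload */
    static void prime_pseudo_csum_v4(struct iphdr *iph, struct tcphdr *th,
                                     int len)
    {
            /*
             * tcp_v4_check(len, s, d, 0) folds the IPv4 pseudo header with
             * a zero payload sum; storing its complement lets checksum
             * offload or software paths finish the sum incrementally.
             */
            th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
    }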
1800 u16 idx = 0, agg_id; in bnxt_tpa_end()
1807 if (rc < 0) in bnxt_tpa_end()
1822 tpa_info->agg_count = 0; in bnxt_tpa_end()
2037 * 0 - successful TPA_START, packet not completed yet
2063 int rc = 0; in bnxt_rx_pkt()
2119 /* 0xffff is forced error, don't print it */ in bnxt_rx_pkt()
2120 if (rxr->rx_next_cons != 0xffff) in bnxt_rx_pkt()
2151 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, in bnxt_rx_pkt()
2193 agg_bufs = 0; in bnxt_rx_pkt()
2206 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, in bnxt_rx_pkt()
2219 payload = 0; in bnxt_rx_pkt()
2290 memset(skb_hwtstamps(skb), 0, in bnxt_rx_pkt()
2370 u32 reg_type, reg_off, val = 0; in bnxt_fw_health_readl()
2397 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_agg_ring_id_to_grp_idx()
2524 rmem = &ctx_pg[0].ring_mem; in bnxt_bs_trace_init()
2531 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem; in bnxt_bs_trace_init()
2664 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", in bnxt_async_event_process()
2677 (data1 & 0x20000)) { in bnxt_async_event_process()
2741 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", in bnxt_async_event_process()
2775 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", in bnxt_async_event_process()
2789 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", in bnxt_async_event_process()
2799 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", in bnxt_async_event_process()
2806 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", in bnxt_async_event_process()
2818 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", in bnxt_async_event_process()
2860 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; in bnxt_async_event_process()
2878 return 0; in bnxt_async_event_process()
2917 return 0; in bnxt_hwrm_handler()
2922 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_vnic_is_active()
2924 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0; in bnxt_vnic_is_active()
2957 int rx_pkts = 0; in __bnxt_poll_work()
2958 u8 event = 0; in __bnxt_poll_work()
2961 cpr->has_more_work = 0; in __bnxt_poll_work()
3009 if (likely(rc >= 0)) in __bnxt_poll_work()
3039 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; in __bnxt_poll_work()
3103 u32 rx_pkts = 0; in bnxt_poll_nitroa0()
3104 u8 event = 0; in bnxt_poll_nitroa0()
3119 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { in bnxt_poll_nitroa0()
3173 int work_done = 0; in bnxt_poll()
3177 return 0; in bnxt_poll()
3209 int i, work_done = 0; in __bnxt_poll_cqs()
3211 for (i = 0; i < cpr->cp_ring_count; i++) { in __bnxt_poll_cqs()
3229 for (i = 0; i < cpr->cp_ring_count; i++) { in __bnxt_poll_cqs_done()
3234 u32 tgl = 0; in __bnxt_poll_cqs_done()
3237 cpr2->had_nqe_notify = 0; in __bnxt_poll_cqs_done()
3245 cpr2->had_work_done = 0; in __bnxt_poll_cqs_done()
3259 int work_done = 0; in bnxt_poll_p5()
3264 return 0; in bnxt_poll_p5()
3267 cpr->has_more_work = 0; in bnxt_poll_p5()
3323 cpr_rx = &cpr->cp_ring_arr[0]; in bnxt_poll_p5()
3346 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_skbs()
3353 for (j = 0; j < max_idx;) { in bnxt_free_tx_skbs()
3365 tx_buf->action = 0; in bnxt_free_tx_skbs()
3392 for (k = 0; k < last; k++, j++) { in bnxt_free_tx_skbs()
3414 for (i = 0; i < max_idx; i++) { in bnxt_free_one_rx_ring()
3435 for (i = 0; i < max_idx; i++) { in bnxt_free_one_rx_agg_ring()
3454 for (i = 0; i < bp->max_tpa; i++) { in bnxt_free_one_tpa_info_data()
3491 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); in bnxt_free_one_rx_ring_skbs()
3501 for (i = 0; i < bp->rx_nr_rings; i++) in bnxt_free_rx_skbs()
3524 for (i = 0; i < len; i += ctxm->entry_size) in bnxt_init_ctx_mem()
3546 for (i = start_idx; rem_len; i++, source_offset = 0) { in __bnxt_copy_ring()
3565 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_free_ring()
3593 u64 valid_bit = 0; in bnxt_alloc_ring()
3598 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { in bnxt_alloc_ring()
3610 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_alloc_ring()
3623 if (rmem->nr_pages > 1 || rmem->depth > 0) { in bnxt_alloc_ring()
3640 return 0; in bnxt_alloc_ring()
3651 for (i = 0; i < bp->max_tpa; i++) { in bnxt_free_one_tpa_info()
3664 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_tpa_info()
3683 return 0; in bnxt_alloc_one_tpa_info()
3684 for (i = 0; i < bp->max_tpa; i++) { in bnxt_alloc_one_tpa_info()
3695 return 0; in bnxt_alloc_one_tpa_info()
3705 return 0; in bnxt_alloc_tpa_info()
3709 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_tpa_info()
3716 return 0; in bnxt_alloc_tpa_info()
3727 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_rx_rings()
3757 struct page_pool_params pp = { 0 }; in bnxt_alloc_rx_page_pool()
3784 return 0; in bnxt_alloc_rx_page_pool()
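bnxt_alloc_rx_page_pool() zero-initializes struct page_pool_params and fills in only the fields it needs before creating the pool. A hedged sketch of the idiom; the field choices here are illustrative, not the driver's exact configuration:

    #include <net/page_pool/helpers.h>

    static struct page_pool *rx_pool_create(struct device *dev, int numa_node,
                                            unsigned int ring_size)
    {
            struct page_pool_params pp = { 0 };

            pp.pool_size = ring_size;      /* pages kept in the recycle cache */
            pp.nid = numa_node;            /* allocate near the ring's CPU */
            pp.dev = dev;
            pp.dma_dir = DMA_FROM_DEVICE;
            pp.flags = PP_FLAG_DMA_MAP;    /* pool DMA-maps pages on alloc */

            return page_pool_create(&pp);  /* returns ERR_PTR() on failure */
    }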
3802 return 0; in bnxt_alloc_rx_agg_bmap()
3808 int i, rc = 0, agg_rings = 0, cpu; in bnxt_alloc_rx_rings()
3816 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rx_rings()
3831 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); in bnxt_alloc_rx_rings()
3832 if (rc < 0) in bnxt_alloc_rx_rings()
3873 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_rings()
3903 bp->tx_push_size = 0; in bnxt_alloc_tx_rings()
3911 push_size = 0; in bnxt_alloc_tx_rings()
3912 bp->tx_push_thresh = 0; in bnxt_alloc_tx_rings()
3918 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { in bnxt_alloc_tx_rings()
3956 return 0; in bnxt_alloc_tx_rings()
3980 return 0; in bnxt_alloc_cp_arrays()
3989 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_all_cp_arrays()
4002 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_all_cp_arrays()
4012 return 0; in bnxt_alloc_all_cp_arrays()
4022 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_cp_rings()
4039 for (j = 0; j < cpr->cp_ring_count; j++) { in bnxt_free_cp_rings()
4048 cpr->cp_ring_count = 0; in bnxt_free_cp_rings()
4088 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_cp_rings()
4092 int cp_count = 0, k; in bnxt_alloc_cp_rings()
4093 int rx = 0, tx = 0; in bnxt_alloc_cp_rings()
4130 for (k = 0; k < cp_count; k++) { in bnxt_alloc_cp_rings()
4152 return 0; in bnxt_alloc_cp_rings()
4189 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info)); in bnxt_reset_rx_ring_struct()
4194 rmem->pg_tbl_map = 0; in bnxt_reset_rx_ring_struct()
4195 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_reset_rx_ring_struct()
4197 rmem->dma_arr[i] = 0; in bnxt_reset_rx_ring_struct()
4204 rmem->pg_tbl_map = 0; in bnxt_reset_rx_ring_struct()
4205 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_reset_rx_ring_struct()
4207 rmem->dma_arr[i] = 0; in bnxt_reset_rx_ring_struct()
4216 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_struct()
4234 rmem->vmem_size = 0; in bnxt_init_ring_struct()
4279 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { in bnxt_init_rxbd_pages()
4287 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { in bnxt_init_rxbd_pages()
4302 for (i = 0; i < bp->rx_ring_size; i++) { in bnxt_alloc_one_rx_ring_skb()
4321 for (i = 0; i < bp->rx_agg_ring_size; i++) { in bnxt_alloc_one_rx_ring_page()
4339 for (i = 0; i < bp->max_tpa; i++) { in bnxt_alloc_one_tpa_info_data()
4350 return 0; in bnxt_alloc_one_tpa_info_data()
4361 return 0; in bnxt_alloc_one_rx_ring()
4370 return 0; in bnxt_alloc_one_rx_ring()
4430 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_cp_rings()
4439 for (j = 0; j < cpr->cp_ring_count; j++) { in bnxt_init_cp_rings()
4452 int i, rc = 0; in bnxt_init_rx_rings()
4462 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_init_rx_rings()
4478 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_init_tx_rings()
4490 return 0; in bnxt_init_tx_rings()
4510 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_grps()
4518 return 0; in bnxt_init_ring_grps()
4525 bp->nr_vnics = 0; in bnxt_free_vnics()
4550 return 0; in bnxt_alloc_vnics()
4558 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_init_vnics()
4564 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) in bnxt_init_vnics()
4590 bp->toeplitz_prefix = 0; in bnxt_init_vnics()
4591 for (k = 0; k < 8; k++) { in bnxt_init_vnics()
4648 u32 agg_factor = 0, agg_ring_size = 0; in bnxt_set_ring_params()
4657 bp->rx_agg_ring_size = 0; in bnxt_set_ring_params()
4658 bp->rx_agg_nr_pages = 0; in bnxt_set_ring_params()
4800 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_free_vnic_attributes()
4823 vnic->flags = 0; in bnxt_free_vnic_attributes()
4829 int i, rc = 0, size; in bnxt_alloc_vnic_attributes()
4834 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_alloc_vnic_attributes()
4840 if (mem_size > 0) { in bnxt_alloc_vnic_attributes()
4898 return 0; in bnxt_alloc_vnic_attributes()
4921 BNXT_HWRM_DMA_ALIGN, 0); in bnxt_alloc_hwrm_resources()
4927 return 0; in bnxt_alloc_hwrm_resources()
4960 return 0; in bnxt_alloc_stats_mem()
4971 for (i = 0; i < count; i++) in bnxt_fill_masks()
4979 for (i = 0; i < count; i++) in bnxt_copy_hw_masks()
4999 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_qstat_ext()
5017 struct bnxt_napi *bnapi = bp->bnapi[0]; in bnxt_init_stats()
5055 bnxt_hwrm_port_qstats(bp, 0); in bnxt_init_stats()
5081 bnxt_hwrm_port_qstats_ext(bp, 0); in bnxt_init_stats()
5103 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_ring_stats()
5121 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_stats()
5138 return 0; in bnxt_alloc_stats()
5152 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) in bnxt_alloc_stats()
5154 return 0; in bnxt_alloc_stats()
5163 return 0; in bnxt_alloc_stats()
5167 return 0; in bnxt_alloc_stats()
5169 if (bp->hwrm_spec_code >= 0x10902 || in bnxt_alloc_stats()
5175 return 0; in bnxt_alloc_stats()
5178 return 0; in bnxt_alloc_stats()
5188 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_clear_ring_indices()
5198 cpr->cp_raw_cons = 0; in bnxt_clear_ring_indices()
5201 txr->tx_prod = 0; in bnxt_clear_ring_indices()
5202 txr->tx_cons = 0; in bnxt_clear_ring_indices()
5203 txr->tx_hw_cons = 0; in bnxt_clear_ring_indices()
5208 rxr->rx_prod = 0; in bnxt_clear_ring_indices()
5209 rxr->rx_agg_prod = 0; in bnxt_clear_ring_indices()
5210 rxr->rx_sw_agg_prod = 0; in bnxt_clear_ring_indices()
5211 rxr->rx_next_cons = 0; in bnxt_clear_ring_indices()
5213 bnapi->events = 0; in bnxt_clear_ring_indices()
5262 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { in bnxt_free_ntp_fltrs()
5281 bp->ntp_fltr_count = 0; in bnxt_free_ntp_fltrs()
5286 int i, rc = 0; in bnxt_alloc_ntp_fltrs()
5289 return 0; in bnxt_alloc_ntp_fltrs()
5291 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) in bnxt_alloc_ntp_fltrs()
5294 bp->ntp_fltr_count = 0; in bnxt_alloc_ntp_fltrs()
5307 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { in bnxt_free_l2_filters()
5326 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) in bnxt_init_l2_fltr_tbl()
5378 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { in bnxt_alloc_mem()
5397 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_mem()
5425 j = 0; in bnxt_alloc_mem()
5429 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_alloc_mem()
5449 bnapi2->tx_ring[0] = txr; in bnxt_alloc_mem()
5500 return 0; in bnxt_alloc_mem()
5514 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_int()
5543 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_int_sync()
5554 atomic_set(&bp->intr_sem, 0); in bnxt_enable_int()
5555 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_int()
5600 memset(data, 0, sizeof(data)); in bnxt_hwrm_func_drv_rgtr()
5601 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { in bnxt_hwrm_func_drv_rgtr()
5610 for (i = 0; i < 8; i++) in bnxt_hwrm_func_drv_rgtr()
5621 memset(async_events_bmap, 0, sizeof(async_events_bmap)); in bnxt_hwrm_func_drv_rgtr()
5622 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { in bnxt_hwrm_func_drv_rgtr()
5634 for (i = 0; i < bmap_size; i++) { in bnxt_hwrm_func_drv_rgtr()
5639 for (i = 0; i < 8; i++) in bnxt_hwrm_func_drv_rgtr()
5664 return 0; in bnxt_hwrm_func_drv_unrgtr()
5681 return 0; in bnxt_hwrm_tunnel_dst_port_free()
5684 return 0; in bnxt_hwrm_tunnel_dst_port_free()
5695 bp->vxlan_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5700 bp->nge_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5705 bp->vxlan_gpe_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5870 return 0; in bnxt_get_rss_flow_tuple_len()
5876 u64 prefix = bp->toeplitz_prefix, hash = 0; in bnxt_toeplitz()
5879 int i, j, len = 0; in bnxt_toeplitz()
5884 return 0; in bnxt_toeplitz()
5896 for (i = 0, j = 8; i < len; i++, j++) { in bnxt_toeplitz()
5900 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { in bnxt_toeplitz()
5901 if (byte & 0x80) in bnxt_toeplitz()
5904 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; in bnxt_toeplitz()
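bnxt_toeplitz() computes the RSS hash in software with a sliding 64-bit key window: for every set bit of the input tuple it XORs the top 32 bits of the window into the hash, then shifts the window and refills its low byte from the key (bp->toeplitz_prefix caches the preloaded first eight key bytes, which is why j starts at 8). A self-contained sketch of the algorithm, assuming a key of at least eight bytes:

    #include <linux/types.h>

    static u32 toeplitz_hash(const u8 *key, size_t key_len,
                             const u8 *data, size_t len)
    {
            u64 window = 0;         /* next 64 key bits; hash taps the top 32 */
            u32 hash = 0;
            size_t i, j;

            for (i = 0; i < 8; i++)                 /* preload key[0..7] */
                    window = (window << 8) | key[i];

            for (i = 0, j = 8; i < len; i++, j++) {
                    u8 byte = data[i];
                    int bit;

                    for (bit = 0; bit < 8; bit++, window <<= 1, byte <<= 1) {
                            if (byte & 0x80)
                                    hash ^= (u32)(window >> 32);
                    }
                    /* after 8 shifts the low byte is clear; slide in the next */
                    window |= (j < key_len) ? key[j] : 0;
            }
            return hash;
    }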
5937 bp->max_fltr, 0); in bnxt_init_l2_filter()
5938 if (bit_id < 0) in bnxt_init_l2_filter()
5948 return 0; in bnxt_init_l2_filter()
6026 u16 target_id = 0xffff; in bnxt_hwrm_l2_filter_free()
6053 u16 target_id = 0xffff; in bnxt_hwrm_l2_filter_alloc()
6089 req->l2_ivlan_mask = cpu_to_le16(0xfff); in bnxt_hwrm_l2_filter_alloc()
6139 for (i = 0; i < 4; i++) in bnxt_fill_ipv6_mask()
6140 mask[i] = cpu_to_be32(~0); in bnxt_fill_ipv6_mask()
6220 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6221 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6222 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6223 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6225 req->src_ipaddr[0] = keys->addrs.v4addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6226 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6227 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6228 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6257 key.vlan = 0; in bnxt_hwrm_set_vnic_filter()
6273 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ in bnxt_hwrm_clear_vnic_filter()
6276 for (i = 0; i < num_of_vnics; i++) { in bnxt_hwrm_clear_vnic_filter()
6279 for (j = 0; j < vnic->uc_filter_count; j++) { in bnxt_hwrm_clear_vnic_filter()
6285 vnic->uc_filter_count = 0; in bnxt_hwrm_clear_vnic_filter()
6321 return 0; in bnxt_hwrm_vnic_set_tpa()
6329 u32 nsegs, n, segs = 0, flags; in bnxt_hwrm_vnic_set_tpa()
6415 return 0; in bnxt_alloc_rss_indir_tbl()
6436 rss_indir_tbl = &bp->rss_indir_tbl[0]; in bnxt_set_dflt_rss_indir_tbl()
6438 for (i = 0; i < max_entries; i++) in bnxt_set_dflt_rss_indir_tbl()
6443 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); in bnxt_set_dflt_rss_indir_tbl()
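bnxt_set_dflt_rss_indir_tbl() spreads the RX rings over the indirection table and zeroes the padding at the tail. The default distribution comes from ethtool_rxfh_indir_default(), which is simply index % n_rings; a minimal sketch:

    #include <linux/ethtool.h>

    static void fill_dflt_indir(u16 *tbl, u32 tbl_size, u32 nr_rx_rings)
    {
            u32 i;

            /* round-robin the RX rings across the whole table */
            for (i = 0; i < tbl_size; i++)
                    tbl[i] = ethtool_rxfh_indir_default(i, nr_rx_rings);
    }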
6448 u32 i, tbl_size, max_ring = 0; in bnxt_get_max_rss_ring()
6451 return 0; in bnxt_get_max_rss_ring()
6454 for (i = 0; i < tbl_size; i++) in bnxt_get_max_rss_ring()
6463 return 0; in bnxt_get_nr_rss_ctxs()
6478 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { in bnxt_fill_hw_rss_tbl()
6494 for (i = 0; i < tbl_size; i++) { in bnxt_fill_hw_rss_tbl_p5()
6545 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) in bnxt_hwrm_vnic_set_rss()
6546 return 0; in bnxt_hwrm_vnic_set_rss()
6554 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_set_rss()
6579 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { in bnxt_hwrm_vnic_set_rss_p5()
6604 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_update_rss_hash_cfg()
6608 bp->rss_hash_delta = 0; in bnxt_hwrm_update_rss_hash_cfg()
6658 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_hwrm_vnic_ctx_free()
6661 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { in bnxt_hwrm_vnic_ctx_free()
6666 bp->rsscos_nr_ctxs = 0; in bnxt_hwrm_vnic_ctx_free()
6701 unsigned int ring = 0, grp_idx; in bnxt_hwrm_vnic_cfg()
6702 u16 def_vlan = 0; in bnxt_hwrm_vnic_cfg()
6710 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; in bnxt_hwrm_vnic_cfg()
6723 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_cfg()
6724 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_cfg()
6728 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_cfg()
6733 req->rss_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6737 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { in bnxt_hwrm_vnic_cfg()
6741 req->cos_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6745 ring = 0; in bnxt_hwrm_vnic_cfg()
6753 req->lb_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6791 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_vnic_free()
6812 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { in bnxt_hwrm_vnic_alloc()
6823 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) in bnxt_hwrm_vnic_alloc()
6845 if (bp->hwrm_spec_code < 0x10600) in bnxt_hwrm_vnic_qcaps()
6846 return 0; in bnxt_hwrm_vnic_qcaps()
6907 return 0; in bnxt_hwrm_ring_grp_alloc()
6914 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_grp_alloc()
6946 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_grp_free()
6966 int rc, err = 0; in hwrm_ring_alloc_send_msg()
6973 req->enables = 0; in hwrm_ring_alloc_send_msg()
6980 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); in hwrm_ring_alloc_send_msg()
6982 req->fbo = 0; in hwrm_ring_alloc_send_msg()
6989 u16 flags = 0; in hwrm_ring_alloc_send_msg()
7012 u16 flags = 0; in hwrm_ring_alloc_send_msg()
7093 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_async_event_cr()
7161 db->doorbell = bp->bar1 + map_idx * 0x80; in bnxt_set_db()
7194 return 0; in bnxt_hwrm_rx_ring_alloc()
7217 return 0; in bnxt_hwrm_rx_agg_ring_alloc()
7223 int i, rc = 0; in bnxt_hwrm_ring_alloc()
7230 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7257 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7285 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7313 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7335 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_cancel_dim()
7349 u16 error_code = 0; in hwrm_ring_free_send_msg()
7353 return 0; in hwrm_ring_free_send_msg()
7373 return 0; in hwrm_ring_free_send_msg()
7428 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_free()
7444 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_free()
7459 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_free()
7465 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { in bnxt_hwrm_ring_free()
7498 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_get_rings()
7499 return 0; in bnxt_hwrm_get_rings()
7505 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_get_rings()
7557 if (bp->hwrm_spec_code < 0x10601) in __bnxt_hwrm_get_tx_rings()
7558 return 0; in __bnxt_hwrm_get_tx_rings()
7580 u32 enables = 0; in __bnxt_hwrm_reserve_pf_rings()
7585 req->fid = cpu_to_le16(0xffff); in __bnxt_hwrm_reserve_pf_rings()
7586 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7589 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7590 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; in __bnxt_hwrm_reserve_pf_rings()
7592 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; in __bnxt_hwrm_reserve_pf_rings()
7594 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7597 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7599 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; in __bnxt_hwrm_reserve_pf_rings()
7601 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; in __bnxt_hwrm_reserve_pf_rings()
7603 0; in __bnxt_hwrm_reserve_pf_rings()
7624 u32 enables = 0; in __bnxt_hwrm_reserve_vf_rings()
7629 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; in __bnxt_hwrm_reserve_vf_rings()
7631 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; in __bnxt_hwrm_reserve_vf_rings()
7632 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; in __bnxt_hwrm_reserve_vf_rings()
7633 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; in __bnxt_hwrm_reserve_vf_rings()
7636 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_vf_rings()
7638 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_vf_rings()
7640 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; in __bnxt_hwrm_reserve_vf_rings()
7642 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; in __bnxt_hwrm_reserve_vf_rings()
7674 return 0; in bnxt_hwrm_reserve_pf_rings()
7681 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_reserve_pf_rings()
7695 return 0; in bnxt_hwrm_reserve_vf_rings()
7741 return 0; in bnxt_get_total_rss_ctxs()
7799 bp->hwrm_spec_code >= 0x10601) in bnxt_need_reserve_rings()
7848 struct bnxt_hw_rings hwr = {0}; in __bnxt_reserve_rings()
7851 int ulp_msix = 0; in __bnxt_reserve_rings()
7856 return 0; in __bnxt_reserve_rings()
7861 bnxt_set_ulp_stat_ctxs(bp, 0); in __bnxt_reserve_rings()
7965 return 0; in bnxt_hwrm_check_vf_rings()
8006 if (bp->hwrm_spec_code < 0x10801) in bnxt_hwrm_check_rings()
8007 return 0; in bnxt_hwrm_check_rings()
8032 if (bp->hwrm_spec_code < 0x10902) in bnxt_hwrm_coal_params_qcaps()
8136 return 0; in __bnxt_hwrm_set_coal_nq()
8208 return 0; in bnxt_hwrm_set_tx_coal()
8210 return 0; in bnxt_hwrm_set_tx_coal()
8233 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_set_coal()
8247 if (bnapi->rx_ring && bnapi->tx_ring[0]) { in bnxt_hwrm_set_coal()
8285 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_free()
8312 return 0; in bnxt_hwrm_stat_ctx_alloc()
8322 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_alloc()
8351 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_qcfg()
8393 if (bp->hwrm_spec_code < 0x10707 || in bnxt_hwrm_func_qcfg()
8435 ctxm->init_value = 0; in bnxt_init_ctx_initializer()
8443 for (type = 0; type < ctx_max; type++) { in bnxt_alloc_all_ctx_pg_info()
8456 return 0; in bnxt_alloc_all_ctx_pg_info()
8487 for (type = 0; type < BNXT_CTX_V2_MAX; ) { in bnxt_hwrm_func_backing_store_qcaps_v2()
8528 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; in bnxt_hwrm_func_backing_store_qcaps_v2()
8545 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || in bnxt_hwrm_func_backing_store_qcaps()
8547 return 0; in bnxt_hwrm_func_backing_store_qcaps()
8561 u8 init_val, init_idx = 0; in bnxt_hwrm_func_backing_store_qcaps()
8583 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8590 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8597 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8606 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8613 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8632 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8650 rc = 0; in bnxt_hwrm_func_backing_store_qcaps()
8671 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); in bnxt_hwrm_set_pg_attr()
8692 u32 flags = 0; in bnxt_hwrm_func_backing_store_cfg()
8699 return 0; in bnxt_hwrm_func_backing_store_cfg()
8793 for (i = 0, num_entries = &req->tqm_sp_num_entries, in bnxt_hwrm_func_backing_store_cfg()
8838 ctx_pg->nr_pages = 0; in bnxt_alloc_ctx_pg_tbls()
8854 for (i = 0; i < nr_tbls; i++) { in bnxt_alloc_ctx_pg_tbls()
8895 size_t len = 0, total_len = 0; in bnxt_copy_ctx_pg_tbls()
8912 head = 0; in bnxt_copy_ctx_pg_tbls()
8926 for (i = 0; i < nr_tbls; i++) { in bnxt_free_ctx_pg_tbls()
8943 ctx_pg->nr_pages = 0; in bnxt_free_ctx_pg_tbls()
8951 int i, rc = 0, n = 1; in bnxt_setup_ctxm_pg_tbls()
8962 for (i = 0; i < n && !rc; i++) { in bnxt_setup_ctxm_pg_tbls()
8978 int i, j, rc = 0, n = 1; in bnxt_hwrm_func_backing_store_cfg_v2()
8982 return 0; in bnxt_hwrm_func_backing_store_cfg_v2()
9006 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) in bnxt_hwrm_func_backing_store_cfg_v2()
9008 for (i = 0, j = 0; j < n && !rc; i++) { in bnxt_hwrm_func_backing_store_cfg_v2()
9035 int rc = 0; in bnxt_backing_store_cfg_v2()
9046 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n", in bnxt_backing_store_cfg_v2()
9057 return 0; in bnxt_backing_store_cfg_v2()
9065 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { in bnxt_backing_store_cfg_v2()
9074 return 0; in bnxt_backing_store_cfg_v2()
9096 size_t len = 0, total_len = 0; in __bnxt_copy_ctx_mem()
9100 return 0; in __bnxt_copy_ctx_mem()
9104 for (i = 0; i < n; i++) { in __bnxt_copy_ctx_mem()
9118 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail); in bnxt_copy_ctx_mem()
9127 ctxm->last = 0; in bnxt_free_one_ctx_mem()
9136 for (i = 0; i < n; i++) in bnxt_free_one_ctx_mem()
9141 ctxm->mem_valid = 0; in bnxt_free_one_ctx_mem()
9143 memset(ctxm, 0, sizeof(*ctxm)); in bnxt_free_one_ctx_mem()
9154 for (type = 0; type < BNXT_CTX_V2_MAX; type++) in bnxt_free_ctx_mem()
9172 u32 extra_srqs = 0; in bnxt_alloc_ctx_mem()
9173 u32 extra_qps = 0; in bnxt_alloc_ctx_mem()
9186 return 0; in bnxt_alloc_ctx_mem()
9196 ena = 0; in bnxt_alloc_ctx_mem()
9286 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) in bnxt_alloc_ctx_mem()
9300 return 0; in bnxt_alloc_ctx_mem()
9310 return 0; in bnxt_hwrm_crash_dump_mem_cfg()
9316 if (BNXT_PAGE_SIZE == 0x2000) in bnxt_hwrm_crash_dump_mem_cfg()
9318 else if (BNXT_PAGE_SIZE == 0x10000) in bnxt_hwrm_crash_dump_mem_cfg()
9341 u32 mem_size = 0; in bnxt_alloc_crash_dump_mem()
9345 return 0; in bnxt_alloc_crash_dump_mem()
9374 return 0; in bnxt_alloc_crash_dump_mem()
9388 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_resc_qcaps()
9443 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { in __bnxt_hwrm_ptp_qcfg()
9477 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); in __bnxt_hwrm_ptp_qcfg()
9480 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; in __bnxt_hwrm_ptp_qcfg()
9487 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; in __bnxt_hwrm_ptp_qcfg()
9494 return 0; in __bnxt_hwrm_ptp_qcfg()
9515 req->fid = cpu_to_le16(0xffff); in __bnxt_hwrm_func_qcaps()
9573 bp->tx_push_thresh = 0; in __bnxt_hwrm_func_qcaps()
9635 bp->fw_dbg_cap = 0; in bnxt_hwrm_dbg_qcaps()
9643 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_dbg_qcaps()
9672 if (bp->hwrm_spec_code >= 0x10803) { in bnxt_hwrm_func_qcaps()
9680 return 0; in bnxt_hwrm_func_qcaps()
9691 return 0; in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9723 return 0; in __bnxt_alloc_fw_health()
9730 return 0; in __bnxt_alloc_fw_health()
9739 return 0; in bnxt_alloc_fw_health()
9748 return 0; in bnxt_alloc_fw_health()
9825 u32 reg_base = 0xffffffff; in bnxt_map_fw_health_regs()
9831 for (i = 0; i < 4; i++) { in bnxt_map_fw_health_regs()
9836 if (reg_base == 0xffffffff) in bnxt_map_fw_health_regs()
9844 if (reg_base == 0xffffffff) in bnxt_map_fw_health_regs()
9845 return 0; in bnxt_map_fw_health_regs()
9848 return 0; in bnxt_map_fw_health_regs()
9872 return 0; in bnxt_hwrm_error_recovery_qcfg()
9912 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { in bnxt_hwrm_error_recovery_qcfg()
9938 req->enables = 0; in bnxt_hwrm_func_reset()
9959 int rc = 0; in bnxt_hwrm_queue_qportcfg()
9981 for (i = 0, j = 0; i < bp->max_tc; i++) { in bnxt_hwrm_queue_qportcfg()
10051 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", in bnxt_hwrm_ver_get()
10054 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); in bnxt_hwrm_ver_get()
10070 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { in bnxt_hwrm_ver_get()
10150 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || in bnxt_hwrm_fw_set_time()
10151 bp->hwrm_spec_code < 0x10400) in bnxt_hwrm_fw_set_time()
10154 time64_to_tm(now, 0, &tm); in bnxt_hwrm_fw_set_time()
10184 for (i = 0; i < count; i++) { in __bnxt_accumulate_stats()
10212 /* Chip bug. Counter intermittently becomes 0. */ in bnxt_accumulate_all_stats()
10216 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_accumulate_all_stats()
10258 return 0; in bnxt_hwrm_port_qstats()
10286 return 0; in bnxt_hwrm_port_qstats_ext()
10300 sizeof(struct tx_port_stats_ext) : 0; in bnxt_hwrm_port_qstats_ext()
10313 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; in bnxt_hwrm_port_qstats_ext()
10315 bp->fw_rx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
10316 bp->fw_tx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
10325 bp->pri2cos_valid = 0; in bnxt_hwrm_port_qstats_ext()
10342 for (i = 0; i < 8; i++) { in bnxt_hwrm_port_qstats_ext()
10346 /* Per port queue IDs start from 0, 10, 20, etc */ in bnxt_hwrm_port_qstats_ext()
10353 for (j = 0; j < bp->max_q; j++) { in bnxt_hwrm_port_qstats_ext()
10376 u32 tpa_flags = 0; in bnxt_set_tpa()
10381 return 0; in bnxt_set_tpa()
10382 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_set_tpa()
10390 return 0; in bnxt_set_tpa()
10397 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_clear_vnic_rss()
10449 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_br_mode()
10460 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) in bnxt_hwrm_set_cache_line_size()
10461 return 0; in bnxt_hwrm_set_cache_line_size()
10467 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_cache_line_size()
10484 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); in __bnxt_setup_vnic()
10573 for (i = 0; i < nr_ctxs; i++) { in __bnxt_setup_vnic_p5()
10625 int i, rc = 0; in bnxt_alloc_rfs_vnics()
10629 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); in bnxt_alloc_rfs_vnics()
10633 return 0; in bnxt_alloc_rfs_vnics()
10635 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rfs_vnics()
10662 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { in bnxt_del_one_rss_ctx()
10699 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || in bnxt_hwrm_realloc_rss_ctx_vnic()
10735 unsigned int rc = 0; in bnxt_setup_nitroa0_vnic()
10759 int rc = 0; in bnxt_init_chip()
10786 /* default vnic 0 */ in bnxt_init_chip()
10787 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); in bnxt_init_chip()
10817 /* Filter for default vnic 0 */ in bnxt_init_chip()
10818 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); in bnxt_init_chip()
10828 vnic->rx_mask = 0; in bnxt_init_chip()
10840 vnic->mc_list_count = 0; in bnxt_init_chip()
10842 u32 mask = 0; in bnxt_init_chip()
10870 return 0; in bnxt_init_chip()
10873 bnxt_hwrm_resource_free(bp, 0, true); in bnxt_init_chip()
10881 return 0; in bnxt_shutdown_nic()
10938 return 0; in __bnxt_trim_rings()
10976 return 0; in bnxt_trim_rings()
10983 const int len = sizeof(bp->irq_tbl[0].name); in bnxt_setup_msix()
10991 for (i = 0; i < tcs; i++) { in bnxt_setup_msix()
10998 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_setup_msix()
11025 if (map.index < 0) in bnxt_change_msix()
11125 if (num <= 0) in bnxt_get_avail_msix()
11126 return 0; in bnxt_get_avail_msix()
11141 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size; in bnxt_init_int_mode()
11149 return 0; in bnxt_init_int_mode()
11157 if (total_vecs < 0 || total_vecs < ulp_msix) { in bnxt_init_int_mode()
11167 for (i = 0; i < total_vecs; i++) in bnxt_init_int_mode()
11186 return 0; in bnxt_init_int_mode()
11213 return 0; in bnxt_reserve_rings()
11250 bp->num_tc = 0; in bnxt_reserve_rings()
11257 return 0; in bnxt_reserve_rings()
11272 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_irq()
11280 irq->have_cpumask = 0; in bnxt_free_irq()
11285 irq->requested = 0; in bnxt_free_irq()
11291 int i, j, rc = 0; in bnxt_request_irq()
11292 unsigned long flags = 0; in bnxt_request_irq()
11306 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { in bnxt_request_irq()
11352 for (i = 0; i < bp->rx_nr_rings; i++) in bnxt_del_napi()
11354 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) in bnxt_del_napi()
11357 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_del_napi()
11379 for (i = 0; i < cp_nr_rings; i++) { in bnxt_init_napi()
11398 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_napi()
11416 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_napi()
11420 bnapi->tx_fault = 0; in bnxt_enable_napi()
11439 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_disable()
11457 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_enable()
11459 WRITE_ONCE(txr->dev_state, 0); in bnxt_tx_enable()
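bnxt_tx_disable() and bnxt_tx_enable() flip a per-ring dev_state with WRITE_ONCE() so hot paths can test it locklessly with READ_ONCE(). A generic sketch of the pairing; RING_CLOSING is a hypothetical flag value, and the real driver additionally synchronizes (e.g. synchronize_net()) after disabling:

    #include <linux/compiler.h>

    #define RING_CLOSING    1       /* hypothetical flag for this sketch */

    struct tx_ring { unsigned int dev_state; };

    static void ring_disable(struct tx_ring *txr)
    {
            WRITE_ONCE(txr->dev_state, RING_CLOSING);
            /* caller then syncs readers before tearing the ring down */
    }

    static bool ring_usable(const struct tx_ring *txr)
    {
            /* lockless read paired with the WRITE_ONCE() above */
            return READ_ONCE(txr->dev_state) != RING_CLOSING;
    }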
11571 int rc = 0; in bnxt_hwrm_phy_qcaps()
11573 if (bp->hwrm_spec_code < 0x10201) in bnxt_hwrm_phy_qcaps()
11574 return 0; in bnxt_hwrm_phy_qcaps()
11597 if (bp->hwrm_spec_code >= 0x10a01) { in bnxt_hwrm_phy_qcaps()
11605 link_info->support_auto_speeds = 0; in bnxt_hwrm_phy_qcaps()
11606 link_info->support_pam4_auto_speeds = 0; in bnxt_hwrm_phy_qcaps()
11607 link_info->support_auto_speeds2 = 0; in bnxt_hwrm_phy_qcaps()
11633 if (bp->hwrm_spec_code < 0x10a03) in bnxt_hwrm_mac_qcaps()
11701 rc = 0; in bnxt_update_link()
11709 if (bp->hwrm_spec_code >= 0x10800) in bnxt_update_link()
11722 link_info->link_speed = 0; in bnxt_update_link()
11723 link_info->active_lanes = 0; in bnxt_update_link()
11741 link_info->phy_ver[0] = resp->phy_maj; in bnxt_update_link()
11755 eee->eee_active = 0; in bnxt_update_link()
11786 if (bp->hwrm_spec_code >= 0x10504) { in bnxt_update_link()
11805 return 0; in bnxt_update_link()
11810 return 0; in bnxt_update_link()
11829 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_get_port_module_status()
11844 if (bp->hwrm_spec_code >= 0x10201) in bnxt_hwrm_set_pause_common()
11860 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_hwrm_set_pause_common()
11930 bp->link_info.auto_pause_setting = 0; in bnxt_hwrm_set_pause()
11986 return 0; in bnxt_hwrm_shutdown_link()
11990 return 0; in bnxt_hwrm_shutdown_link()
12029 int retry = 0, rc; in bnxt_try_recover_fw()
12043 "Firmware not responding, status: 0x%x\n", in bnxt_try_recover_fw()
12064 hw_resc->resv_cp_rings = 0; in bnxt_clear_reservations()
12065 hw_resc->resv_stat_ctxs = 0; in bnxt_clear_reservations()
12066 hw_resc->resv_irqs = 0; in bnxt_clear_reservations()
12067 hw_resc->resv_tx_rings = 0; in bnxt_clear_reservations()
12068 hw_resc->resv_rx_rings = 0; in bnxt_clear_reservations()
12069 hw_resc->resv_hw_ring_grps = 0; in bnxt_clear_reservations()
12070 hw_resc->resv_vnics = 0; in bnxt_clear_reservations()
12071 hw_resc->resv_rsscos_ctxs = 0; in bnxt_clear_reservations()
12073 bp->tx_nr_rings = 0; in bnxt_clear_reservations()
12074 bp->rx_nr_rings = 0; in bnxt_clear_reservations()
12083 return 0; /* no resource reservations required */ in bnxt_cancel_reservations()
12100 int rc, retry = 0; in bnxt_hwrm_if_change()
12101 u32 flags = 0; in bnxt_hwrm_if_change()
12104 return 0; in bnxt_hwrm_if_change()
12139 return 0; in bnxt_hwrm_if_change()
12188 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
12189 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_port_led_qcaps()
12190 return 0; in bnxt_hwrm_port_led_qcaps()
12203 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { in bnxt_hwrm_port_led_qcaps()
12207 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * in bnxt_hwrm_port_led_qcaps()
12209 for (i = 0; i < bp->num_leds; i++) { in bnxt_hwrm_port_led_qcaps()
12215 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
12221 return 0; in bnxt_hwrm_port_led_qcaps()
12267 u16 next_handle = 0; in bnxt_hwrm_get_wol_fltrs()
12280 if (next_handle != 0) { in bnxt_hwrm_get_wol_fltrs()
12294 u16 handle = 0; in bnxt_get_wol_settings()
12296 bp->wol = 0; in bnxt_get_wol_settings()
12302 } while (handle && handle != 0xffff); in bnxt_get_wol_settings()
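bnxt_get_wol_settings() walks the firmware's WoL filters by handle chaining: the walk starts at handle 0, each query returns the next handle, and 0xffff marks the end. The loop shape, with a hypothetical stubbed query helper standing in for the HWRM message exchange:

    #include <linux/types.h>

    /* hypothetical stand-in: real code sends an HWRM request and reads
     * the next handle out of the response */
    static u16 query_next_fltr(u16 handle)
    {
            return 0xffff;          /* stub: report "no more entries" */
    }

    static void walk_fltrs(void)
    {
            u16 handle = 0;         /* 0 starts the walk */

            do {
                    handle = query_next_fltr(handle);
            } while (handle && handle != 0xffff);   /* 0xffff terminates */
    }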
12320 eee->eee_enabled = 0; in bnxt_eee_config_ok()
12347 return 0; in bnxt_update_phy_setting()
12426 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; in bnxt_cfg_one_usr_fltr()
12459 int rc = 0; in bnxt_set_xps_mapping()
12469 for (i = 0; i < nr_cpus; i++) { in bnxt_set_xps_mapping()
12477 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { in bnxt_set_xps_mapping()
12494 int rc = 0; in __bnxt_open_nic()
12580 return 0; in __bnxt_open_nic()
12595 int rc = 0; in bnxt_open_nic()
12614 int rc = 0; in bnxt_half_open_nic()
12636 return 0; in bnxt_half_open_nic()
12773 else if (rc < 0) in bnxt_close_nic()
12787 return 0; in bnxt_close()
12797 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_read()
12806 req->reg_addr = cpu_to_le16(reg & 0x1f); in bnxt_hwrm_port_phy_read()
12828 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_write()
12837 req->reg_addr = cpu_to_le16(reg & 0x1f); in bnxt_hwrm_port_phy_write()
12862 u16 mii_regval = 0; in bnxt_ioctl()
12898 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_get_ring_stats()
13016 for (i = 0; i < bp->cp_nr_rings; i++) in bnxt_get_ring_err_stats()
13026 int mc_count = 0; in bnxt_mc_list_updated()
13028 int off = 0; in bnxt_mc_list_updated()
13033 vnic->mc_list_count = 0; in bnxt_mc_list_updated()
13059 int off = 0; in bnxt_uc_list_updated()
13100 vnic->mc_list_count = 0; in bnxt_set_rx_mode()
13117 int i, off = 0, rc; in bnxt_cfg_rx_mode()
13148 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { in bnxt_cfg_rx_mode()
13149 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); in bnxt_cfg_rx_mode()
13156 rc = 0; in bnxt_cfg_rx_mode()
13171 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); in bnxt_cfg_rx_mode()
13177 vnic->mc_list_count = 0; in bnxt_cfg_rx_mode()
13178 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); in bnxt_cfg_rx_mode()
13227 struct bnxt_hw_rings hwr = {0}; in bnxt_rfs_capable()
13271 hwr.rss_ctx = 0; in bnxt_rfs_capable()
13327 int rc = 0; in bnxt_set_features()
13350 if ((bp->flags & BNXT_FLAG_TPA) == 0 || in bnxt_set_features()
13351 (flags & BNXT_FLAG_TPA) == 0 || in bnxt_set_features()
13395 int hdr_count = 0; in bnxt_exthdr_check()
13432 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || in bnxt_exthdr_check()
13568 for (i = 0; i < num_words; i++) in bnxt_dbg_hwrm_rd_reg()
13638 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_dbg_dump_states()
13732 if (atomic_read(&bp->intr_sem) != 0) in bnxt_timer()
13807 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_rx_ring_reset()
13826 rxr->rx_prod = 0; in bnxt_rx_ring_reset()
13827 rxr->rx_agg_prod = 0; in bnxt_rx_ring_reset()
13828 rxr->rx_sw_agg_prod = 0; in bnxt_rx_ring_reset()
13829 rxr->rx_next_cons = 0; in bnxt_rx_ring_reset()
13860 u16 val = 0; in bnxt_fw_reset_close()
13863 if (val == 0xffff) in bnxt_fw_reset_close()
13864 bp->fw_reset_min_dsecs = 0; in bnxt_fw_reset_close()
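bnxt_fw_reset_close() reads PCI config space and treats an all-ones value as the device having dropped off the bus, since config reads to an absent device return 0xffff. The canonical liveness probe as a sketch (the PCI core also offers pci_device_is_present() for this):

    #include <linux/pci.h>

    static bool my_pci_dev_alive(struct pci_dev *pdev)
    {
            u16 vid;

            pci_read_config_word(pdev, PCI_VENDOR_ID, &vid);
            /* a surprise-removed device answers config reads with all ones */
            return vid != 0xffff;
    }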
13921 wait_dsecs = 0; in bnxt_force_fw_reset()
13945 * < 0 on error.
13953 return 0; in bnxt_get_registered_vfs()
13965 return 0; in bnxt_get_registered_vfs()
13975 int n = 0, tmo; in bnxt_fw_reset()
13990 if (n < 0) { in bnxt_fw_reset()
13996 } else if (n > 0) { in bnxt_fw_reset()
14027 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_chk_missed_irq()
14037 for (j = 0; j < cpr->cp_ring_count; j++) { in bnxt_chk_missed_irq()
14051 fw_ring_id, &val[0], &val[1]); in bnxt_chk_missed_irq()
14065 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_init_ethtool_link_settings()
14101 bnxt_ulp_start(bp, 0); in bnxt_ulp_restart()
14130 bnxt_hwrm_port_qstats(bp, 0); in bnxt_sp_task()
14131 bnxt_hwrm_port_qstats_ext(bp, 0); in bnxt_sp_task()
14221 struct bnxt_hw_rings hwr = {0}; in bnxt_check_rings()
14305 u16 flags = 0; in bnxt_init_dflt_coal()
14353 bp->fw_cap = 0; in bnxt_fw_init_one_p1()
14377 return 0; in bnxt_fw_init_one_p1()
14406 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); in bnxt_fw_init_one_p2()
14434 return 0; in bnxt_fw_init_one_p2()
14446 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { in bnxt_set_dflt_rss_hash_type()
14510 return 0; in bnxt_fw_init_one()
14530 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; in bnxt_fw_reset_writel()
14540 pci_read_config_dword(bp->pdev, 0, &val); in bnxt_fw_reset_writel()
14557 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_reset_permitted()
14578 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) in bnxt_reset_all()
14608 bp->fw_reset_state = 0; in bnxt_fw_reset_abort()
14615 int rc = 0; in bnxt_fw_reset_task()
14627 if (n < 0) { in bnxt_fw_reset_task()
14632 } else if (n > 0) { in bnxt_fw_reset_task()
14635 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
14694 if (val == 0xffff) { in bnxt_fw_reset_task()
14749 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
14750 /* Make sure fw_reset_state is 0 before clearing the flag */ in bnxt_fw_reset_task()
14760 bnxt_ulp_start(bp, 0); in bnxt_fw_reset_task()
14775 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); in bnxt_fw_reset_task()
14799 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { in bnxt_init_board()
14812 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && in bnxt_init_board()
14813 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { in bnxt_init_board()
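bnxt_init_board() requests a 64-bit DMA mask and falls back to 32-bit before aborting, the standard dma_set_mask_and_coherent() cascade. The idiom in isolation:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int set_dma_caps(struct pci_dev *pdev)
    {
            /* prefer 64-bit DMA; fall back to 32-bit; fail if neither fits */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
                dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                    dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                    return -EIO;
            }
            return 0;
    }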
14827 bp->bar0 = pci_ioremap_bar(pdev, 0); in bnxt_init_board()
14852 timer_setup(&bp->timer, bnxt_timer, 0); in bnxt_init_board()
14859 return 0; in bnxt_init_board()
14877 int rc = 0; in bnxt_change_mac_addr()
14883 return 0; in bnxt_change_mac_addr()
14921 return 0; in bnxt_change_mtu()
14937 return 0; in bnxt_setup_mq_tc()
14958 bp->num_tc = 0; in bnxt_setup_mq_tc()
14968 return 0; in bnxt_setup_mq_tc()
15032 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); in bnxt_insert_ntp_filter()
15033 if (bit_id < 0) { in bnxt_insert_ntp_filter()
15047 return 0; in bnxt_insert_ntp_filter()
15112 int rc = 0, idx; in bnxt_rx_flow_steer()
15116 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; in bnxt_rx_flow_steer()
15122 key.vlan = 0; in bnxt_rx_flow_steer()
15138 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { in bnxt_rx_flow_steer()
15152 if (bp->hwrm_spec_code < 0x10601) { in bnxt_rx_flow_steer()
15160 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { in bnxt_rx_flow_steer()
15212 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { in bnxt_cfg_ntp_filters()
15307 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, in bnxt_bridge_getlink()
15316 int rem, rc = 0; in bnxt_bridge_setlink()
15318 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) in bnxt_bridge_setlink()
15355 return 0; in bnxt_get_port_parent_id()
15404 stats->packets = 0; in bnxt_get_queue_stats_rx()
15409 stats->bytes = 0; in bnxt_get_queue_stats_rx()
15430 stats->packets = 0; in bnxt_get_queue_stats_tx()
15435 stats->bytes = 0; in bnxt_get_queue_stats_tx()
15477 clone->rx_prod = 0; in bnxt_queue_mem_alloc()
15478 clone->rx_agg_prod = 0; in bnxt_queue_mem_alloc()
15479 clone->rx_sw_agg_prod = 0; in bnxt_queue_mem_alloc()
15480 clone->rx_next_cons = 0; in bnxt_queue_mem_alloc()
15486 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0); in bnxt_queue_mem_alloc()
15487 if (rc < 0) in bnxt_queue_mem_alloc()
15527 return 0; in bnxt_queue_mem_alloc()
15596 for (i = 0; i < dst_rmem->nr_pages; i++) { in bnxt_copy_rx_ring()
15620 for (i = 0; i < dst_rmem->nr_pages; i++) { in bnxt_copy_rx_ring()
15665 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_queue_start()
15679 return 0; in bnxt_queue_start()
15693 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_queue_stop()
15695 vnic->mru = 0; in bnxt_queue_stop()
15705 rxr->rx_next_cons = 0; in bnxt_queue_stop()
15713 return 0; in bnxt_queue_stop()
15746 bp->sp_event = 0; in bnxt_remove_one()
15773 int rc = 0; in bnxt_probe_phy()
15776 bp->phy_flags = 0; in bnxt_probe_phy()
15788 bp->mac_flags = 0; in bnxt_probe_phy()
15792 return 0; in bnxt_probe_phy()
15811 return 0; in bnxt_probe_phy()
15829 int max_ring_grps = 0, max_irq; in _bnxt_get_max_rings()
15852 *max_rx = 0; in _bnxt_get_max_rings()
15853 *max_tx = 0; in _bnxt_get_max_rings()
15905 return 0; in bnxt_get_dflt_rings()
15914 rc = 0; in bnxt_get_dflt_rings()
15936 return 0; in bnxt_set_dflt_rings()
15988 bp->tx_nr_rings = 0; in bnxt_set_dflt_rings()
15989 bp->rx_nr_rings = 0; in bnxt_set_dflt_rings()
15999 return 0; in bnxt_init_dflt_ring_mode()
16051 int rc = 0; in bnxt_init_mac_addr()
16091 if (pos < 0) in bnxt_vpd_read_info()
16101 if (pos < 0) in bnxt_vpd_read_info()
16124 return 0; in bnxt_pcie_dsn_get()
16134 return 0; in bnxt_map_db_bar()
16141 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); in bnxt_print_device_info()
16188 if (rc < 0) in bnxt_init_one()
16391 return 0; in bnxt_init_one()
16454 int rc = 0; in bnxt_suspend()
16475 int rc = 0; in bnxt_resume()
16501 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { in bnxt_resume()
16603 int retry = 0; in bnxt_io_slot_reset()
16604 int err = 0; in bnxt_io_slot_reset()
16626 * write the BARs to 0 to force restore, in case of fatal error. in bnxt_io_slot_reset()
16632 pci_write_config_dword(bp->pdev, off, 0); in bnxt_io_slot_reset()
16736 return 0; in bnxt_init()